Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/evgpe.c6
-rw-r--r--drivers/acpi/acpica/nsobject.c4
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/cppc_acpi.c9
-rw-r--r--drivers/acpi/utils.c1
-rw-r--r--drivers/android/binder.c3
-rw-r--r--drivers/android/binder_alloc.c18
-rw-r--r--drivers/ata/libata-zpodd.c34
-rw-r--r--drivers/auxdisplay/Kconfig38
-rw-r--r--drivers/auxdisplay/Makefile2
-rw-r--r--drivers/auxdisplay/charlcd.c55
-rw-r--r--drivers/auxdisplay/hd44780.c4
-rw-r--r--drivers/auxdisplay/panel.c4
-rw-r--r--drivers/base/power/domain.c13
-rw-r--r--drivers/base/swnode.c4
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/null_blk_main.c5
-rw-r--r--drivers/block/paride/pcd.c20
-rw-r--r--drivers/block/paride/pf.c26
-rw-r--r--drivers/block/rbd.c28
-rw-r--r--drivers/block/xsysace.c2
-rw-r--r--drivers/block/zram/zram_drv.c32
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c4
-rw-r--r--drivers/char/tpm/tpm-dev-common.c9
-rw-r--r--drivers/char/tpm/tpm-interface.c14
-rw-r--r--drivers/clocksource/clps711x-timer.c44
-rw-r--r--drivers/clocksource/mips-gic-timer.c2
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/clocksource/timer-riscv.c5
-rw-r--r--drivers/clocksource/timer-ti-dm.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c10
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/crypto/caam/caamhash.c13
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/gpio/gpio-adnp.c6
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-mockup.c10
-rw-r--r--drivers/gpio/gpiolib-of.c17
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c34
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c5
-rw-r--r--drivers/gpu/drm/drm_drv.c6
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_file.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c110
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c13
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c33
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c48
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c1
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c23
-rw-r--r--drivers/gpu/drm/i915/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c69
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h10
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c46
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_phy.c35
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_phy.h5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c49
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c23
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c5
-rw-r--r--drivers/gpu/drm/tegra/hub.c4
-rw-r--r--drivers/gpu/drm/tegra/vic.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c72
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h1
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c8
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c6
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c2
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c2
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c1
-rw-r--r--drivers/hid/hid-logitech-hidpp.c13
-rw-r--r--drivers/hid/hid-quirks.c11
-rw-r--r--drivers/hid/hid-steam.c26
-rw-r--r--drivers/hid/hid-uclogic-params.c4
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hwmon/Kconfig1
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/occ/common.c6
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c26
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c4
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c31
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c12
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c34
-rw-r--r--drivers/infiniband/hw/mlx5/main.c7
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c2
-rw-r--r--drivers/iommu/amd_iommu.c16
-rw-r--r--drivers/iommu/amd_iommu_init.c9
-rw-r--r--drivers/iommu/amd_iommu_types.h2
-rw-r--r--drivers/iommu/intel-iommu.c5
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c19
-rw-r--r--drivers/iommu/iommu.c8
-rw-r--r--drivers/iommu/iova.c5
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic.c45
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c8
-rw-r--r--drivers/irqchip/irq-ls1x.c1
-rw-r--r--drivers/irqchip/irq-mbigen.c3
-rw-r--r--drivers/irqchip/irq-mmp.c2
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c2
-rw-r--r--drivers/irqchip/irq-stm32-exti.c10
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c3
-rw-r--r--drivers/leds/leds-pca9532.c8
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c16
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-init.c2
-rw-r--r--drivers/md/dm-integrity.c16
-rw-r--r--drivers/md/dm-rq.c11
-rw-r--r--drivers/md/dm-table.c39
-rw-r--r--drivers/md/dm.c30
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c42
-rw-r--r--drivers/mfd/twl-core.c23
-rw-r--r--drivers/misc/habanalabs/command_submission.c6
-rw-r--r--drivers/misc/habanalabs/debugfs.c7
-rw-r--r--drivers/misc/habanalabs/device.c71
-rw-r--r--drivers/misc/habanalabs/goya/goya.c65
-rw-r--r--drivers/misc/habanalabs/habanalabs.h21
-rw-r--r--drivers/misc/habanalabs/hw_queue.c5
-rw-r--r--drivers/misc/habanalabs/memory.c38
-rw-r--r--drivers/misc/habanalabs/mmu.c6
-rw-r--r--drivers/mmc/host/alcor.c59
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c16
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c8
-rw-r--r--drivers/mmc/host/sdhci-omap.c41
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c6
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c24
-rw-r--r--drivers/net/dsa/qca8k.c174
-rw-r--r--drivers/net/dsa/qca8k.h13
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c20
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c53
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c18
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c57
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c42
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h93
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c317
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c16
-rw-r--r--drivers/net/ethernet/sis/sis900.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c48
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c6
-rw-r--r--drivers/net/hyperv/netvsc_drv.c32
-rw-r--r--drivers/net/ieee802154/adf7242.c4
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/phy/Kconfig3
-rw-r--r--drivers/net/phy/broadcom.c13
-rw-r--r--drivers/net/phy/dp83822.c34
-rw-r--r--drivers/net/phy/meson-gxl.c6
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/vrf.c9
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c4
-rw-r--r--drivers/nvme/host/multipath.c5
-rw-r--r--drivers/nvme/host/tcp.c2
-rw-r--r--drivers/nvme/target/core.c4
-rw-r--r--drivers/nvme/target/io-cmd-file.c20
-rw-r--r--drivers/parisc/iosapic.c6
-rw-r--r--drivers/parport/daisy.c32
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c10
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c5
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c10
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c2
-rw-r--r--drivers/reset/reset-meson-audio-arb.c1
-rw-r--r--drivers/rtc/Kconfig4
-rw-r--r--drivers/rtc/rtc-cros-ec.c4
-rw-r--r--drivers/rtc/rtc-da9063.c7
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/s390/cio/chsc.c13
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c8
-rw-r--r--drivers/s390/crypto/ap_bus.c19
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c26
-rw-r--r--drivers/s390/crypto/zcrypt_api.c30
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/s390/scsi/zfcp_erp.c17
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c21
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/scsi/aacraid/aacraid.h7
-rw-r--r--drivers/scsi/aacraid/commsup.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c39
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c12
-rw-r--r--drivers/scsi/qedi/qedi_main.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_dh.c1
-rw-r--r--drivers/scsi/scsi_lib.c15
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/sd.c22
-rw-r--r--drivers/scsi/storvsc_drv.c15
-rw-r--r--drivers/soc/bcm/bcm2835-power.c49
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/axis-fifo/Kconfig1
-rw-r--r--drivers/staging/comedi/comedidev.h2
-rw-r--r--drivers/staging/comedi/drivers.c33
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c10
-rw-r--r--drivers/staging/erofs/dir.c45
-rw-r--r--drivers/staging/erofs/unzip_vle.c45
-rw-r--r--drivers/staging/erofs/unzip_vle_lz4.c7
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts29
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi73
-rw-r--r--drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt48
-rw-r--r--drivers/staging/mt7621-eth/Kconfig39
-rw-r--r--drivers/staging/mt7621-eth/Makefile14
-rw-r--r--drivers/staging/mt7621-eth/TODO13
-rw-r--r--drivers/staging/mt7621-eth/ethtool.c250
-rw-r--r--drivers/staging/mt7621-eth/ethtool.h15
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7620.h277
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7621.c297
-rw-r--r--drivers/staging/mt7621-eth/mdio.c275
-rw-r--r--drivers/staging/mt7621-eth/mdio.h27
-rw-r--r--drivers/staging/mt7621-eth/mdio_mt7620.c173
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c2176
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.h716
-rw-r--r--drivers/staging/mt7621-eth/soc_mt7621.c161
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/octeon/ethernet.c40
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h4
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c10
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c2
-rw-r--r--drivers/staging/speakup/speakup_soft.c16
-rw-r--r--drivers/staging/speakup/spk_priv.h1
-rw-r--r--drivers/staging/speakup/synth.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c8
-rw-r--r--drivers/staging/vt6655/device_main.c11
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c9
-rw-r--r--drivers/thermal/cpu_cooling.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c21
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c4
-rw-r--r--drivers/thermal/mtk_thermal.c7
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c24
-rw-r--r--drivers/tty/serial/atmel_serial.c52
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c3
-rw-r--r--drivers/tty/serial/mxs-auart.c4
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c12
-rw-r--r--drivers/tty/serial/sh-sci.c12
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/common/common.c2
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c6
-rw-r--r--drivers/usb/gadget/udc/net2272.c1
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/host/u132-hcd.c3
-rw-r--r--drivers/usb/host/xhci-dbgcap.c5
-rw-r--r--drivers/usb/host/xhci-hub.c19
-rw-r--r--drivers/usb/host/xhci-rcar.c1
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci.h8
-rw-r--r--drivers/usb/misc/usb251xb.c4
-rw-r--r--drivers/usb/mtu3/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h4
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c27
-rw-r--r--drivers/usb/typec/tcpm/wcove.c9
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c14
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c106
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h15
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c26
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c32
-rw-r--r--drivers/virt/vboxguest/vboxguest_version.h9
-rw-r--r--drivers/virt/vboxguest/vmmdev.h8
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/xen/privcmd-buf.c3
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
456 files changed, 3644 insertions, 6755 deletions
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 62d3aa74277b..5e9d7348c16f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_enable_gpe);
- /* Enable the requested GPE */
+ /* Clear the GPE status */
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+ /* Enable the requested GPE */
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 8638f43cfc3d..79d86da1c892 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
}
}
+ if (obj_desc->common.type == ACPI_TYPE_REGION) {
+ acpi_ut_remove_address_range(obj_desc->region.space_id, node);
+ }
+
/* Clear the Node entry in all cases */
node->object = NULL;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 6ecbbabf1233..eec263c9019e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
- /* Initialize debug output. Linux does not use ACPICA defaults */
- acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
-
#ifdef CONFIG_X86
/*
* If the machine falls into the DMI check table,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1b207fca1420..d4244e7d0e38 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
cpc_read(cpunum, nominal_reg, &nom);
perf_caps->nominal_perf = nom;
- cpc_read(cpunum, guaranteed_reg, &guaranteed);
- perf_caps->guaranteed_perf = guaranteed;
+ if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
+ IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
+ perf_caps->guaranteed_perf = 0;
+ } else {
+ cpc_read(cpunum, guaranteed_reg, &guaranteed);
+ perf_caps->guaranteed_perf = guaranteed;
+ }
cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
perf_caps->lowest_nonlinear_perf = min_nonlinear;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 78db97687f26..c4b06cc075f9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ put_device(dev);
return !!dev;
}
EXPORT_SYMBOL(acpi_dev_present);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8685882da64c..4b9c7ca492e6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
offset, read_size);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467670a0..195f120c4e8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+ mm = alloc->vma_vm_mm;
+ if (!mmget_not_zero(mm))
+ goto err_mmget;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
- if (vma) {
- if (!mmget_not_zero(alloc->vma_vm_mm))
- goto err_mmget;
- mm = alloc->vma_vm_mm;
- if (!down_read_trylock(&mm->mmap_sem))
- goto err_down_write_mmap_sem_failed;
- }
list_lru_isolate(lru, item);
spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
-
- up_read(&mm->mmap_sem);
- mmput(mm);
}
+ up_write(&mm->mmap_sem);
+ mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index b3ed8f9953a8..173e6f2dd9af 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
/* Per the spec, only slot type and drawer type ODD can be supported */
static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
{
- char buf[16];
+ char *buf;
unsigned int ret;
- struct rm_feature_desc *desc = (void *)(buf + 8);
+ struct rm_feature_desc *desc;
struct ata_taskfile tf;
static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
- 0, sizeof(buf),
+ 0, 16,
0, 0, 0,
};
+ buf = kzalloc(16, GFP_KERNEL);
+ if (!buf)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+ desc = (void *)(buf + 8);
+
ata_tf_init(dev, &tf);
tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
tf.protocol = ATAPI_PROT_PIO;
- tf.lbam = sizeof(buf);
+ tf.lbam = 16;
ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
- buf, sizeof(buf), 0);
- if (ret)
+ buf, 16, 0);
+ if (ret) {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
- if (be16_to_cpu(desc->feature_code) != 3)
+ if (be16_to_cpu(desc->feature_code) != 3) {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
- if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+ kfree(buf);
return ODD_MECH_TYPE_SLOT;
- else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+ } else if (desc->mech_type == 1 && desc->load == 0 &&
+ desc->eject == 1) {
+ kfree(buf);
return ODD_MECH_TYPE_DRAWER;
- else
+ } else {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
}
/* Test if ODD is zero power ready by sense code */
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 57410f9c5d44..c52c738e554a 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -164,9 +164,7 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
-endif # AUXDISPLAY
-
-menuconfig PANEL
+menuconfig PARPORT_PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
select CHARLCD
@@ -178,7 +176,7 @@ menuconfig PANEL
compiled as a module, or linked into the kernel and started at boot.
If you don't understand what all this is about, say N.
-if PANEL
+if PARPORT_PANEL
config PANEL_PARPORT
int "Default parallel port number (0=LPT1)"
@@ -419,8 +417,11 @@ config PANEL_LCD_PIN_BL
Default for the 'BL' pin in custom profile is '0' (uncontrolled).
+endif # PARPORT_PANEL
+
config PANEL_CHANGE_MESSAGE
bool "Change LCD initialization message ?"
+ depends on CHARLCD
default "n"
---help---
This allows you to replace the boot message indicating the kernel version
@@ -444,7 +445,34 @@ config PANEL_BOOT_MESSAGE
An empty message will only clear the display at driver init time. Any other
printf()-formatted message is valid with newline and escape codes.
-endif # PANEL
+choice
+ prompt "Backlight initial state"
+ default CHARLCD_BL_FLASH
+
+ config CHARLCD_BL_OFF
+ bool "Off"
+ help
+ Backlight is initially turned off
+
+ config CHARLCD_BL_ON
+ bool "On"
+ help
+ Backlight is initially turned on
+
+ config CHARLCD_BL_FLASH
+ bool "Flash"
+ help
+ Backlight is flashed briefly on init
+
+endchoice
+
+endif # AUXDISPLAY
+
+config PANEL
+ tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)"
+ depends on PARPORT
+ select AUXDISPLAY
+ select PARPORT_PANEL
config CHARLCD
tristate "Character LCD core support" if COMPILE_TEST
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 7ac6776ca3f6..cf54b5efb07e 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
obj-$(CONFIG_HD44780) += hd44780.o
obj-$(CONFIG_HT16K33) += ht16k33.o
-obj-$(CONFIG_PANEL) += panel.o
+obj-$(CONFIG_PARPORT_PANEL) += panel.o
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 60e0b772673f..92745efefb54 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -91,7 +91,7 @@ struct charlcd_priv {
unsigned long long drvdata[0];
};
-#define to_priv(p) container_of(p, struct charlcd_priv, lcd)
+#define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd)
/* Device single-open policy control */
static atomic_t charlcd_available = ATOMIC_INIT(1);
@@ -105,7 +105,7 @@ static void long_sleep(int ms)
/* turn the backlight on or off */
static void charlcd_backlight(struct charlcd *lcd, int on)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
if (!lcd->ops->backlight)
return;
@@ -134,7 +134,7 @@ static void charlcd_bl_off(struct work_struct *work)
/* turn the backlight on for a little while */
void charlcd_poke(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
if (!lcd->ops->backlight)
return;
@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(charlcd_poke);
static void charlcd_gotoxy(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
unsigned int addr;
/*
@@ -170,7 +170,7 @@ static void charlcd_gotoxy(struct charlcd *lcd)
static void charlcd_home(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
priv->addr.x = 0;
priv->addr.y = 0;
@@ -179,7 +179,7 @@ static void charlcd_home(struct charlcd *lcd)
static void charlcd_print(struct charlcd *lcd, char c)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
if (priv->addr.x < lcd->bwidth) {
if (lcd->char_conv)
@@ -211,7 +211,7 @@ static void charlcd_clear_fast(struct charlcd *lcd)
/* clears the display and resets X/Y */
static void charlcd_clear_display(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR);
priv->addr.x = 0;
@@ -223,7 +223,7 @@ static void charlcd_clear_display(struct charlcd *lcd)
static int charlcd_init_display(struct charlcd *lcd)
{
void (*write_cmd_raw)(struct charlcd *lcd, int cmd);
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
u8 init;
if (lcd->ifwidth != 4 && lcd->ifwidth != 8)
@@ -369,7 +369,7 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
static inline int handle_lcd_special_code(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
/* LCD special codes */
@@ -580,7 +580,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
static void charlcd_write_char(struct charlcd *lcd, char c)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
/* first, we'll test if we're in escape mode */
if ((c != '\n') && priv->esc_seq.len >= 0) {
@@ -705,7 +705,7 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
static int charlcd_open(struct inode *inode, struct file *file)
{
- struct charlcd_priv *priv = to_priv(the_charlcd);
+ struct charlcd_priv *priv = charlcd_to_priv(the_charlcd);
int ret;
ret = -EBUSY;
@@ -763,10 +763,24 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
}
}
+#ifdef CONFIG_PANEL_BOOT_MESSAGE
+#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
+#else
+#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n"
+#endif
+
+#ifdef CONFIG_CHARLCD_BL_ON
+#define LCD_INIT_BL "\x1b[L+"
+#elif defined(CONFIG_CHARLCD_BL_FLASH)
+#define LCD_INIT_BL "\x1b[L*"
+#else
+#define LCD_INIT_BL "\x1b[L-"
+#endif
+
/* initialize the LCD driver */
static int charlcd_init(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
int ret;
if (lcd->ops->backlight) {
@@ -784,13 +798,8 @@ static int charlcd_init(struct charlcd *lcd)
return ret;
/* display a short message */
-#ifdef CONFIG_PANEL_CHANGE_MESSAGE
-#ifdef CONFIG_PANEL_BOOT_MESSAGE
- charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
-#endif
-#else
- charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n");
-#endif
+ charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT);
+
/* clear the display on the next device opening */
priv->must_clear = true;
charlcd_home(lcd);
@@ -818,6 +827,12 @@ struct charlcd *charlcd_alloc(unsigned int drvdata_size)
}
EXPORT_SYMBOL_GPL(charlcd_alloc);
+void charlcd_free(struct charlcd *lcd)
+{
+ kfree(charlcd_to_priv(lcd));
+}
+EXPORT_SYMBOL_GPL(charlcd_free);
+
static int panel_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
@@ -866,7 +881,7 @@ EXPORT_SYMBOL_GPL(charlcd_register);
int charlcd_unregister(struct charlcd *lcd)
{
- struct charlcd_priv *priv = to_priv(lcd);
+ struct charlcd_priv *priv = charlcd_to_priv(lcd);
unregister_reboot_notifier(&panel_notifier);
charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 9ad93ea42fdc..ab15b64707ad 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -271,7 +271,7 @@ static int hd44780_probe(struct platform_device *pdev)
return 0;
fail:
- kfree(lcd);
+ charlcd_free(lcd);
return ret;
}
@@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev)
struct charlcd *lcd = platform_get_drvdata(pdev);
charlcd_unregister(lcd);
+
+ charlcd_free(lcd);
return 0;
}
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 21b9b2f2470a..e06de63497cf 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1620,7 +1620,7 @@ err_lcd_unreg:
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
- kfree(lcd.charlcd);
+ charlcd_free(lcd.charlcd);
lcd.charlcd = NULL;
parport_unregister_device(pprt);
pprt = NULL;
@@ -1647,7 +1647,7 @@ static void panel_detach(struct parport *port)
if (lcd.enabled) {
charlcd_unregister(lcd.charlcd);
lcd.initialized = false;
- kfree(lcd.charlcd);
+ charlcd_free(lcd.charlcd);
lcd.charlcd = NULL;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 76c9969b7124..96a6dc9d305c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1469,12 +1469,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
- genpd_lock(genpd);
-
ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
if (ret)
goto out;
+ genpd_lock(genpd);
+
dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
@@ -1482,9 +1482,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- out:
genpd_unlock(genpd);
-
+ out:
if (ret)
genpd_free_dev_data(dev, gpd_data);
else
@@ -1533,15 +1532,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
genpd->device_count--;
genpd->max_off_time_changed = true;
- if (genpd->detach_dev)
- genpd->detach_dev(genpd, dev);
-
dev_pm_domain_set(dev, NULL);
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
+ if (genpd->detach_dev)
+ genpd->detach_dev(genpd, dev);
+
genpd_free_dev_data(dev, gpd_data);
return 0;
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 1fad9291f6aa..7fc5a18e02ad 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -472,7 +472,7 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-struct fwnode_handle *
+static struct fwnode_handle *
software_node_get_parent(const struct fwnode_handle *fwnode)
{
struct software_node *swnode = to_software_node(fwnode);
@@ -481,7 +481,7 @@ software_node_get_parent(const struct fwnode_handle *fwnode)
NULL;
}
-struct fwnode_handle *
+static struct fwnode_handle *
software_node_get_next_child(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e6edd568214..bf1c61cab8eb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
return -EBADF;
l = f->f_mapping->host->i_bdev->bd_disk->private_data;
- if (l->lo_state == Lo_unbound) {
+ if (l->lo_state != Lo_bound) {
return -EINVAL;
}
f = l->lo_backing_file;
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 417a9f15c116..d7ac09c092f2 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1748,6 +1748,11 @@ static int __init null_init(void)
return -EINVAL;
}
+ if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+ pr_err("null_blk: invalid home_node value\n");
+ g_home_node = NUMA_NO_NODE;
+ }
+
if (g_queue_mode == NULL_Q_RQ) {
pr_err("null_blk: legacy IO path no longer available\n");
return -EINVAL;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 96670eefaeb2..6d415b20fb70 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -314,6 +314,7 @@ static void pcd_init_units(void)
disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
1, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
+ put_disk(disk);
disk->queue = NULL;
continue;
}
@@ -749,8 +750,14 @@ static int pcd_detect(void)
return 0;
printk("%s: No CD-ROM drive found\n", name);
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
+ blk_cleanup_queue(cd->disk->queue);
+ cd->disk->queue = NULL;
+ blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
+ }
pi_unregister_driver(par_drv);
return -1;
}
@@ -1006,8 +1013,14 @@ static int __init pcd_init(void)
pcd_probe_capabilities();
if (register_blkdev(major, name)) {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
+
+ blk_cleanup_queue(cd->disk->queue);
+ blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
+ }
return -EBUSY;
}
@@ -1028,6 +1041,9 @@ static void __exit pcd_exit(void)
int unit;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+ if (!cd->disk)
+ continue;
+
if (cd->present) {
del_gendisk(cd->disk);
pi_release(cd->pi);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e92e7a8eeeb2..35e6e271b219 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -761,8 +761,14 @@ static int pf_detect(void)
return 0;
printk("%s: No ATAPI disk detected\n", name);
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+ if (!pf->disk)
+ continue;
+ blk_cleanup_queue(pf->disk->queue);
+ pf->disk->queue = NULL;
+ blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
+ }
pi_unregister_driver(par_drv);
return -1;
}
@@ -1025,8 +1031,13 @@ static int __init pf_init(void)
pf_busy = 0;
if (register_blkdev(major, name)) {
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+ if (!pf->disk)
+ continue;
+ blk_cleanup_queue(pf->disk->queue);
+ blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
+ }
return -EBUSY;
}
@@ -1047,13 +1058,18 @@ static void __exit pf_exit(void)
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->present)
+ if (!pf->disk)
continue;
- del_gendisk(pf->disk);
+
+ if (pf->present)
+ del_gendisk(pf->disk);
+
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
- pi_release(pf->pi);
+
+ if (pf->present)
+ pi_release(pf->pi);
}
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4ba967d65cf9..2210c1b9491b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private)
pctx->opts->queue_depth = intval;
break;
case Opt_alloc_size:
- if (intval < 1) {
+ if (intval < SECTOR_SIZE) {
pr_err("alloc_size out of range\n");
return -EINVAL;
}
@@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
kref_put(&rbdc->kref, rbd_client_release);
}
-static int wait_for_latest_osdmap(struct ceph_client *client)
-{
- u64 newest_epoch;
- int ret;
-
- ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
- if (ret)
- return ret;
-
- if (client->osdc.osdmap->epoch >= newest_epoch)
- return 0;
-
- ceph_osdc_maybe_request_map(&client->osdc);
- return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
- client->options->mount_timeout);
-}
-
/*
* Get a ceph client with specific addr and configuration, if one does
* not exist create it. Either way, ceph_opts is consumed by this
@@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
* Using an existing client. Make sure ->pg_pools is up to
* date before we look up the pool id in do_rbd_add().
*/
- ret = wait_for_latest_osdmap(rbdc->client);
+ ret = ceph_wait_for_latest_osdmap(rbdc->client,
+ rbdc->client->options->mount_timeout);
if (ret) {
rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
rbd_put_client(rbdc);
@@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.max_sectors = queue_max_hw_sectors(q);
blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, UINT_MAX);
- blk_queue_io_min(q, objset_bytes);
- blk_queue_io_opt(q, objset_bytes);
+ blk_queue_io_min(q, rbd_dev->opts->alloc_size);
+ blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
if (rbd_dev->opts->trim) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- q->limits.discard_granularity = objset_bytes;
+ q->limits.discard_granularity = rbd_dev->opts->alloc_size;
blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
}
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 87ccef4bd69e..32a21b8d1d85 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
return 0;
err_read:
+ /* prevent double queue cleanup */
+ ace->gd->queue = NULL;
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e7a5f1d1c314..399cad7daae7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
int index;
- char mode_buf[8];
- ssize_t sz;
- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
- if (sz <= 0)
- return -EINVAL;
-
- /* ignore trailing new line */
- if (mode_buf[sz - 1] == '\n')
- mode_buf[sz - 1] = 0x00;
-
- if (strcmp(mode_buf, "all"))
+ if (!sysfs_streq(buf, "all"))
return -EINVAL;
down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
struct bio bio;
struct bio_vec bio_vec;
struct page *page;
- ssize_t ret, sz;
- char mode_buf[8];
- int mode = -1;
+ ssize_t ret;
+ int mode;
unsigned long blk_idx = 0;
- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
- if (sz <= 0)
- return -EINVAL;
-
- /* ignore trailing newline */
- if (mode_buf[sz - 1] == '\n')
- mode_buf[sz - 1] = 0x00;
-
- if (!strcmp(mode_buf, "idle"))
+ if (sysfs_streq(buf, "idle"))
mode = IDLE_WRITEBACK;
- else if (!strcmp(mode_buf, "huge"))
+ else if (sysfs_streq(buf, "huge"))
mode = HUGE_WRITEBACK;
-
- if (mode == -1)
+ else
return -EINVAL;
down_read(&zram->init_lock);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ded198328f21..7db48ae65cd2 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
return 0;
}
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
0, "OOB Wake-on-BT", data);
if (ret) {
@@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
}
data->oob_wake_irq = irq;
- disable_irq(irq);
bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
return 0;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72866a004f07..466ebd84ad17 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
- depends on TTY
+ depends on TTY && BROKEN
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index d8b77133a83a..f824563fc28d 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -37,8 +37,8 @@
*
* Returns size of the event. If it is an invalid event, returns 0.
*/
-static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
- struct tcg_pcr_event *event_header)
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+ struct tcg_pcr_event *event_header)
{
struct tcg_efi_specid_event_head *efispecid;
struct tcg_event_field *event_field;
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 8856cce5a23b..817ae09a369e 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
__poll_t mask = 0;
poll_wait(file, &priv->async_wait, wait);
+ mutex_lock(&priv->buffer_mutex);
- if (!priv->response_read || priv->response_length)
+ /*
+ * The response_length indicates if there is still response
+ * (or part of it) to be consumed. Partial reads decrease it
+ * by the number of bytes read, and write resets it the zero.
+ */
+ if (priv->response_length)
mask = EPOLLIN | EPOLLRDNORM;
else
mask = EPOLLOUT | EPOLLWRNORM;
+ mutex_unlock(&priv->buffer_mutex);
return mask;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 83ece5639f86..ae1030c9b086 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev)
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
return 0;
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- mutex_lock(&chip->tpm_mutex);
- if (!tpm_chip_start(chip)) {
+ if (!tpm_chip_start(chip)) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_STATE);
- tpm_chip_stop(chip);
- }
- mutex_unlock(&chip->tpm_mutex);
- } else {
- rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+ else
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
+ tpm_chip_stop(chip);
}
return rc;
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index a8dd80576c95..857f8c086274 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -31,16 +31,9 @@ static u64 notrace clps711x_sched_clock_read(void)
return ~readw(tcd);
}
-static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
+static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base)
{
- unsigned long rate;
-
- if (!base)
- return -ENOMEM;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
-
- rate = clk_get_rate(clock);
+ unsigned long rate = clk_get_rate(clock);
tcd = base;
@@ -48,8 +41,6 @@ static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
clocksource_mmio_readw_down);
sched_clock_register(clps711x_sched_clock_read, 16, rate);
-
- return 0;
}
static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
@@ -67,13 +58,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
struct clock_event_device *clkevt;
unsigned long rate;
- if (!irq)
- return -EINVAL;
- if (!base)
- return -ENOMEM;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
-
clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
if (!clkevt)
return -ENOMEM;
@@ -93,31 +77,29 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
"clps711x-timer", clkevt);
}
-void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
- unsigned int irq)
-{
- struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
- struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
-
- BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
- BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
-}
-
-#ifdef CONFIG_TIMER_OF
static int __init clps711x_timer_init(struct device_node *np)
{
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
void __iomem *base = of_iomap(np, 0);
+ if (!base)
+ return -ENOMEM;
+ if (!irq)
+ return -EINVAL;
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
- return _clps711x_clksrc_init(clock, base);
+ clps711x_clksrc_init(clock, base);
+ break;
case CLPS711X_CLKSRC_CLOCKEVENT:
return _clps711x_clkevt_init(clock, base, irq);
default:
return -EINVAL;
}
+
+ return 0;
}
TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
-#endif
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 54f8a331b53a..37671a5d4ed9 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -67,7 +67,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-struct irqaction gic_compare_irqaction = {
+static struct irqaction gic_compare_irqaction = {
.handler = gic_compare_interrupt,
.percpu_dev_id = &gic_clockevent_device,
.flags = IRQF_PERCPU | IRQF_TIMER,
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 43f4d5c4d6fa..f987027ca566 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -71,7 +71,7 @@ static u64 tc_get_cycles32(struct clocksource *cs)
return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}
-void tc_clksrc_suspend(struct clocksource *cs)
+static void tc_clksrc_suspend(struct clocksource *cs)
{
int i;
@@ -86,7 +86,7 @@ void tc_clksrc_suspend(struct clocksource *cs)
bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}
-void tc_clksrc_resume(struct clocksource *cs)
+static void tc_clksrc_resume(struct clocksource *cs)
{
int i;
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e8163693e936..5e6038fbf115 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void)
static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
.name = "riscv_clocksource",
.rating = 300,
- .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
+ .mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.read = riscv_clocksource_rdtime,
};
@@ -120,8 +120,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
return error;
}
- sched_clock_register(riscv_sched_clock,
- BITS_PER_LONG, riscv_timebase);
+ sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index c364027638e1..3352da6ed61f 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -586,8 +586,8 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
}
/* Optimized set_load which removes costly spin wait in timer_start */
-int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
- unsigned int load)
+static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
+ int autoreload, unsigned int load)
{
u32 l;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e22f0dbaebb1..2986119dd31f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
if (ret)
return ret;
- return cppc_perf.guaranteed_perf;
+ if (cppc_perf.guaranteed_perf)
+ return cppc_perf.guaranteed_perf;
+
+ return cppc_perf.nominal_perf;
}
#else /* CONFIG_ACPI_CPPC_LIB */
@@ -2593,6 +2596,9 @@ static int __init intel_pstate_init(void)
const struct x86_cpu_id *id;
int rc;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
if (no_load)
return -ENODEV;
@@ -2608,7 +2614,7 @@ static int __init intel_pstate_init(void)
} else {
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
- pr_info("CPU ID not supported\n");
+ pr_info("CPU model not supported\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 3f49427766b8..2b51e0718c9f 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
clk_put(priv->clk);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- kfree(priv);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ kfree(priv);
return 0;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index b1eadc6652b5..7205d9f4029e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- if (mapped_nents) {
+ if (mapped_nents)
sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg + sec4_sg_src_index,
0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- } else {
+ else
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- }
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 4e0eede599a8..ac0301b69593 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
dmadev->nr_channels = nr_channels;
dmadev->nr_requests = nr_requests;
- ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+ device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
dmadev->ahb_addr_masks,
count);
- if (ret)
- return ret;
dmadev->nr_ahb_addr_masks = count;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 91b90c0cea73..12acdac85820 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
if (err < 0)
goto out;
- if (err & BIT(pos))
- err = -EACCES;
+ if (value & BIT(pos)) {
+ err = -EPERM;
+ goto out;
+ }
err = 0;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 854bce4fb9e7..217507002dbc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->offset_timer =
devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+ if (!gpio->offset_timer)
+ return -ENOMEM;
return aspeed_gpio_setup_irqs(gpio, pdev);
}
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 0ecd2369c2ca..a09d2f9ebacc 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
mutex_init(&exar_gpio->lock);
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+ if (index < 0)
+ goto err_destroy;
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 154d959e8993..b6a4efce7c92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
struct gpio_mockup_chip *chip;
struct seq_file *sfile;
struct gpio_chip *gc;
+ int val, cnt;
char buf[3];
- int val, rv;
if (*ppos != 0)
return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
gc = &chip->gc;
val = gpio_mockup_get(gc, priv->offset);
- snprintf(buf, sizeof(buf), "%d\n", val);
+ cnt = snprintf(buf, sizeof(buf), "%d\n", val);
- rv = copy_to_user(usr_buf, buf, sizeof(buf));
- if (rv)
- return rv;
-
- return sizeof(buf) - 1;
+ return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
}
static ssize_t gpio_mockup_debugfs_write(struct file *file,
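The hunk above swaps an open-coded copy_to_user() for simple_read_from_buffer(), which clamps the copy to the caller's buffer, honours the file position and returns the number of bytes produced. A minimal sketch of the same pattern, using a hypothetical foo_debugfs_read() handler and a made-up value:

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t foo_debugfs_read(struct file *file, char __user *usr_buf,
                                size_t size, loff_t *ppos)
{
        char buf[16];
        int cnt;

        /* snprintf() reports how many characters describe the value
         * (excluding the NUL); hand exactly that many to the helper. */
        cnt = snprintf(buf, sizeof(buf), "%d\n", 42);

        /* Copies at most min(size, cnt - *ppos) bytes to user space,
         * advances *ppos and returns the byte count (0 at EOF). */
        return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
}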
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8b9c3ab70f6e..6a3ec575a404 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* to determine if the flags should have inverted semantics.
*/
if (IS_ENABLED(CONFIG_SPI_MASTER) &&
- of_property_read_bool(np, "cs-gpios")) {
+ of_property_read_bool(np, "cs-gpios") &&
+ !strcmp(propname, "cs-gpios")) {
struct device_node *child;
u32 cs;
int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
* conflict and the "spi-cs-high" flag will
* take precedence.
*/
- if (of_property_read_bool(np, "spi-cs-high")) {
+ if (of_property_read_bool(child, "spi-cs-high")) {
if (*flags & OF_GPIO_ACTIVE_LOW) {
pr_warn("%s GPIO handle specifies active low - ignored\n",
- of_node_full_name(np));
+ of_node_full_name(child));
*flags &= ~OF_GPIO_ACTIVE_LOW;
}
} else {
if (!(*flags & OF_GPIO_ACTIVE_LOW))
pr_info("%s enforce active low on chipselect handle\n",
- of_node_full_name(np));
+ of_node_full_name(child));
*flags |= OF_GPIO_ACTIVE_LOW;
}
break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
- return of_gpiochip_scan_gpios(chip);
+ status = of_gpiochip_scan_gpios(chip);
+ if (status) {
+ of_node_put(chip->of_node);
+ gpiochip_remove_pin_ranges(chip);
+ }
+
+ return status;
}
void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 144af0733581..0495bf1d480a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
}
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+ return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = gpio_set_config(chip, gpio, packed);
+ rc = chip->set_config(chip, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4f8fb4ecde34..5d8b30fd4534 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3173,11 +3173,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
break;
if (fence) {
- r = dma_fence_wait_timeout(fence, false, tmo);
+ tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
fence = next;
- if (r <= 0)
+ if (tmo == 0) {
+ r = -ETIMEDOUT;
break;
+ } else if (tmo < 0) {
+ r = tmo;
+ break;
+ }
} else {
fence = next;
}
@@ -3188,8 +3193,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
- if (r <= 0 || tmo <= 0) {
- DRM_ERROR("recover vram bo from shadow failed\n");
+ if (r < 0 || tmo <= 0) {
+ DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
return -EIO;
}
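Both hunks above hinge on the return convention of dma_fence_wait_timeout(): it hands back the jiffies left in the budget (> 0), 0 on timeout, or a negative error, so the earlier code dropped the remaining budget and could not tell a timeout from an error. A hedged sketch of the pattern, with an illustrative wait_fences_with_budget() helper that does not exist in the driver:

#include <linux/dma-fence.h>
#include <linux/errno.h>

/* Wait on each fence in turn, letting them share one overall budget. */
static long wait_fences_with_budget(struct dma_fence **fences, int n, long tmo)
{
        int i;

        for (i = 0; i < n; i++) {
                tmo = dma_fence_wait_timeout(fences[i], false, tmo);
                if (tmo == 0)
                        return -ETIMEDOUT;      /* budget exhausted */
                if (tmo < 0)
                        return tmo;             /* e.g. -ERESTARTSYS */
        }
        return tmo;                             /* jiffies left over */
}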
@@ -3625,6 +3630,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
struct pci_dev *pdev = adev->pdev;
enum pci_bus_speed cur_speed;
enum pcie_link_width cur_width;
+ u32 ret = 1;
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3638,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
while (pdev) {
cur_speed = pcie_get_speed_cap(pdev);
cur_width = pcie_get_width_cap(pdev);
+ ret = pcie_bandwidth_available(adev->pdev, NULL,
+ NULL, &cur_width);
+ if (!ret)
+ cur_width = PCIE_LNK_WIDTH_RESRV;
if (cur_speed != PCI_SPEED_UNKNOWN) {
if (*speed == PCI_SPEED_UNKNOWN)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 0b8ef2d27d6b..fe393a46f881 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000)
/*
* IB
@@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
* cost waiting for it coming back under RUNTIME only
*/
tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ } else if (adev->gmc.xgmi.hive_id) {
+ tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
}
for (i = 0; i < adev->num_rings; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bfa9062ce6b9..16fcb56c232b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_vm_bo_base *bo_base, *tmp;
int r = 0;
+ vm->bulk_moveable &= list_empty(&vm->evicted);
+
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d0309e8c9d12..a11db2b1a63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- adev->gfx.rlc.funcs->reset(adev);
-
gfx_v9_0_init_pg(adev);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 600259b4e291..2fe8397241ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
}
ring->vm_inv_eng = inv_eng - 1;
- change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
+ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 8be9677c0c07..cf9a49f49d3a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x9876, &carrizo_device_info }, /* Carrizo */
{ 0x9877, &carrizo_device_info }, /* Carrizo */
{ 0x15DD, &raven_device_info }, /* Raven */
+ { 0x15D8, &raven_device_info }, /* Raven */
#endif
{ 0x67A0, &hawaii_device_info }, /* Hawaii */
{ 0x67A1, &hawaii_device_info }, /* Hawaii */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index fb27783d7a54..3082b55b1e77 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4533,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane,
amdgpu_crtc->cursor_width = plane->state->crtc_w;
amdgpu_crtc->cursor_height = plane->state->crtc_h;
+ memset(&attributes, 0, sizeof(attributes));
attributes.address.high_part = upper_32_bits(address);
attributes.address.low_part = lower_32_bits(address);
attributes.width = plane->state->crtc_w;
@@ -5429,9 +5430,11 @@ static void get_freesync_config_for_crtc(
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(new_con_state->base.connector);
struct drm_display_mode *mode = &new_crtc_state->base.mode;
+ int vrefresh = drm_mode_vrefresh(mode);
new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
- aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+ vrefresh >= aconnector->min_vfreq &&
+ vrefresh <= aconnector->max_vfreq;
if (new_crtc_state->vrr_supported) {
new_crtc_state->stream->ignore_msa_timing_param = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4eba3c4800b6..ea18e9c2d8ce 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ dal_ddc_service_write_scdc_data(
+ stream->link->ddc, 0,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 683829466a44..0ba68d41b9c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position(
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
- //account for cases where we see negative offset relative to overlay plane
- if (src_x_offset < 0 && src_y_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, 0,
- CURSOR_Y_POSITION, 0);
- x_hotspot -= src_x_offset;
- y_hotspot -= src_y_offset;
- } else if (src_x_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, 0,
- CURSOR_Y_POSITION, pos->y);
- x_hotspot -= src_x_offset;
- } else if (src_y_offset < 0) {
- REG_SET_2(CURSOR_POSITION, 0,
+ REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, 0);
- y_hotspot -= src_y_offset;
- } else {
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
- }
+ CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
CURSOR_HOT_SPOT_X, x_hotspot,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 9aa7bec1b5fe..23b5b94a4939 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
* MP0CLK DS
*/
data->registry_data.disallowed_features = 0xE0041C00;
+ /* ECC feature should be disabled on old SMUs */
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ hwmgr->smu_version = smum_get_argument(hwmgr);
+ if (hwmgr->smu_version < 0x282100)
+ data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+ data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
"FCLK_DS",
"MP1CLK_DS",
"MP0CLK_DS",
- "XGMI"};
+ "XGMI",
+ "ECC"};
static const char *output_title[] = {
"FEATURES",
"BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
struct vega20_single_dpm_table *dpm_table;
bool vblank_too_short = false;
bool disable_mclk_switching;
+ bool disable_fclk_switching;
uint32_t i, latency;
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
if (hwmgr->display_config->nb_pstate_switch_disable)
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ if ((disable_mclk_switching &&
+ (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+ hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+ disable_fclk_switching = true;
+ else
+ disable_fclk_switching = false;
+
/* fclk */
dpm_table = &(data->dpm_table.fclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
- if (hwmgr->display_config->nb_pstate_switch_disable)
+ if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
/* vclk */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index a5bc758ae097..ac2a3118a0ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -80,6 +80,7 @@ enum {
GNLD_DS_MP1CLK,
GNLD_DS_MP0CLK,
GNLD_XGMI,
+ GNLD_ECC,
GNLD_FEATURES_MAX
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 63d5cf691549..195c4ae67058 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -99,7 +99,7 @@
#define FEATURE_DS_MP1CLK_BIT 30
#define FEATURE_DS_MP0CLK_BIT 31
#define FEATURE_XGMI_BIT 32
-#define FEATURE_SPARE_33_BIT 33
+#define FEATURE_ECC_BIT 33
#define FEATURE_SPARE_34_BIT 34
#define FEATURE_SPARE_35_BIT 35
#define FEATURE_SPARE_36_BIT 36
@@ -165,7 +165,8 @@
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
-#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT )
+#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT )
+#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT )
#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
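The mask hunk above is needed because FEATURE_XGMI_BIT and the new FEATURE_ECC_BIT sit at positions 32 and 33: shifting a plain int constant that far is undefined behaviour in C, so the masks must be built from a 64-bit 1ULL. A small stand-alone illustration (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ecc_mask = 1ULL << 33;         /* well defined: 0x200000000 */

        /* uint64_t bad = 1 << 33;                 undefined: the shift is
         *                                         evaluated in a 32-bit int
         *                                         before any widening       */

        printf("FEATURE_ECC_MASK = 0x%llx\n", (unsigned long long)ecc_mask);
        return 0;
}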
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index a63e5f0dae56..db761329a1e3 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1037,6 +1037,31 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
}
EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write);
+/* Filter out invalid setups to avoid configuring SCDC and scrambling */
+static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
+{
+ struct drm_display_info *display = &hdmi->connector.display_info;
+
+ /* Completely disable SCDC support for older controllers */
+ if (hdmi->version < 0x200a)
+ return false;
+
+ /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
+ if (!display->hdmi.scdc.supported ||
+ !display->hdmi.scdc.scrambling.supported)
+ return false;
+
+ /*
+ * Disable if display only support low TMDS rates and scrambling
+ * for low rates is not supported either
+ */
+ if (!display->hdmi.scdc.scrambling.low_rates &&
+ display->max_tmds_clock <= 340000)
+ return false;
+
+ return true;
+}
+
/*
* HDMI2.0 Specifies the following procedure for High TMDS Bit Rates:
* - The Source shall suspend transmission of the TMDS clock and data
@@ -1055,7 +1080,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi)
unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
- if (hdmi->connector.display_info.hdmi.scdc.supported) {
+ if (dw_hdmi_support_scdc(hdmi)) {
if (mtmdsclock > HDMI14_MAX_TMDSCLK)
drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
else
@@ -1579,8 +1604,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
/* Set up HDMI_FC_INVIDCONF */
inv_val = (hdmi->hdmi_data.hdcp_enable ||
- vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
- hdmi_info->scdc.scrambling.low_rates ?
+ (dw_hdmi_support_scdc(hdmi) &&
+ (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+ hdmi_info->scdc.scrambling.low_rates)) ?
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
@@ -1646,7 +1672,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
}
/* Scrambling Control */
- if (hdmi_info->scdc.supported) {
+ if (dw_hdmi_support_scdc(hdmi)) {
if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
hdmi_info->scdc.scrambling.low_rates) {
/*
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 40ac19848034..fbb76332cc9f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->atomic_disable(crtc, old_crtc_state);
else if (funcs->disable)
funcs->disable(crtc);
- else
+ else if (funcs->dpms)
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (!(dev->irq_enabled && dev->num_crtcs))
@@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
if (new_crtc_state->enable) {
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
-
if (funcs->atomic_enable)
funcs->atomic_enable(crtc, old_crtc_state);
- else
+ else if (funcs->commit)
funcs->commit(crtc);
}
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 381581b01d48..05bbc2b622fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
-
- mutex_lock(&drm_global_mutex);
- if (dev->open_count == 0)
- drm_dev_put(dev);
- mutex_unlock(&drm_global_mutex);
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0e9349ff2d16..af2ab640cadb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
best_depth = fmt->depth;
}
}
- if (sizes.surface_depth != best_depth) {
+ if (sizes.surface_depth != best_depth && best_depth) {
DRM_INFO("requested bpp %d, scaled depth down to %d",
sizes.surface_bpp, best_depth);
sizes.surface_depth = best_depth;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 83a5bbca6e7e..7caa3c7ed978 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
drm_close_helper(filp);
- if (!--dev->open_count) {
+ if (!--dev->open_count)
drm_lastclose(dev);
- if (drm_dev_is_unplugged(dev))
- drm_put_dev(dev);
- }
+
mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 0573eab0e190..f35e4ab55b27 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -20,6 +20,7 @@
#include "regs-vp.h"
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
}
-static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+static bool mixer_is_synced(struct mixer_context *ctx)
{
- /* block update on vsync */
- mixer_reg_writemask(ctx, MXR_STATUS, enable ?
- MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+ u32 base, shadow;
+ if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+ ctx->mxr_ver == MXR_VER_128_0_0_184)
+ return !(mixer_reg_read(ctx, MXR_CFG) &
+ MXR_CFG_LAYER_UPDATE_COUNT_MASK);
+
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+ vp_reg_read(ctx, VP_SHADOW_UPDATE))
+ return false;
+
+ base = mixer_reg_read(ctx, MXR_CFG);
+ shadow = mixer_reg_read(ctx, MXR_CFG_S);
+ if (base != shadow)
+ return false;
+
+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
+ if (base != shadow)
+ return false;
+
+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
+ if (base != shadow)
+ return false;
+
+ return true;
+}
+
+static int mixer_wait_for_sync(struct mixer_context *ctx)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 100000);
+
+ while (!mixer_is_synced(ctx)) {
+ usleep_range(1000, 2000);
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static void mixer_disable_sync(struct mixer_context *ctx)
+{
+ mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
+}
+
+static void mixer_enable_sync(struct mixer_context *ctx)
+{
+ if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+ ctx->mxr_ver == MXR_VER_128_0_0_184)
+ mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
- vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
- VP_SHADOW_UPDATE_ENABLE : 0);
+ vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
}
static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
spin_lock_irqsave(&ctx->reg_slock, flags);
- vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
/* interlace or progressive scan mode */
val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_regs_dump(ctx);
}
-static void mixer_layer_update(struct mixer_context *ctx)
-{
- mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
-}
-
static void mixer_graph_buffer(struct mixer_context *ctx,
struct exynos_drm_plane *plane)
{
@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
mixer_cfg_layer(ctx, win, priority, true);
mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
- /* layer update mandatory for mixer 16.0.33.0 */
- if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
- ctx->mxr_ver == MXR_VER_128_0_0_184)
- mixer_layer_update(ctx);
-
spin_unlock_irqrestore(&ctx->reg_slock, flags);
mixer_regs_dump(ctx);
@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
struct mixer_context *ctx = arg;
- u32 val, base, shadow;
+ u32 val;
spin_lock(&ctx->reg_slock);
@@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
val &= ~MXR_INT_STATUS_VSYNC;
/* interlace scan need to check shadow register */
- if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
- if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
- vp_reg_read(ctx, VP_SHADOW_UPDATE))
- goto out;
-
- base = mixer_reg_read(ctx, MXR_CFG);
- shadow = mixer_reg_read(ctx, MXR_CFG_S);
- if (base != shadow)
- goto out;
-
- base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
- shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
- if (base != shadow)
- goto out;
-
- base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
- shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
- if (base != shadow)
- goto out;
- }
+ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
+ && !mixer_is_synced(ctx))
+ goto out;
drm_crtc_handle_vblank(&ctx->crtc->base);
}
@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
{
- struct mixer_context *mixer_ctx = crtc->ctx;
+ struct mixer_context *ctx = crtc->ctx;
- if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+ if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
- mixer_vsync_set_update(mixer_ctx, false);
+ if (mixer_wait_for_sync(ctx))
+ dev_err(ctx->dev, "timeout waiting for VSYNC\n");
+ mixer_disable_sync(ctx);
}
static void mixer_update_plane(struct exynos_drm_crtc *crtc,
@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- mixer_vsync_set_update(mixer_ctx, true);
+ mixer_enable_sync(mixer_ctx);
exynos_crtc_handle_event(crtc);
}
@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
exynos_drm_pipe_clk_enable(crtc, true);
- mixer_vsync_set_update(ctx, false);
+ mixer_disable_sync(ctx);
mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
mixer_commit(ctx);
- mixer_vsync_set_update(ctx, true);
+ mixer_enable_sync(ctx);
set_bit(MXR_BIT_POWERED, &ctx->flags);
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 35b4ec3f7618..3592d04c33b2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
}
if (index_mode) {
- if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+ if (guest_gma >= I915_GTT_PAGE_SIZE) {
ret = -EFAULT;
goto err;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 035479e273be..e3f9caa7839f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
/**
* intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
* @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
*
* This function is used to trigger hotplug interrupt for vGPU
*
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c857..69a9a1b2ea4a 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_vgpu_primary_plane_format p;
struct intel_vgpu_cursor_plane_format c;
- int ret;
+ int ret, tile_height = 1;
if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev,
break;
case PLANE_CTL_TILED_X:
info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+ tile_height = 8;
break;
case PLANE_CTL_TILED_Y:
info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+ tile_height = 32;
break;
case PLANE_CTL_TILED_YF:
info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+ tile_height = 32;
break;
default:
gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
}
-
- info->size = (((p.stride * p.height * p.bpp) / 8) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
if (ret)
@@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_hot = UINT_MAX;
info->y_hot = UINT_MAX;
}
-
- info->size = (((info->stride * c.height * c.bpp) / 8)
- + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
gvt_vgpu_err("invalid plane id:%d\n", plane_id);
return -EINVAL;
}
+ info->size = (info->stride * roundup(info->height, tile_height)
+ + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
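With the hunk above, the framebuffer size is computed once for both plane types: stride times the height rounded up to a whole tile, converted to pages. A worked example with made-up plane numbers (purely illustrative, not values taken from the driver):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_SHIFT      12

static unsigned long roundup_ul(unsigned long x, unsigned long step)
{
        return ((x + step - 1) / step) * step;
}

int main(void)
{
        unsigned long stride = 16384;           /* bytes per row            */
        unsigned long height = 1082;            /* visible lines            */
        unsigned long tile_height = 8;          /* X-tiled in this example  */
        unsigned long pages;

        /* 1082 lines round up to 1088, and 16384 * 1088 bytes lands on
         * exactly 4352 4 KiB pages.                                       */
        pages = (stride * roundup_ul(height, tile_height) + PAGE_SIZE - 1)
                >> PAGE_SHIFT;
        printf("fb size = %lu pages\n", pages);
        return 0;
}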
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c7103dd2d8d5..9814773882ec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
- struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_vgpu_ppgtt_spt *spt, *spn;
struct radix_tree_iter iter;
- void **slot;
+ LIST_HEAD(all_spt);
+ void __rcu **slot;
+ rcu_read_lock();
radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
spt = radix_tree_deref_slot(slot);
- ppgtt_free_spt(spt);
+ list_move(&spt->post_shadow_list, &all_spt);
}
+ rcu_read_unlock();
+
+ list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+ ppgtt_free_spt(spt);
}
static int ppgtt_handle_guest_write_page_table_bytes(
@@ -1882,7 +1888,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
}
list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+ mutex_lock(&gvt->gtt.ppgtt_mm_lock);
list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
return mm;
}
@@ -1942,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
*/
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
- atomic_dec(&mm->pincount);
+ atomic_dec_if_positive(&mm->pincount);
}
/**
@@ -1967,9 +1977,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
if (ret)
return ret;
+ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_move_tail(&mm->ppgtt_mm.lru_list,
&mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
}
return 0;
@@ -1980,6 +1991,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
struct intel_vgpu_mm *mm;
struct list_head *pos, *n;
+ mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
@@ -1987,9 +2000,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
continue;
list_del_init(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
invalidate_ppgtt_mm(mm);
return 1;
}
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
return 0;
}
@@ -2659,6 +2674,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
}
}
INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+ mutex_init(&gvt->gtt.ppgtt_mm_lock);
return 0;
}
@@ -2699,7 +2715,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
if (mm->type == INTEL_GVT_MM_PPGTT) {
+ mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
list_del_init(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
if (mm->ppgtt_mm.shadowed)
invalidate_ppgtt_mm(mm);
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d8cb04cc946d..edb610dc5d86 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
+ struct mutex ppgtt_mm_lock;
struct list_head ppgtt_mm_lru_list_head;
struct page *scratch_page;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d5fcc447d22f..a68addf95c23 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
void *buf, unsigned long count, bool is_write)
{
- void *aperture_va;
+ void __iomem *aperture_va;
if (!intel_vgpu_in_aperture(vgpu, off) ||
!intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return -EIO;
if (is_write)
- memcpy(aperture_va + offset_in_page(off), buf, count);
+ memcpy_toio(aperture_va + offset_in_page(off), buf, count);
else
- memcpy(buf, aperture_va + offset_in_page(off), count);
+ memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
io_mapping_unmap(aperture_va);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7d84cfb9051a..7902fb162d09 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+ {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb8f936fdaa..05b953793316 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
int i = 0;
if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
- return -1;
+ return -EINVAL;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
- if (ret < 0) {
- gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
- return ret;
- }
-
/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+ if (ret < 0) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ goto err_req;
+ }
+
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ret = prepare_workload(workload);
out:
+ if (ret) {
+ /* We might still need to add request with
+ * clean ctx to retire it properly..
+ */
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
+ }
+
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
goto out;
}
- if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+ if (!scheduler->current_vgpu->active ||
+ list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
goto out;
/*
@@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_runtime_pm_put_unchecked(dev_priv);
}
- if (ret && (vgpu_is_vm_unhealthy(ret))) {
- enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ if (ret) {
+ if (vgpu_is_vm_unhealthy(ret))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
intel_vgpu_destroy_workload(workload);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4..f6f6e5b78e97 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
&ctx);
if (ret) {
- ret = -EINTR;
+ if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+ try_again = true;
+ continue;
+ }
break;
}
crtc = connector->state->crtc;
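The debugfs hunk above stops translating every locking failure into -EINTR and instead retries when drm_modeset_lock() reports -EDEADLK, the normal contended case for acquire contexts. A hedged sketch of the usual acquire/backoff loop this follows (the lock_and_inspect() wrapper is illustrative only):

#include <linux/errno.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

static int lock_and_inspect(struct drm_device *dev)
{
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
        if (ret == -EDEADLK && !drm_modeset_backoff(&ctx))
                goto retry;

        if (!ret) {
                /* ... look at connector/crtc state under the lock ... */
        }

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        return ret;
}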
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9adc7bb9e69c..a67a63b5aa84 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
- INTEL_DEVID(dev_priv) == 0x87C0)
+ INTEL_DEVID(dev_priv) == 0x87C0 || \
+ INTEL_DEVID(dev_priv) == 0x87CA)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 30d516e975c6..8558e81fdc2a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1734,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* pages from.
*/
if (!obj->base.filp) {
- i915_gem_object_put(obj);
- return -ENXIO;
+ addr = -ENXIO;
+ goto err;
+ }
+
+ if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+ addr = -EINVAL;
+ goto err;
}
addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1749,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct vm_area_struct *vma;
if (down_write_killable(&mm->mmap_sem)) {
- i915_gem_object_put(obj);
- return -EINTR;
+ addr = -EINTR;
+ goto err;
}
vma = find_vma(mm, addr);
if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1768,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
i915_gem_object_put(obj);
args->addr_ptr = (u64)addr;
-
return 0;
err:
i915_gem_object_put(obj);
-
return addr;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9a65341fec09..aa6791255252 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
i915_error_generate_code(error, engines));
if (engines) {
/* Just show the first executing process, more is confusing */
- i = ffs(engines);
+ i = __ffs(engines);
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 638a586469f9..047855dd8c6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
#define GEN11_EU_DISABLE _MMIO(0x9134)
#define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
_TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE (1 << 4)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 73a7bee24a66..641e0778fa9c 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
}
}
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+ struct intel_dsi *intel_dsi)
+{
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ WARN_ON(intel_dsi->io_wakeref[port]);
+ intel_dsi->io_wakeref[port] =
+ intel_display_power_get(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO);
+ }
+}
+
static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
}
- for_each_dsi_port(port, intel_dsi->ports) {
- intel_dsi->io_wakeref[port] =
- intel_display_power_get(dev_priv,
- port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO);
- }
+ get_dsi_io_power_domains(dev_priv, intel_dsi);
}
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
}
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
POSTING_READ(DPCLKA_CFGCR0_ICL);
mutex_unlock(&dev_priv->dpll_lock);
@@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
DRM_ERROR("DDI port:%c buffer not idle\n",
port_name(port));
}
- gen11_dsi_ungate_clocks(encoder);
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
return 0;
}
-static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u64 domains = 0;
- enum port port;
-
- for_each_dsi_port(port, intel_dsi->ports)
- if (port == PORT_A)
- domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
- else
- domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
-
- return domains;
+ get_dsi_io_power_domains(to_i915(encoder->base.dev),
+ enc_to_intel_dsi(&encoder->base));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b508d8a735e0..4364f42cac6b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi;
info->supports_dp = (port != PORT_E);
+ info->supports_edp = (port == PORT_A);
}
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 14d580cdefd3..ab4e60dfd6a3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
intel_aux_power_domain(dig_port);
}
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
- u64 domains;
/*
* TODO: Add support for MST encoders. Atm, the following should never
@@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
* hook.
*/
if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
- return 0;
+ return;
dig_port = enc_to_dig_port(&encoder->base);
- domains = BIT_ULL(dig_port->ddi_io_power_domain);
+ intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
- domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
/*
* VDSC power is needed when DSC is enabled
*/
if (crtc_state->dsc_params.compression_enable)
- domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
-
- return domains;
+ intel_display_power_get(dev_priv,
+ intel_dsc_power_domain(crtc_state));
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
return;
}
/*
- * DSI ports should have their DDI clock ungated when disabled
- * and gated when enabled.
+ * For DSI we keep the ddi clocks gated
+ * except during enable/disable sequence.
*/
- ddi_clk_needed = !encoder->base.crtc;
+ ddi_clk_needed = false;
}
val = I915_READ(DPCLKA_CFGCR0_ICL);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ccb616351bba..421aac80a838 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
for_each_intel_encoder(&dev_priv->drm, encoder) {
- u64 get_domains;
- enum intel_display_power_domain domain;
struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
@@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
continue;
crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
- get_domains = encoder->get_power_domains(encoder, crtc_state);
- for_each_power_domain(domain, get_domains)
- intel_display_power_get(dev_priv, domain);
+ encoder->get_power_domains(encoder, crtc_state);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cf709835fb9a..8891f29a8c7f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- const struct link_config_limits *limits)
-{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
-
- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- bpp);
-
- for (lane_count = limits->min_lane_count;
- lane_count <= limits->max_lane_count;
- lane_count <<= 1) {
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
- lane_count);
-
- if (mode_rate <= link_avail) {
- pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
-
- return 0;
- }
- }
- }
- }
-
- return -EINVAL;
-}
-
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@@ -2031,15 +1995,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = 6 * 3;
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
+ if (intel_dp_is_edp(intel_dp)) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The eDP 1.3 and earlier panels
- * are generally designed to support only a single clock and
- * lane configuration, and typically these values correspond to
- * the native resolution of the panel. With eDP 1.4 rate select
- * and DSC, this is decreasingly the case, and we need to be
- * able to select less than maximum link config.
+ * advertizes being capable of. The panels are generally
+ * designed to support only a single clock and lane
+ * configuration, and typically these values correspond to the
+ * native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -2053,22 +2015,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
- if (intel_dp_is_edp(intel_dp))
- /*
- * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
- * section A.1: "It is recommended that the minimum number of
- * lanes be used, using the minimum link rate allowed for that
- * lane configuration."
- *
- * Note that we use the max clock and lane count for eDP 1.3 and
- * earlier, and fast vs. wide is irrelevant.
- */
- ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
- &limits);
- else
- /* Optimize for slow and wide. */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
- &limits);
+ /*
+ * Optimize for slow and wide. This is the place to add alternative
+ * optimization policy.
+ */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 15db41394b9e..d5660ac1b0d6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -270,10 +270,12 @@ struct intel_encoder {
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_state *pipe_config);
- /* Returns a mask of power domains that need to be referenced as part
- * of the hardware state readout code. */
- u64 (*get_power_domains)(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state);
+ /*
+ * Acquires the power domains needed for an active encoder during
+ * hardware state readout.
+ */
+ void (*get_power_domains)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f63..b9b0ea4e2404 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
struct i915_gem_context *ctx;
ctx = live_context(i915, file);
- if (!ctx)
+ if (IS_ERR(ctx))
break;
/* We will need some GGTT space for the rq's context */
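The one-line selftest fix above matters because live_context() reports failure through the ERR_PTR convention, so a bare NULL test lets real errors slip through as apparent success. A minimal sketch of that convention, using a hypothetical alloc_widget() helper:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct widget {
        int id;
};

static struct widget *alloc_widget(int id)
{
        struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
                return ERR_PTR(-ENOMEM);        /* error encoded in the pointer */
        w->id = id;
        return w;
}

static int use_widget(void)
{
        struct widget *w = alloc_widget(1);

        if (IS_ERR(w))                          /* a NULL check would miss this */
                return PTR_ERR(w);

        kfree(w);
        return 0;
}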
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 6403728fe778..31c93c3ccd00 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
+static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
+
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+ switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+ case PIPEMISC_DITHER_6_BPC:
+ return 18;
+ case PIPEMISC_DITHER_8_BPC:
+ return 24;
+ case PIPEMISC_DITHER_10_BPC:
+ return 30;
+ case PIPEMISC_DITHER_12_BPC:
+ return 36;
+ default:
+ MISSING_CASE(tmp);
+ return 0;
+ }
+}
+
static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
+ pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
/* Enable Frame time stamo based scanline reporting */
adjusted_mode->private_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 22e68a100e7b..5d333138f913 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock)
static unsigned int mt2701_calculate_factor(int clock)
{
if (clock <= 64000)
- return 16;
- else if (clock <= 128000)
- return 8;
- else if (clock <= 256000)
return 4;
- else
+ else if (clock <= 128000)
return 2;
+ else
+ return 1;
}
static const struct mtk_dpi_conf mt8173_conf = {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf59ea9bccfd..57ce4708ef1b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -15,6 +15,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
@@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = {
.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
+ .gem_prime_vmap = mtk_drm_gem_prime_vmap,
+ .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
.fops = &mtk_drm_fops,
.name = DRIVER_NAME,
@@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
+ ret = drm_fbdev_generic_setup(drm, 32);
+ if (ret)
+ DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+
return 0;
err_deinit:
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 259b7b0de1d2..38483e9ee071 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -241,3 +241,49 @@ err_gem_free:
kfree(mtk_gem);
return ERR_PTR(ret);
}
+
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct sg_table *sgt;
+ struct sg_page_iter iter;
+ unsigned int npages;
+ unsigned int i = 0;
+
+ if (mtk_gem->kvaddr)
+ return mtk_gem->kvaddr;
+
+ sgt = mtk_gem_prime_get_sg_table(obj);
+ if (IS_ERR(sgt))
+ return NULL;
+
+ npages = obj->size >> PAGE_SHIFT;
+ mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+ if (!mtk_gem->pages)
+ goto out;
+
+ for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+ mtk_gem->pages[i++] = sg_page_iter_page(&iter);
+ if (i > npages)
+ break;
+ }
+ mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+
+out:
+ kfree((void *)sgt);
+
+ return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+ if (!mtk_gem->pages)
+ return;
+
+ vunmap(vaddr);
+ mtk_gem->kvaddr = 0;
+ kfree((void *)mtk_gem->pages);
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 534639b43a1c..c047a7ef294f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -37,6 +37,7 @@ struct mtk_drm_gem_obj {
dma_addr_t dma_addr;
unsigned long dma_attrs;
struct sg_table *sg;
+ struct page **pages;
};
#define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base)
@@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 915cc84621ae..e04e6c293d39 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
if (IS_ERR(regmap))
ret = PTR_ERR(regmap);
if (ret) {
- ret = PTR_ERR(regmap);
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
@@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(remote);
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+ of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
index 4ef9c57ffd44..5223498502c4 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
@@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
.owner = THIS_MODULE,
};
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
- hdmi_phy->pll_rate = rate;
- if (rate <= 74250000)
- *parent_rate = rate;
- else
- *parent_rate = rate / 2;
-
- return rate;
-}
-
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
- return hdmi_phy->pll_rate;
-}
-
void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
u32 bits)
{
@@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
return NULL;
}
-static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy,
- const struct clk_ops **ops)
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+ struct clk_init_data *clk_init)
{
- if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops)
- *ops = hdmi_phy->conf->hdmi_phy_clk_ops;
- else
- dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n");
+ clk_init->flags = hdmi_phy->conf->flags;
+ clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
}
static int mtk_hdmi_phy_probe(struct platform_device *pdev)
@@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
struct clk_init_data clk_init = {
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
- .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
};
struct phy *phy;
@@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
hdmi_phy->dev = dev;
hdmi_phy->conf =
(struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
- mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops);
+ mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
hdmi_phy->pll_hw.init = &clk_init;
hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
if (IS_ERR(hdmi_phy->pll)) {
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
index f39b1fc66612..2d8b3182470d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
@@ -21,6 +21,7 @@ struct mtk_hdmi_phy;
struct mtk_hdmi_phy_conf {
bool tz_disabled;
+ unsigned long flags;
const struct clk_ops *hdmi_phy_clk_ops;
void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
u32 val, u32 mask);
struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate);
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate);
extern struct platform_driver mtk_hdmi_phy_driver;
extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
index fcc42dc6ea7f..d3cc4022e988 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
@@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
usleep_range(80, 100);
}
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate <= 64000000)
pos_div = 3;
- else if (rate <= 12800000)
- pos_div = 1;
+ else if (rate <= 128000000)
+ pos_div = 2;
else
pos_div = 1;
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
RG_HTPLL_IC_MASK);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
@@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ unsigned long out_rate, val;
+
+ val = (readl(hdmi_phy->regs + HDMI_CON6)
+ & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+ switch (val) {
+ case 0x00:
+ out_rate = parent_rate;
+ break;
+ case 0x01:
+ out_rate = parent_rate / 2;
+ break;
+ default:
+ out_rate = parent_rate / 4;
+ break;
+ }
+
+ val = (readl(hdmi_phy->regs + HDMI_CON6)
+ & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+ out_rate *= (val + 1) * 2;
+ val = (readl(hdmi_phy->regs + HDMI_CON2)
+ & RG_HDMITX_TX_POSDIV_MASK);
+ out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+ if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+ out_rate /= 5;
+
+ return out_rate;
+}
+
static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
@@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
.tz_disabled = true,
+ .flags = CLK_SET_RATE_GATE,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index ed5916b27658..47f8a2951682 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
usleep_range(100, 150);
}
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ hdmi_phy->pll_rate = rate;
+ if (rate <= 74250000)
+ *parent_rate = rate;
+ else
+ *parent_rate = rate / 2;
+
+ return rate;
+}
+
static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ return hdmi_phy->pll_rate;
+}
+
static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
@@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2281ed3eb774..8a4ebcb6405c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
ret = drm_dev_register(drm, 0);
if (ret)
- goto free_drm;
+ goto uninstall_irq;
drm_fbdev_generic_setup(drm, 32);
return 0;
+uninstall_irq:
+ drm_irq_uninstall(drm);
free_drm:
drm_dev_put(drm);
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
static void meson_drv_unbind(struct device *dev)
{
- struct drm_device *drm = dev_get_drvdata(dev);
- struct meson_drm *priv = drm->dev_private;
+ struct meson_drm *priv = dev_get_drvdata(dev);
+ struct drm_device *drm = priv->drm;
if (priv->canvas) {
meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
}
drm_dev_unregister(drm);
+ drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index e28814f4ea6c..563953ec6ad0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
	/* If the sink reports a max TMDS clock, reject modes above it */
- if (mode->clock > connector->display_info.max_tmds_clock)
+ if (connector->display_info.max_tmds_clock &&
+ mode->clock > connector->display_info.max_tmds_clock)
return MODE_BAD;
/* Check against non-VIC supported modes */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 88a52f6b39fe..7dfbbbc1beea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
}
ret = pm_runtime_get_sync(drm->dev);
- if (IS_ERR_VALUE(ret) && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES)
return ret;
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
pm_runtime_put_autosuspend(drm->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index aa9fec80492d..40c47d6a7d78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -100,12 +100,10 @@ static void
nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
{
struct nouveau_dmem_chunk *chunk;
- struct nouveau_drm *drm;
unsigned long idx;
chunk = (void *)hmm_devmem_page_get_drvdata(page);
idx = page_to_pfn(page) - chunk->pfn_first;
- drm = chunk->drm;
/*
* FIXME:
@@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
/* FIXME handle pin failure */
WARN_ON(ret);
}
- list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- /* FIXME handle pin failure */
- WARN_ON(ret);
- }
mutex_unlock(&drm->dmem->mutex);
}
@@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
nouveau_bo_unpin(chunk->bo);
}
- list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
- nouveau_bo_unpin(chunk->bo);
- }
mutex_unlock(&drm->dmem->mutex);
}
@@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
*/
drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
device, size);
- if (drm->dmem->devmem == NULL) {
+ if (IS_ERR(drm->dmem->devmem)) {
kfree(drm->dmem);
drm->dmem = NULL;
return;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index 340383150fb9..ebf9c96d43ee 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
hdmi4_core_disable(core);
return 0;
}
@@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
if (err)
return err;
+ /*
+ * Initialize CEC clock divider: CEC needs 2MHz clock hence
+ * set the divider to 24 to get 48/24=2MHz clock
+ */
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
/* Clear TX FIFO */
if (!hdmi_cec_clear_tx_fifo(adap)) {
pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
- return -EIO;
+ err = -EIO;
+ goto err_disable_clk;
}
/* Clear RX FIFO */
if (!hdmi_cec_clear_rx_fifo(adap)) {
pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
- return -EIO;
+ err = -EIO;
+ goto err_disable_clk;
}
/* Clear CEC interrupts */
@@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
}
return 0;
+
+err_disable_clk:
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
+ hdmi4_core_disable(core);
+
+ return err;
}
static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
@@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
return ret;
core->wp = wp;
- /*
- * Initialize CEC clock divider: CEC needs 2MHz clock hence
- * set the devider to 24 to get 48/24=2MHz clock
- */
- REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+ /* Disable clock initially, hdmi_cec_adap_enable() manages it */
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
ret = cec_register_adapter(core->adap, &pdev->dev);
if (ret < 0) {
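
The divider value in the comment above is plain arithmetic: the wrapper's 48 MHz functional clock divided down to the 2 MHz the CEC core expects gives 48 / 2 = 24, i.e. the 0x18 written into HDMI_WP_CLK bits 5:0. A minimal sketch of that relationship, using a hypothetical helper name rather than anything from the driver:

static inline u32 cec_wp_clk_divider(unsigned long fclk_hz, unsigned long cec_hz)
{
	/* 48000000 / 2000000 = 24 = 0x18 -> value for HDMI_WP_CLK[5:0] */
	return fclk_hz / cec_hz;
}
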
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 813ba42f2753..e384b95ad857 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
else
acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
/*
- * The I2S input word length is twice the lenght given in the IEC-60958
+ * The I2S input word length is twice the length given in the IEC-60958
* status word. If the word size is greater than
* 20 bits, increment by one.
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7d4c6073ea5..0d4ade9d4722 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
clk_disable(vop->hclk);
}
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
+ if (win->phy->scl && win->phy->scl->ext) {
+ VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+ }
+
+ VOP_WIN_SET(vop, win, enable, 0);
+}
+
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
struct vop_win *vop_win = &vop->win[i];
const struct vop_win_data *win = vop_win->data;
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
}
spin_unlock(&vop->reg_lock);
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
spin_unlock(&vop->reg_lock);
}
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
int channel = i * 2 + 1;
VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
VOP_WIN_SET(vop, win, gate, 1);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index dc47720c99ba..39d8509d96a0 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -48,8 +48,13 @@ static enum drm_mode_status
sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
- /* This is max for HDMI 2.0b (4K@60Hz) */
- if (mode->clock > 594000)
+ /*
+	 * The controller supports a maximum of 594 MHz, which corresponds
+	 * to 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
+ * 340 MHz scrambling has to be enabled. Because scrambling is
+ * not yet implemented, just limit to 340 MHz for now.
+ */
+ if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
return MODE_OK;
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index fc36e0c10a37..b1e7c76e9c17 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
err_unregister_gates:
for (i = 0; i < CLK_NUM; i++)
- if (clk_data->hws[i])
+ if (!IS_ERR_OR_NULL(clk_data->hws[i]))
clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
err_assert_reset:
@@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
of_clk_del_provider(dev->of_node);
for (i = 0; i < CLK_NUM; i++)
- clk_hw_unregister_gate(clk_data->hws[i]);
+ if (clk_data->hws[i])
+ clk_hw_unregister_gate(clk_data->hws[i]);
clk_disable_unprepare(tcon_top->bus);
reset_control_assert(tcon_top->rst);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index ba9b3cfb8c3d..b3436c2aed68 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc;
u32 value;
	/* rien ne va plus (no more bets) */
if (!old_state || !old_state->crtc)
return;
+ dc = to_tegra_dc(old_state->crtc);
+
/*
* XXX Legacy helpers seem to sometimes call ->atomic_disable() even
* on planes that are already disabled. Make sure we fallback to the
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 39bfed9623de..982ce37ecde1 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
if (vic->booted)
return 0;
+#ifdef CONFIG_IOMMU_API
if (vic->config->supports_sid) {
struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
vic_writel(vic, value, VIC_THI_STREAMID1);
}
}
+#endif
/* setup clockgating registers */
vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 66885c24590f..c1bd5e3d9e4a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -18,18 +18,19 @@
#include "udl_connector.h"
#include "udl_drv.h"
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
- u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
{
int ret, i;
u8 *read_buff;
+ struct udl_device *udl = data;
read_buff = kmalloc(2, GFP_KERNEL);
if (!read_buff)
- return false;
+ return -1;
- for (i = 0; i < EDID_LENGTH; i++) {
- int bval = (i + block_idx * EDID_LENGTH) << 8;
+ for (i = 0; i < len; i++) {
+ int bval = (i + block * EDID_LENGTH) << 8;
ret = usb_control_msg(udl->udev,
usb_rcvctrlpipe(udl->udev, 0),
(0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
kfree(read_buff);
- return false;
+ return -1;
}
- buff[i] = read_buff[1];
+ buf[i] = read_buff[1];
}
kfree(read_buff);
- return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
- int *result_buff_size)
-{
- int i, extensions;
- u8 *block_buff = NULL, *buff_ptr;
-
- block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (block_buff == NULL)
- return false;
-
- if (udl_get_edid_block(udl, 0, block_buff) &&
- memchr_inv(block_buff, 0, EDID_LENGTH)) {
- extensions = ((struct edid *)block_buff)->extensions;
- if (extensions > 0) {
- /* we have to read all extensions one by one */
- *result_buff_size = EDID_LENGTH * (extensions + 1);
- *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
- buff_ptr = *result_buff;
- if (buff_ptr == NULL) {
- kfree(block_buff);
- return false;
- }
- memcpy(buff_ptr, block_buff, EDID_LENGTH);
- kfree(block_buff);
- buff_ptr += EDID_LENGTH;
- for (i = 1; i < extensions; ++i) {
- if (udl_get_edid_block(udl, i, buff_ptr)) {
- buff_ptr += EDID_LENGTH;
- } else {
- kfree(*result_buff);
- *result_buff = NULL;
- return false;
- }
- }
- return true;
- }
- /* we have only base edid block */
- *result_buff = block_buff;
- *result_buff_size = EDID_LENGTH;
- return true;
- }
-
- kfree(block_buff);
-
- return false;
+ return 0;
}
static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- u8 *edid_buff = NULL;
- int edid_buff_size = 0;
struct udl_device *udl = connector->dev->dev_private;
struct udl_drm_connector *udl_connector =
container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
udl_connector->edid = NULL;
}
-
- if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+ udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+ if (!udl_connector->edid)
return connector_status_disconnected;
- udl_connector->edid = (struct edid *)edid_buff;
-
return connector_status_connected;
}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 22cd2d13e272..ff47f890e6ad 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -52,6 +52,7 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
+ .release = udl_driver_release,
/* gem hooks */
.gem_free_object_unlocked = udl_gem_free_object,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index e9e9b1ff678e..4ae67d882eae 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
int udl_driver_load(struct drm_device *dev, unsigned long flags);
void udl_driver_unload(struct drm_device *dev);
+void udl_driver_release(struct drm_device *dev);
int udl_fbdev_init(struct drm_device *dev);
void udl_fbdev_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index d5a23295dd80..bb7b58407039 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
- drm_gem_object_put(&gobj->base);
+ drm_gem_object_put_unlocked(&gobj->base);
unlock:
mutex_unlock(&udl->gem_lock);
return ret;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 9086d0d1b880..1f8ef34ade24 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev)
udl_free_urb_list(dev);
udl_fbdev_cleanup(dev);
- udl_modeset_cleanup(dev);
kfree(udl);
}
+
+void udl_driver_release(struct drm_device *dev)
+{
+ udl_modeset_cleanup(dev);
+ drm_dev_fini(dev);
+ kfree(dev);
+}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5930facd6d2d..11a8f99ba18c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
ret = drm_gem_handle_create(file, &obj->base, handle);
drm_gem_object_put_unlocked(&obj->base);
if (ret)
- goto err;
+ return ERR_PTR(ret);
return &obj->base;
-
-err:
- __vgem_gem_destroy(obj);
- return ERR_PTR(ret);
}
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 138b0bb325cf..69048e73377d 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
ret = drm_gem_handle_create(file, &obj->gem, handle);
drm_gem_object_put_unlocked(&obj->gem);
- if (ret) {
- drm_gem_object_release(&obj->gem);
- kfree(obj);
+ if (ret)
return ERR_PTR(ret);
- }
return &obj->gem;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b913a56f3426..2a9112515f46 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
- struct drm_display_mode *old_mode;
struct drm_display_mode *mode;
int ret;
- old_mode = par->set_mode;
mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
@@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
mode->vdisplay = var->yres;
vmw_guess_mode_timing(mode);
- if (old_mode && drm_mode_equal(old_mode, mode)) {
- drm_mode_destroy(vmw_priv->dev, mode);
- mode = old_mode;
- old_mode = NULL;
- } else if (!vmw_kms_validate_mode_vram(vmw_priv,
+ if (!vmw_kms_validate_mode_vram(vmw_priv,
mode->hdisplay *
DIV_ROUND_UP(var->bits_per_pixel, 8),
mode->vdisplay)) {
@@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
schedule_delayed_work(&par->local_work, 0);
out_unlock:
- if (old_mode)
- drm_mode_destroy(vmw_priv->dev, old_mode);
+ if (par->set_mode)
+ drm_mode_destroy(vmw_priv->dev, par->set_mode);
par->set_mode = mode;
mutex_unlock(&par->bo_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b93c558dd86e..7da752ca1c34 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
- return id;
+ return (id != -ENOMEM ? 0 : id);
spin_lock(&gman->lock);
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 27101c04a827..4030d64916f0 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -114,7 +114,7 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
static void host1x_channel_set_streamid(struct host1x_channel *channel)
{
-#if HOST1X_HW >= 6
+#if IS_ENABLED(CONFIG_IOMMU_API) && HOST1X_HW >= 6
struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ca8d322b487..4ca0cdfa6b33 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,6 +150,7 @@ config HID_ASUS
tristate "Asus"
depends on LEDS_CLASS
depends on ASUS_WMI || ASUS_WMI=n
+ select POWER_SUPPLY
---help---
Support for Asus notebook built-in keyboard and touchpad via i2c, and
the Asus Republic of Gamers laptop keyboard special keys.
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9993b692598f..860e21ec6a49 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
unsigned offset, unsigned n)
{
- if (n > 32) {
- hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+ if (n > 256) {
+ hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
n, current->comm);
- n = 32;
+ n = 256;
}
return __extract(report, offset, n);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ac9fda1b5a72..1384e57182af 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
seq_printf(f, "\n\n");
/* dump parsed data and input mappings */
+ if (down_interruptible(&hdev->driver_input_lock))
+ return 0;
+
hid_dump_device(hdev, f);
seq_printf(f, "\n");
hid_dump_input_mapping(hdev, f);
+ up(&hdev->driver_input_lock);
+
return 0;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6d93f4ad037..adce58f24f76 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1083,6 +1083,7 @@
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e
#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b10b1922c5bd..1fce0076e7dc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x1b8: map_key_clear(KEY_VIDEO); break;
case 0x1bc: map_key_clear(KEY_MESSENGER); break;
case 0x1bd: map_key_clear(KEY_INFO); break;
+ case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
case 0x201: map_key_clear(KEY_NEW); break;
case 0x202: map_key_clear(KEY_OPEN); break;
case 0x203: map_key_clear(KEY_CLOSE); break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 15ed6177a7a3..199cc256e9d9 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
kfree(data);
return -ENOMEM;
}
+ data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+ if (!data->wq) {
+ kfree(data->effect_ids);
+ kfree(data);
+ return -ENOMEM;
+ }
+
data->hidpp = hidpp;
data->feature_index = feature_index;
data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* ignore boost value at response.fap.params[2] */
/* init the hardware command queue */
- data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
atomic_set(&data->workqueue_size, 0);
/* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
input_report_rel(mydata->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- hidpp_scroll_counter_handle_scroll(
- &hidpp->vertical_wheel_counter, v);
+ if (v != 0)
+ hidpp_scroll_counter_handle_scroll(
+ &hidpp->vertical_wheel_counter, v);
input_sync(mydata->input);
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 953908f2267c..77ffba48cc73 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ }
};
-/**
+/*
* hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
*
* There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
if (hdev->product == 0x0401 &&
strncmp(hdev->name, "ELAN0800", 8) != 0)
return true;
+ /* Same with product id 0x0400 */
+ if (hdev->product == 0x0400 &&
+ strncmp(hdev->name, "QTEC0001", 8) != 0)
+ return true;
break;
}
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
}
if (bl_entry != NULL)
- dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
bl_entry->driver_data, bl_entry->vendor,
bl_entry->product);
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
quirks |= bl_entry->driver_data;
if (quirks)
- dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
quirks, hdev->vendor, hdev->product);
return quirks;
}
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8141cadfca0e..8dae0f9b819e 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
static int steam_register(struct steam_device *steam)
{
int ret;
+ bool client_opened;
/*
* This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
* Unlikely, but getting the serial could fail, and it is not so
* important, so make up a serial number and go on.
*/
+ mutex_lock(&steam->mutex);
if (steam_get_serial(steam) < 0)
strlcpy(steam->serial_no, "XXXXXXXXXX",
sizeof(steam->serial_no));
+ mutex_unlock(&steam->mutex);
hid_info(steam->hdev, "Steam Controller '%s' connected",
steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
}
mutex_lock(&steam->mutex);
- if (!steam->client_opened) {
+ client_opened = steam->client_opened;
+ if (!client_opened)
steam_set_lizard_mode(steam, lizard_mode);
+ mutex_unlock(&steam->mutex);
+
+ if (!client_opened)
ret = steam_input_register(steam);
- } else {
+ else
ret = 0;
- }
- mutex_unlock(&steam->mutex);
return ret;
}
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
+ unsigned long flags;
+ bool connected;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ connected = steam->connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
mutex_lock(&steam->mutex);
steam->client_opened = false;
+ if (connected)
+ steam_set_lizard_mode(steam, lizard_mode);
mutex_unlock(&steam->mutex);
- if (steam->connected) {
- steam_set_lizard_mode(steam, lizard_mode);
+ if (connected)
steam_input_register(steam);
- }
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7710d9f957da..0187c9f8fc22 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
goto cleanup;
}
rc = usb_string(udev, 201, ver_ptr, ver_len);
- if (ver_ptr == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
if (rc == -EPIPE) {
*ver_ptr = '\0';
} else if (rc < 0) {
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90164fed08d3..4d1f24ee249c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_NO_RUNTIME_PM },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_BOGUS_IRQ },
+ { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
+ I2C_HID_QUIRK_NO_RUNTIME_PM },
{ 0, 0 }
};
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f929bfa9fcd..d0f1dfe2bcbb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
config SENSORS_W83773G
tristate "Nuvoton W83773G"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the Nuvoton W83773G hardware
monitoring chip.
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e4f9f7ce92fa..f9abeeeead9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
};
static const u32 ntc_temp_config[] = {
- HWMON_T_INPUT, HWMON_T_TYPE,
+ HWMON_T_INPUT | HWMON_T_TYPE,
0
};
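
The single-character fix above matters because each entry in a hwmon channel config array is read as a bitmask of attributes for one channel, with a trailing 0 terminating the array; written with a comma, HWMON_T_INPUT and HWMON_T_TYPE described two separate temperature channels instead of one channel carrying both attributes. A minimal sketch of the intended shape, under that reading of the hwmon_channel_info convention:

/* One temperature channel that exposes both temp1_input and temp1_type. */
static const u32 example_temp_config[] = {
	HWMON_T_INPUT | HWMON_T_TYPE,
	0
};
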
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index b91a80abf724..4679acb4918e 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
s++;
}
}
+
+ s = (sensors->power.num_sensors * 4) + 1;
} else {
for (i = 0; i < sensors->power.num_sensors; ++i) {
s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
show_power, NULL, 3, i);
attr++;
}
- }
- if (sensors->caps.num_sensors >= 1) {
s = sensors->power.num_sensors + 1;
+ }
+ if (sensors->caps.num_sensors >= 1) {
snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
0, 0);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f2c681971201..f8979abb9a19 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
Cannon Lake (PCH)
Cedar Fork (PCH)
Ice Lake (PCH)
+ Comet Lake (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c91e145ef5a5..679c6c41f64b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
* Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
* Cedar Fork (PCH) 0x18df 32 hard yes yes yes
* Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
+ * Comet Lake (PCH) 0x02a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -240,6 +241,7 @@
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
struct i801_mux_config {
char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
{ 0, }
};
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 42fed40198a0..c0c3043b5d61 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
/* Init DMA config if supported */
ret = i2c_imx_dma_request(i2c_imx, phy_addr);
if (ret < 0)
- goto clk_notifier_unregister;
+ goto del_adapter;
dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
return 0; /* Return OK */
+del_adapter:
+ i2c_del_adapter(&i2c_imx->adapter);
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
rpm_disable:
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 612f04190ed8..9784c6c0d2ec 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int total_contexts;
int ret;
unsigned ngroups;
- int qos_rmt_count;
+ int rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = rcv_contexts - total_contexts;
}
- /* each user context requires an entry in the RMT */
- qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
- if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
- user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+ /*
+ * The RMT entries are currently allocated as shown below:
+ * 1. QOS (0 to 128 entries);
+ * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+ * 3. VNIC (num_vnic_contexts).
+	 * It should be noted that PSM FECN oversubscribes num_vnic_contexts
+	 * entries of RMT because both VNIC and PSM could allocate any receive
+	 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+ * and PSM FECN must reserve an RMT entry for each possible PSM receive
+ * context.
+ */
+ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+ if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+ user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
dd_dev_err(dd,
"RMT size is reducing the number of user receive contexts from %u to %d\n",
n_usr_ctxts,
@@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
u64 reg;
int i, idx, regoff, regidx;
u8 offset;
+ u32 total_cnt;
/* there needs to be enough room in the map table */
- if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+ total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
return;
}
@@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
/* add rule 1 */
add_rsm_rule(dd, RSM_INS_FECN, &rrd);
- rmt->used += dd->num_user_contexts;
+ rmt->used += total_cnt;
}
/* Initialize RSM for VNIC */
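
The accounting described in the comment above can be read as a simple worst-case budget: QOS entries, plus two RMT entries per VNIC context (one for VNIC itself and one because a PSM FECN entry must cover every context VNIC might also claim), plus one entry per user context. A simplified sketch of the clamping logic, with hypothetical names standing in for the driver's fields and helpers:

/* Hypothetical model of the budgeting in set_up_context_variables(),
 * not the driver's actual code. */
static int clamp_user_contexts(int qos_entries, int num_vnic_contexts,
			       int n_usr_ctxts, int num_map_entries)
{
	int rmt_count = qos_entries + 2 * num_vnic_contexts;

	if (rmt_count + n_usr_ctxts > num_map_entries)
		n_usr_ctxts = num_map_entries - rmt_count;

	return n_usr_ctxts;
}
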
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9b643c2409cf..eba300330a02 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp)
if (!list_empty(&priv->s_iowait.list) &&
!(qp->s_flags & RVT_S_BUSY) &&
!(priv->s_flags & RVT_S_BUSY)) {
- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index e6726c1ab866..5991211d72bd 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -3088,7 +3088,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@@ -3166,7 +3166,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index fdda33aca77f..43cbce7a19ea 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
make_tid_rdma_ack(qp, ohdr, ps))
return 1;
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (iowait_sdma_pending(&priv->s_iowait)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- clear_ahg(qp);
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
- /* will get called again */
- goto done_free_tx;
- }
+ /*
+ * Bail out if we can't send data.
+	 * Be reminded that this check must be done after the call to
+ * make_tid_rdma_ack() because the responding QP could be in
+ * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+ */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+ goto bail;
if (priv->s_flags & RVT_S_WAIT_ACK)
goto bail;
@@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
middle, ps);
return 1;
-done_free_tx:
- hfi1_put_txreq(ps->s_txreq);
- ps->s_txreq = NULL;
- return 1;
-
bail:
hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index f1fec56f3ff4..8e29dbb5b5fb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
dma_offset = offset = idx_offset * table->obj_size;
} else {
+ u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
/* mtt mhop */
i = mhop.l0_idx;
@@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
hem_idx = i;
hem = table->hem[hem_idx];
- dma_offset = offset = (obj & (table->num_obj - 1)) *
- table->obj_size % mhop.bt_chunk_size;
+ dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+ mhop.bt_chunk_size;
if (mhop.hop_num == 2)
dma_offset = offset = 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index b09f1cde2ff5..08be0e4eabcd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table;
dma_addr_t dma_handle;
__le64 *mtts;
- u32 s = start_index * sizeof(u64);
u32 bt_page_size;
u32 i;
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
return -EINVAL;
mtts = hns_roce_table_find(hr_dev, table,
- mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+ mtt->first_seg +
+ start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
&dma_handle);
if (!mtts)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 57c76eafef2f..66cdf625534f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
wait_for_completion(&hr_qp->free);
if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
- if (hr_dev->caps.sccc_entry_sz)
- hns_roce_table_put(hr_dev, &qp_table->sccc_table,
- hr_qp->qpn);
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index c5a881172524..337410f40860 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
rcu_read_lock();
in = __in_dev_get_rcu(upper_dev);
- local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
+ if (!in->ifa_list)
+ local_ipaddr = 0;
+ else
+ local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
rcu_read_unlock();
} else {
local_ipaddr = ntohl(ifa->ifa_address);
@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
case NETDEV_UP:
/* Fall through */
case NETDEV_CHANGEADDR:
+
+ /* Just skip if no need to handle ARP cache */
+ if (!local_ipaddr)
+ break;
+
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 782499abcd98..2a0b59a4b6eb 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
unsigned long flags;
for (i = 0 ; i < dev->num_ports; i++) {
- cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
det = &sriov->alias_guid.ports_guid[i];
+ cancel_delayed_work_sync(&det->alias_guid_work);
spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
while (!list_empty(&det->cb_list)) {
cb_ctx = list_entry(det->cb_list.next,
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index eaa055007f28..9e08df7914aa 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -20,6 +20,7 @@
enum devx_obj_flags {
DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
+ DEVX_OBJ_FLAGS_DCT = 1 << 1,
};
struct devx_async_data {
@@ -39,7 +40,10 @@ struct devx_obj {
u32 dinlen; /* destroy inbox length */
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
u32 flags;
- struct mlx5_ib_devx_mr devx_mr;
+ union {
+ struct mlx5_ib_devx_mr devx_mr;
+ struct mlx5_core_dct core_dct;
+ };
};
struct devx_umem {
@@ -347,7 +351,6 @@ static u64 devx_get_obj_id(const void *in)
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
MLX5_GET(arm_rq_in, in, srq_number));
break;
- case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
MLX5_GET(drain_dct_in, in, dctn));
@@ -618,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_ARM_XRC_SRQ:
case MLX5_CMD_OP_ARM_RQ:
- case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
@@ -1124,7 +1126,11 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
devx_cleanup_mkey(obj);
- ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+ else
+ ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+ sizeof(out));
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
@@ -1185,9 +1191,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
devx_set_umem_valid(cmd_in);
}
- err = mlx5_cmd_exec(dev->mdev, cmd_in,
- cmd_in_len,
- cmd_out, cmd_out_len);
+ if (opcode == MLX5_CMD_OP_CREATE_DCT) {
+ obj->flags |= DEVX_OBJ_FLAGS_DCT;
+ err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
+ cmd_in, cmd_in_len,
+ cmd_out, cmd_out_len);
+ } else {
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ cmd_in_len,
+ cmd_out, cmd_out_len);
+ }
+
if (err)
goto obj_free;
@@ -1214,7 +1228,11 @@ err_copy:
if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
devx_cleanup_mkey(obj);
obj_destroy:
- mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+ else
+ mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+ sizeof(out));
obj_free:
kfree(obj);
return err;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 994c19d01211..531ff20b32ad 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
*active_speed = IB_SPEED_EDR;
break;
case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_EDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_HDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_HDR;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index c20bfc41ecf1..0aa10ebda5d9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
- u64 access_mask = ODP_READ_ALLOWED_BIT;
+ u64 access_mask;
u64 start_idx, page_mask;
struct ib_umem_odp *odp;
size_t size;
@@ -607,6 +607,7 @@ next_mr:
page_shift = mr->umem->page_shift;
page_mask = ~(BIT(page_shift) - 1);
start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+ access_mask = ODP_READ_ALLOWED_BIT;
if (prefetch && !downgrade && !mr->umem->writable) {
/* prefetch with write-access must
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6b1f0e76900b..7cd006da1dae 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3729,6 +3729,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
u32 min_resp_len = offsetof(typeof(resp), dctn) +
sizeof(resp.dctn);
@@ -3747,7 +3748,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
- MLX5_ST_SZ_BYTES(create_dct_in));
+ MLX5_ST_SZ_BYTES(create_dct_in), out,
+ sizeof(out));
if (err)
return err;
resp.dctn = qp->dct.mdct.mqp.qpn;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 6d8b3e0de57a..ec41400fec0c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
pvrdma_free_slots(dev);
+ dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+ dev->dsrbase);
iounmap(dev->regs);
kfree(dev->sgid_tbl);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b319e51c379b..f7cdd2ab7f11 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
/* Everything is mapped - write the right values into s->dma_address */
for_each_sg(sglist, s, nelems, i) {
- s->dma_address += address + s->offset;
+ /*
+ * Add in the remaining piece of the scatter-gather offset that
+ * was masked out when we were determining the physical address
+ * via (sg_phys(s) & PAGE_MASK) earlier.
+ */
+ s->dma_address += address + (s->offset & ~PAGE_MASK);
s->dma_length = s->length;
}
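Illustration of the offset arithmetic in the hunk above (not part of the patch; assumes PAGE_SIZE == 4096 for the worked numbers). sg_phys(s) already includes s->offset, and masking it with PAGE_MASK keeps the page containing the first byte, so only the sub-page remainder of the offset still has to be added to the returned dma_address:

/*
 * Worked example, s->offset == 5000:
 *
 *   sg_phys(s) & PAGE_MASK    absorbs the page-sized part of the offset (4096)
 *   s->offset & ~PAGE_MASK    == 5000 % 4096 == 904, the remainder to add back
 *
 * Adding the full s->offset (the old code) counted the page-sized part twice
 * and shifted dma_address one page too far whenever the offset exceeded
 * PAGE_SIZE.
 */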
@@ -3164,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ int type, prot = 0;
size_t length;
- int prot = 0;
if (devid < entry->devid_start || devid > entry->devid_end)
continue;
+ type = IOMMU_RESV_DIRECT;
length = entry->address_end - entry->address_start;
if (entry->prot & IOMMU_PROT_IR)
prot |= IOMMU_READ;
if (entry->prot & IOMMU_PROT_IW)
prot |= IOMMU_WRITE;
+ if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+ /* Exclusion range */
+ type = IOMMU_RESV_RESERVED;
region = iommu_alloc_resv_region(entry->address_start,
- length, prot,
- IOMMU_RESV_DIRECT);
+ length, prot, type);
if (!region) {
dev_err(dev, "Out of memory allocating dm-regions\n");
return;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index f773792d77fd..ff40ba758cf3 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -359,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
u64 start = iommu->exclusion_start & PAGE_MASK;
- u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+ u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
u64 entry;
if (!iommu->exclusion_start)
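Illustration of the off-by-one fixed in the hunk above (not part of the patch; assumes 4 KiB pages). The exclusion limit is treated as naming the last page of the range, so it must be derived from the last byte rather than the first byte past the range:

/*
 * Worked example, start = 0x1000, length = 0x1000 (covers 0x1000..0x1fff):
 *
 *   old: (0x1000 + 0x1000)     & PAGE_MASK == 0x2000  (one page too far)
 *   new: (0x1000 + 0x1000 - 1) & PAGE_MASK == 0x1000  (last page of the range)
 */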
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (e == NULL)
return -ENOMEM;
+ if (m->flags & IVMD_FLAG_EXCL_RANGE)
+ init_exclusion_range(m);
+
switch (m->type) {
default:
kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
while (p < end) {
m = (struct ivmd_header *)p;
- if (m->flags & IVMD_FLAG_EXCL_RANGE)
- init_exclusion_range(m);
- else if (m->flags & IVMD_FLAG_UNITY_MAP)
+ if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
init_unity_map_range(m);
p += m->length;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index eae0741f72dc..87965e4d9647 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -374,6 +374,8 @@
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
+
/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB 24
#define IOMMU_CAP_NPCACHE 26
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 87274b54febd..28cb713d728c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
u32 pmen;
unsigned long flags;
+ if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+ return;
+
raw_spin_lock_irqsave(&iommu->register_lock, flags);
pmen = readl(iommu->reg + DMAR_PMEN_REG);
pmen &= ~DMA_PMEN_EPM;
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
ctx_lo = context[0].lo;
- sdev->did = domain->iommu_did[iommu->seq_id];
+ sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
if (!(ctx_lo & CONTEXT_PASIDE)) {
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f101afc315ab..9a8a8870e267 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -160,6 +160,14 @@
#define ARM_V7S_TCR_PD1 BIT(5)
+#ifdef CONFIG_ZONE_DMA32
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
+#else
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
+#endif
+
typedef u32 arm_v7s_iopte;
static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
void *table = NULL;
if (lvl == 1)
- table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+ table = (void *)__get_free_pages(
+ __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
else if (lvl == 2)
- table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+ table = kmem_cache_zalloc(data->l2_tables, gfp);
phys = virt_to_phys(table);
- if (phys != (arm_v7s_iopte)phys)
+ if (phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
+ dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
+ }
if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
ARM_V7S_TABLE_SIZE(2),
ARM_V7S_TABLE_SIZE(2),
- SLAB_CACHE_DMA, NULL);
+ ARM_V7S_TABLE_SLAB_FLAGS, NULL);
if (!data->l2_tables)
goto out_free_data;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33a982e33716..109de67d5d72 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
- dev_warn(dev,
- "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
- iommu_def_domain_type);
dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ if (dom) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ }
}
group->default_domain = dom;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f8d3ba247523..2de8122e218f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr_iova = rb_entry(curr, struct iova, node);
} while (curr && new_pfn <= curr_iova->pfn_hi);
- if (limit_pfn < size || new_pfn < iovad->start_pfn)
+ if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+ iovad->max32_alloc_size = size;
goto iova32_full;
+ }
/* pfn_lo will point to size aligned address if size_aligned is set */
new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
return 0;
iova32_full:
- iovad->max32_alloc_size = size;
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return -ENOMEM;
}
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 83364fedbf0a..5e4ca139e4ea 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -275,14 +275,14 @@ out_free:
return ret;
}
-int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
struct device_node *parent)
{
return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
}
IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
-int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
struct device_node *parent)
{
return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 2dd1ff0cf558..7577755bdcf4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
ra = container_of(a, struct lpi_range, entry);
rb = container_of(b, struct lpi_range, entry);
- return rb->base_id - ra->base_id;
+ return ra->base_id - rb->base_id;
}
static void merge_lpi_ranges(void)
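A short sketch of the comparator convention assumed by the hunk above (not quoted from the patch): list_sort() keeps element a ahead of b when the callback returns a value <= 0, so swapping the operands changes the LPI range list from descending to ascending base_id order, which is what merge_lpi_ranges() relies on when it looks for adjacent ranges to coalesce:

/*
 * With base_ids 8192 and 16384:
 *
 *   ra->base_id - rb->base_id   ->  8192 sorts before 16384 (ascending)
 *   rb->base_id - ra->base_id   ->  16384 sorts before 8192 (descending)
 */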
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ba2a37a27a54..fd3110c171ba 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1089,11 +1089,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
#endif
}
-static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+static int gic_init_bases(struct gic_chip_data *gic,
struct fwnode_handle *handle)
{
- irq_hw_number_t hwirq_base;
- int gic_irqs, irq_base, ret;
+ int gic_irqs, ret;
if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
/* Frankein-GIC without banked registers... */
@@ -1145,28 +1144,21 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
} else { /* Legacy support */
/*
* For primary GICs, skip over SGIs.
- * For secondary GICs, skip over PPIs, too.
+ * No secondary GIC support whatsoever.
*/
- if (gic == &gic_data[0] && (irq_start & 31) > 0) {
- hwirq_base = 16;
- if (irq_start != -1)
- irq_start = (irq_start & ~31) + 16;
- } else {
- hwirq_base = 32;
- }
+ int irq_base;
- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+ gic_irqs -= 16; /* calculate # of irqs to allocate */
- irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+ irq_base = irq_alloc_descs(16, 16, gic_irqs,
numa_node_id());
if (irq_base < 0) {
- WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
- irq_start);
- irq_base = irq_start;
+ WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
+ irq_base = 16;
}
gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
- hwirq_base, &gic_irq_domain_ops, gic);
+ 16, &gic_irq_domain_ops, gic);
}
if (WARN_ON(!gic->domain)) {
@@ -1195,7 +1187,6 @@ error:
}
static int __init __gic_init_bases(struct gic_chip_data *gic,
- int irq_start,
struct fwnode_handle *handle)
{
char *name;
@@ -1231,32 +1222,28 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
gic_init_chip(gic, NULL, name, false);
}
- ret = gic_init_bases(gic, irq_start, handle);
+ ret = gic_init_bases(gic, handle);
if (ret)
kfree(name);
return ret;
}
-void __init gic_init(unsigned int gic_nr, int irq_start,
- void __iomem *dist_base, void __iomem *cpu_base)
+void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
{
struct gic_chip_data *gic;
- if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
- return;
-
/*
* Non-DT/ACPI systems won't run a hypervisor, so let's not
* bother with these...
*/
static_branch_disable(&supports_deactivate_key);
- gic = &gic_data[gic_nr];
+ gic = &gic_data[0];
gic->raw_dist_base = dist_base;
gic->raw_cpu_base = cpu_base;
- __gic_init_bases(gic, irq_start, NULL);
+ __gic_init_bases(gic, NULL);
}
static void gic_teardown(struct gic_chip_data *gic)
@@ -1399,7 +1386,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
if (ret)
return ret;
- ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+ ret = gic_init_bases(*gic, &dev->of_node->fwnode);
if (ret) {
gic_teardown(*gic);
return ret;
@@ -1459,7 +1446,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
static_branch_disable(&supports_deactivate_key);
- ret = __gic_init_bases(gic, -1, &node->fwnode);
+ ret = __gic_init_bases(gic, &node->fwnode);
if (ret) {
gic_teardown(gic);
return ret;
@@ -1650,7 +1637,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
return -ENOMEM;
}
- ret = __gic_init_bases(gic, -1, domain_handle);
+ ret = __gic_init_bases(gic, domain_handle);
if (ret) {
pr_err("Failed to initialise GIC\n");
irq_domain_free_fwnode(domain_handle);
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index d1098f4da6a4..88df3d00052c 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -169,8 +169,12 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
raw_spin_lock_init(&data->lock);
- of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
- of_property_read_u32(np, "fsl,channel", &data->channel);
+ ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "fsl,channel", &data->channel);
+ if (ret)
+ return ret;
/*
* There is one output irq for each group of 64 inputs.
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
index 86b72fbd3b45..353111a10413 100644
--- a/drivers/irqchip/irq-ls1x.c
+++ b/drivers/irqchip/irq-ls1x.c
@@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node,
NULL);
if (!priv->domain) {
pr_err("ls1x-irq: cannot add IRQ domain\n");
+ err = -ENOMEM;
goto out_iounmap;
}
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 567b29c47608..98b6e1d4b1a6 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
void __iomem *base = d->chip_data;
u32 val;
+ if (!msg->address_lo && !msg->address_hi)
+ return;
+
base += get_mbigen_vec_reg(d->hwirq);
val = readl_relaxed(base);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 3496b61a312a..8eed478f3b7e 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -179,7 +179,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
return 0;
}
-const struct irq_domain_ops mmp_irq_domain_ops = {
+static const struct irq_domain_ops mmp_irq_domain_ops = {
.map = mmp_irq_domain_map,
.xlate = mmp_irq_domain_xlate,
};
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index add4c9c934c8..18832ccc8ff8 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -478,7 +478,7 @@ dispose_irq:
return ret;
}
-struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
.ap_range = {
.first = 0,
.size = 21,
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a93296b9b45d..7bd1d4cb2e19 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
const struct stm32_exti_bank *stm32_bank;
struct stm32_exti_chip_data *chip_data;
void __iomem *base = h_data->base;
- u32 irqs_mask;
stm32_bank = h_data->drv_data->exti_banks[bank_idx];
chip_data = &h_data->chips_data[bank_idx];
@@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
raw_spin_lock_init(&chip_data->rlock);
- /* Determine number of irqs supported */
- writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
- irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
-
/*
* This IP has no reset, so after hot reboot we should
* clear registers to avoid residue
*/
writel_relaxed(0, base + stm32_bank->imr_ofst);
writel_relaxed(0, base + stm32_bank->emr_ofst);
- writel_relaxed(0, base + stm32_bank->rtsr_ofst);
- writel_relaxed(0, base + stm32_bank->ftsr_ofst);
- writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
- if (stm32_bank->fpr_ofst != UNDEF_REG)
- writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 4d85645c87f7..0928fd1f0e0c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
if (m->clock2)
test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
- if (ent->device == 0xB410) {
+ if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
+ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7fea18b0c15d..7cb4d685a1f1 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int devid;
+ const struct of_device_id *of_id;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
- devid = (int)(uintptr_t)of_match_device(
- of_pca9532_leds_match, &client->dev)->data;
+ of_id = of_match_device(of_pca9532_leds_match,
+ &client->dev);
+ if (unlikely(!of_id))
+ return -EINVAL;
+ devid = (int)(uintptr_t) of_id->data;
} else {
devid = id->driver_data;
}
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 3dd3ed46d473..136f86a1627d 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
trigger_data->net_dev = NULL;
}
- strncpy(trigger_data->device_name, buf, size);
+ memcpy(trigger_data->device_name, buf, size);
+ trigger_data->device_name[size] = 0;
if (size > 0 && trigger_data->device_name[size - 1] == '\n')
trigger_data->device_name[size - 1] = 0;
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
container_of(nb, struct led_netdev_data, notifier);
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
- && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
- && evt != NETDEV_CHANGENAME)
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- if (strcmp(dev->name, trigger_data->device_name))
+ if (!(dev == trigger_data->net_dev ||
+ (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
return NOTIFY_DONE;
cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
dev_hold(dev);
trigger_data->net_dev = dev;
break;
- case NETDEV_CHANGENAME:
case NETDEV_UNREGISTER:
- if (trigger_data->net_dev) {
- dev_put(trigger_data->net_dev);
- trigger_data->net_dev = NULL;
- }
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
break;
case NETDEV_UP:
case NETDEV_CHANGE:
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 95c6d86ab5e8..c4ef1fceead6 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -115,6 +115,7 @@ struct mapped_device {
struct srcu_struct io_barrier;
};
+void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index b53f30f16b4d..4b76f84424c3 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -36,7 +36,7 @@ struct dm_device {
struct list_head list;
};
-const char *dm_allowed_targets[] __initconst = {
+const char * const dm_allowed_targets[] __initconst = {
"crypt",
"delay",
"linear",
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d57d997a52c8..7c678f50aaa3 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
- range2->logical_sector + range2->n_sectors > range2->logical_sector;
+ range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
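The overlap test fixed in the hunk above is the standard interval condition: two ranges [a, a + na) and [b, b + nb) overlap exactly when a < b + nb and b < a + na. The old second comparison tested range2 against itself, which holds for any non-empty range, so the function degenerated to the first condition alone. A worked example (not part of the patch):

/*
 * range1 = [0, 8), range2 = [16, 24):
 *
 *   old: 0 < 24 && 24 > 16   ->  "overlap" reported (second test compares
 *                                 range2 with itself, always true)
 *   new: 0 < 24 &&  8 > 16   ->  no overlap (correct)
 */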
@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
struct dm_integrity_range *last_range =
list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
struct task_struct *last_range_task;
- if (!ranges_overlap(range, last_range))
- break;
last_range_task = last_range->task;
list_del(&last_range->wait_entry);
if (!add_new_range(ic, last_range, false)) {
@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
journal_watermark = val;
else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
sync_msec = val;
- else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+ else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
if (ic->meta_dev) {
dm_put_device(ti, ic->meta_dev);
ic->meta_dev = NULL;
@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
- } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+ } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
if (r)
goto bad;
- } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+ } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
"Invalid journal_crypt argument");
if (r)
goto bad;
- } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+ } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
"Invalid journal_mac argument");
if (r)
@@ -3616,7 +3614,7 @@ static struct target_type integrity_target = {
.io_hints = dm_integrity_io_hints,
};
-int __init dm_integrity_init(void)
+static int __init dm_integrity_init(void)
{
int r;
@@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void)
return r;
}
-void dm_integrity_exit(void)
+static void __exit dm_integrity_exit(void)
{
dm_unregister_target(&integrity_target);
kmem_cache_destroy(journal_io_cache);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 09773636602d..b66745bd08bb 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
}
if (unlikely(error == BLK_STS_TARGET)) {
- if (req_op(clone) == REQ_OP_WRITE_SAME &&
- !clone->q->limits.max_write_same_sectors)
+ if (req_op(clone) == REQ_OP_DISCARD &&
+ !clone->q->limits.max_discard_sectors)
+ disable_discard(tio->md);
+ else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+ !clone->q->limits.max_write_same_sectors)
disable_write_same(tio->md);
- if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
- !clone->q->limits.max_write_zeroes_sectors)
+ else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+ !clone->q->limits.max_write_zeroes_sectors)
disable_write_zeroes(tio->md);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ba9481f1bf3c..cde3b49b2a91 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
+static int device_requires_stable_pages(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+}
+
+/*
+ * If any underlying device requires stable pages, a table must require
+ * them as well. Only targets that support iterate_devices are considered:
+ * don't want error, zero, etc to require stable pages.
+ */
+static bool dm_table_requires_stable_pages(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+ return true;
+ }
+
+ return false;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_table_verify_integrity(t);
/*
+ * Some devices don't use blk_integrity but still want stable pages
+ * because they do their own checksumming.
+ */
+ if (dm_table_requires_stable_pages(t))
+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ else
+ q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
+ /*
* Determine whether or not this queue's I/O timings contribute
* to the entropy pool, Only request-based targets use this.
* Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68d24056d0b1..043f0761e4a0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
}
+void disable_discard(struct mapped_device *md)
+{
+ struct queue_limits *limits = dm_get_queue_limits(md);
+
+ /* device doesn't really support DISCARD, disable it */
+ limits->max_discard_sectors = 0;
+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
void disable_write_same(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
dm_endio_fn endio = tio->ti->type->end_io;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
- if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ if (bio_op(bio) == REQ_OP_DISCARD &&
+ !bio->bi_disk->queue->limits.max_discard_sectors)
+ disable_discard(md);
+ else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+ !bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
- if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
- /*
- * BIO based queue uses its own splitting. When multipage bvecs
- * is switched on, size of the incoming bio may be too big to
- * be handled in some targets, such as crypt.
- *
- * When these targets are ready for the big bio, we can remove
- * the limit.
- */
- ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
+ ti->max_io_len = (uint32_t) len;
return 0;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0ce2d8dfc5f1..26ad6468d13a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
config MFD_SUN6I_PRCM
bool "Allwinner A31 PRCM controller"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
select MFD_CORE
help
Support for the PRCM (Power/Reset/Clock Management) unit available
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..43ac71691fe4 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
static const struct mfd_cell sprd_pmic_devs[] = {
{
.name = "sc27xx-wdt",
- .of_compatible = "sprd,sc27xx-wdt",
+ .of_compatible = "sprd,sc2731-wdt",
}, {
.name = "sc27xx-rtc",
- .of_compatible = "sprd,sc27xx-rtc",
+ .of_compatible = "sprd,sc2731-rtc",
}, {
.name = "sc27xx-charger",
- .of_compatible = "sprd,sc27xx-charger",
+ .of_compatible = "sprd,sc2731-charger",
}, {
.name = "sc27xx-chg-timer",
- .of_compatible = "sprd,sc27xx-chg-timer",
+ .of_compatible = "sprd,sc2731-chg-timer",
}, {
.name = "sc27xx-fast-chg",
- .of_compatible = "sprd,sc27xx-fast-chg",
+ .of_compatible = "sprd,sc2731-fast-chg",
}, {
.name = "sc27xx-chg-wdt",
- .of_compatible = "sprd,sc27xx-chg-wdt",
+ .of_compatible = "sprd,sc2731-chg-wdt",
}, {
.name = "sc27xx-typec",
- .of_compatible = "sprd,sc27xx-typec",
+ .of_compatible = "sprd,sc2731-typec",
}, {
.name = "sc27xx-flash",
- .of_compatible = "sprd,sc27xx-flash",
+ .of_compatible = "sprd,sc2731-flash",
}, {
.name = "sc27xx-eic",
- .of_compatible = "sprd,sc27xx-eic",
+ .of_compatible = "sprd,sc2731-eic",
}, {
.name = "sc27xx-efuse",
- .of_compatible = "sprd,sc27xx-efuse",
+ .of_compatible = "sprd,sc2731-efuse",
}, {
.name = "sc27xx-thermal",
- .of_compatible = "sprd,sc27xx-thermal",
+ .of_compatible = "sprd,sc2731-thermal",
}, {
.name = "sc27xx-adc",
- .of_compatible = "sprd,sc27xx-adc",
+ .of_compatible = "sprd,sc2731-adc",
}, {
.name = "sc27xx-audio-codec",
- .of_compatible = "sprd,sc27xx-audio-codec",
+ .of_compatible = "sprd,sc2731-audio-codec",
}, {
.name = "sc27xx-regulator",
- .of_compatible = "sprd,sc27xx-regulator",
+ .of_compatible = "sprd,sc2731-regulator",
}, {
.name = "sc27xx-vibrator",
- .of_compatible = "sprd,sc27xx-vibrator",
+ .of_compatible = "sprd,sc2731-vibrator",
}, {
.name = "sc27xx-keypad-led",
- .of_compatible = "sprd,sc27xx-keypad-led",
+ .of_compatible = "sprd,sc2731-keypad-led",
}, {
.name = "sc27xx-bltc",
- .of_compatible = "sprd,sc27xx-bltc",
+ .of_compatible = "sprd,sc2731-bltc",
}, {
.name = "sc27xx-fgu",
- .of_compatible = "sprd,sc27xx-fgu",
+ .of_compatible = "sprd,sc2731-fgu",
}, {
.name = "sc27xx-7sreset",
- .of_compatible = "sprd,sc27xx-7sreset",
+ .of_compatible = "sprd,sc2731-7sreset",
}, {
.name = "sc27xx-poweroff",
- .of_compatible = "sprd,sc27xx-poweroff",
+ .of_compatible = "sprd,sc2731-poweroff",
}, {
.name = "sc27xx-syscon",
- .of_compatible = "sprd,sc27xx-syscon",
+ .of_compatible = "sprd,sc2731-syscon",
},
};
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016bc46d9..104477b512a2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ free:
return status;
}
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq)
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
static const struct i2c_device_id twl_ids[] = {
{ "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
{ "twl5030", 0 }, /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
/* One Client Driver , 4 Clients */
static struct i2c_driver twl_driver = {
.driver.name = DRIVER_NAME,
+ .driver.pm = &twl_dev_pm_ops,
.id_table = twl_ids,
.probe = twl_probe,
.remove = twl_remove,
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 3525236ed8d9..19c84214a7ea 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
/* We also need to update CI for internal queues */
if (cs->submitted) {
+ int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+ WARN_ONCE((cs_cnt < 0),
+ "hl%d: error in CS active cnt %d\n",
+ hdev->id, cs_cnt);
+
hl_int_hw_queue_update_ci(cs);
spin_lock(&hdev->hw_queues_mirror_lock);
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a53c12aff6ad..974a87789bd8 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
enum vm_type_t *vm_type;
bool once = true;
+ u64 j;
int i;
if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
} else {
phys_pg_pack = hnode->ptr;
seq_printf(s,
- " 0x%-14llx %-10u %-4u\n",
+ " 0x%-14llx %-10llu %-4u\n",
hnode->vaddr, phys_pg_pack->total_size,
phys_pg_pack->handle);
}
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
phys_pg_pack->page_size);
seq_puts(s, " physical address\n");
seq_puts(s, "---------------------\n");
- for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+ for (j = 0 ; j < phys_pg_pack->npages ; j++) {
seq_printf(s, " 0x%-14llx\n",
- phys_pg_pack->pages[i]);
+ phys_pg_pack->pages[j]);
}
}
spin_unlock(&vm->idr_lock);
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index de46aa6ed154..77d51be66c7e 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -11,6 +11,8 @@
#include <linux/sched/signal.h>
#include <linux/hwmon.h>
+#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
+
bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
{
if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
spin_lock_init(&hdev->hw_queues_mirror_lock);
atomic_set(&hdev->in_reset, 0);
atomic_set(&hdev->fd_open_cnt, 0);
+ atomic_set(&hdev->cs_active_cnt, 0);
return 0;
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
pci_save_state(hdev->pdev);
+ /* Block future CS/VM/JOB completion operations */
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ if (rc) {
+ dev_err(hdev->dev, "Can't suspend while in reset\n");
+ return -EIO;
+ }
+
+ /* This blocks all other stuff that is not blocked by in_reset */
+ hdev->disabled = true;
+
+ /*
+ * Flush anyone that is inside the critical section of enqueue
+ * jobs to the H/W
+ */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ /* Flush processes that are sending message to CPU */
+ mutex_lock(&hdev->send_cpu_message_lock);
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
rc = hdev->asic_funcs->suspend(hdev);
if (rc)
dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
pci_set_power_state(hdev->pdev, PCI_D0);
pci_restore_state(hdev->pdev);
- rc = pci_enable_device(hdev->pdev);
+ rc = pci_enable_device_mem(hdev->pdev);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI device in resume\n");
return rc;
}
+ pci_set_master(hdev->pdev);
+
rc = hdev->asic_funcs->resume(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed to enable PCI access from device CPU\n");
- return rc;
+ dev_err(hdev->dev, "Failed to resume device after suspend\n");
+ goto disable_device;
+ }
+
+
+ hdev->disabled = false;
+ atomic_set(&hdev->in_reset, 0);
+
+ rc = hl_device_reset(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to reset device during resume\n");
+ goto disable_device;
}
return 0;
+
+disable_device:
+ pci_clear_master(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+
+ return rc;
}
static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
struct hl_device_reset_work *device_reset_work =
container_of(work, struct hl_device_reset_work, reset_work);
struct hl_device *hdev = device_reset_work->hdev;
- u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+ u16 pending_total, pending_cnt;
struct task_struct *task = NULL;
+ if (hdev->pldm)
+ pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+ else
+ pending_total = HL_PENDING_RESET_PER_SEC;
+
+ pending_cnt = pending_total;
+
/* Flush all processes that are inside hl_open */
mutex_lock(&hdev->fd_open_cnt_lock);
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
}
}
+ pending_cnt = pending_total;
+
+ while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+ pending_cnt--;
+
+ ssleep(1);
+ }
+
+ if (atomic_read(&hdev->fd_open_cnt))
+ dev_crit(hdev->dev,
+ "Going to hard reset with open user contexts\n");
+
mutex_unlock(&hdev->fd_open_cnt_lock);
hl_device_reset(hdev, true, true);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 238dd57c541b..ea979ebd62fb 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
return retval;
}
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
- WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
/*
* goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
*
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
return retval;
}
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
- WREG32(mmMME_QM_GLBL_CFG1, 0);
- WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC0_QM_GLBL_CFG1, 0);
- WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC1_QM_GLBL_CFG1, 0);
- WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC2_QM_GLBL_CFG1, 0);
- WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC3_QM_GLBL_CFG1, 0);
- WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC4_QM_GLBL_CFG1, 0);
- WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC5_QM_GLBL_CFG1, 0);
- WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC6_QM_GLBL_CFG1, 0);
- WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC7_QM_GLBL_CFG1, 0);
- WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
static void goya_dma_stall(struct hl_device *hdev)
{
WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
{
int rc;
- rc = goya_stop_internal_queues(hdev);
-
- if (rc) {
- dev_err(hdev->dev, "failed to stop internal queues\n");
- return rc;
- }
-
- rc = goya_stop_external_queues(hdev);
-
- if (rc) {
- dev_err(hdev->dev, "failed to stop external queues\n");
- return rc;
- }
-
rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
int goya_resume(struct hl_device *hdev)
{
- int rc;
-
- goya_resume_external_queues(hdev);
- goya_resume_internal_queues(hdev);
-
- rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
- if (rc)
- dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
- return rc;
+ return goya_init_iatu(hdev);
}
static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
*dma_handle = hdev->asic_prop.sram_base_address;
- base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+ base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
switch (queue_id) {
case GOYA_QUEUE_ID_MME:
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a7c95e9f9b9a..a8ee52c880cd 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
* struct hl_vm_phys_pg_pack - physical page pack.
* @vm_type: describes the type of the virtual area descriptor.
* @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
* @mapping_cnt: number of shared mappings.
* @asid: the context related to this list.
- * @npages: num physical pages in the pack.
* @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
* @flags: HL_MEM_* flags related to this list.
* @handle: the provided handle related to this list.
* @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
struct hl_vm_phys_pg_pack {
enum vm_type_t vm_type; /* must be first */
u64 *pages;
+ u64 npages;
+ u64 total_size;
atomic_t mapping_cnt;
u32 asid;
- u32 npages;
u32 page_size;
- u32 total_size;
u32 flags;
u32 handle;
u32 offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
* @cb_pool_lock: protects the CB pool.
* @user_ctx: current user context executing.
* @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, KMD will restore this
* value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ * means already in H/W queues)
* @major: habanalabs KMD major.
* @high_pll: high PLL profile frequency.
* @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
struct hl_ctx *user_ctx;
atomic64_t dram_used_mem;
+ u64 timeout_jiffies;
+ u64 max_power;
atomic_t in_reset;
atomic_t curr_pll_profile;
atomic_t fd_open_cnt;
- u64 timeout_jiffies;
- u64 max_power;
+ atomic_t cs_active_cnt;
u32 major;
u32 high_pll;
u32 soft_reset_cnt;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 67bece26417c..ef3bb6951360 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
spin_unlock(&hdev->hw_queues_mirror_lock);
}
- list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+ atomic_inc(&hdev->cs_active_cnt);
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
if (job->ext_queue)
ext_hw_queue_schedule_job(job);
else
int_hw_queue_schedule_job(job);
- }
cs->submitted = true;
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 3a12fd1a5274..ce1fda40a8b8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
- u64 paddr = 0;
- u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
- int handle, rc, i;
+ u64 paddr = 0, total_size, num_pgs, i;
+ u32 num_curr_pgs, page_size, page_shift;
+ int handle, rc;
bool contiguous;
num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
- "failed to allocate %u huge contiguous pages\n",
+ "failed to allocate %llu huge contiguous pages\n",
num_pgs);
return -ENOMEM;
}
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
phys_pg_pack->flags = args->flags;
phys_pg_pack->contiguous = contiguous;
- phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+ phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
if (!phys_pg_pack->pages) {
rc = -ENOMEM;
goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
page_size);
- kfree(phys_pg_pack->pages);
+ kvfree(phys_pg_pack->pages);
pages_arr_err:
kfree(phys_pg_pack);
pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
- int i;
+ u64 i;
if (!phys_pg_pack->created_from_userptr) {
if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
}
}
- kfree(phys_pg_pack->pages);
+ kvfree(phys_pg_pack->pages);
kfree(phys_pg_pack);
}
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
* - Return the start address of the virtual block
*/
static u64 get_va_block(struct hl_device *hdev,
- struct hl_va_range *va_range, u32 size, u64 hint_addr,
+ struct hl_va_range *va_range, u64 size, u64 hint_addr,
bool is_userptr)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
}
if (!new_va_block) {
- dev_err(hdev->dev, "no available va block for size %u\n", size);
+ dev_err(hdev->dev, "no available va block for size %llu\n",
+ size);
goto out;
}
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
- u64 page_mask;
- u32 npages, total_npages, page_size = PAGE_SIZE;
+ u64 page_mask, total_npages;
+ u32 npages, page_size = PAGE_SIZE;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
page_mask = ~(((u64) page_size) - 1);
- phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+ phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+ GFP_KERNEL);
if (!phys_pg_pack->pages) {
rc = -ENOMEM;
goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
- u64 next_vaddr = vaddr, paddr;
+ u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
u32 page_size = phys_pg_pack->page_size;
- int i, rc = 0, mapped_pg_cnt = 0;
+ int rc = 0;
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
if (rc) {
dev_err(hdev->dev,
- "map failed for handle %u, npages: %d, mapped: %d",
+ "map failed for handle %u, npages: %llu, mapped: %llu",
phys_pg_pack->handle, phys_pg_pack->npages,
mapped_pg_cnt);
goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
enum vm_type_t *vm_type;
- u64 next_vaddr;
+ u64 next_vaddr, i;
u32 page_size;
bool is_userptr;
- int i, rc;
+ int rc;
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 2f2e99cb2743..3a5a2cec8305 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -832,7 +832,7 @@ err:
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
- u64 real_virt_addr;
+ u64 real_virt_addr, real_phys_addr;
u32 real_page_size, npages;
int i, rc, mapped_cnt = 0;
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
+ real_phys_addr = phys_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+ rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
real_page_size);
if (rc)
goto err;
real_virt_addr += real_page_size;
+ real_phys_addr += real_page_size;
mapped_cnt++;
}
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index c712b7deb3a9..7c8f203f9a24 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -48,7 +48,6 @@ struct alcor_sdmmc_host {
struct mmc_command *cmd;
struct mmc_data *data;
unsigned int dma_on:1;
- unsigned int early_data:1;
struct mutex cmd_mutex;
@@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
host->sg_count--;
}
-static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
- bool early)
+static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
{
struct alcor_pci_priv *priv = host->alcor_pci;
struct mmc_data *data = host->data;
@@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
ctrl |= AU6601_DATA_WRITE;
if (data->host_cookie == COOKIE_MAPPED) {
- if (host->early_data) {
- host->early_data = false;
- return;
- }
-
- host->early_data = early;
-
alcor_data_set_dma(host);
ctrl |= AU6601_DATA_DMA_MODE;
host->dma_on = 1;
@@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
static void alcor_prepare_data(struct alcor_sdmmc_host *host,
struct mmc_command *cmd)
{
+ struct alcor_pci_priv *priv = host->alcor_pci;
struct mmc_data *data = cmd->data;
if (!data)
@@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host,
if (data->host_cookie != COOKIE_MAPPED)
alcor_prepare_sg_miter(host);
- alcor_trigger_data_transfer(host, true);
+ alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
}
static void alcor_send_cmd(struct alcor_sdmmc_host *host,
@@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
if (!host->data)
return false;
- alcor_trigger_data_transfer(host, false);
+ alcor_trigger_data_transfer(host);
host->cmd = NULL;
return true;
}
@@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
if (!host->data)
alcor_request_complete(host, 1);
else
- alcor_trigger_data_transfer(host, false);
+ alcor_trigger_data_transfer(host);
host->cmd = NULL;
}
@@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
break;
case AU6601_INT_READ_BUF_RDY:
alcor_trf_block_pio(host, true);
- if (!host->blocks)
- break;
- alcor_trigger_data_transfer(host, false);
return 1;
case AU6601_INT_WRITE_BUF_RDY:
alcor_trf_block_pio(host, false);
- if (!host->blocks)
- break;
- alcor_trigger_data_transfer(host, false);
return 1;
case AU6601_INT_DMA_END:
if (!host->sg_count)
@@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
break;
}
- if (intmask & AU6601_INT_DATA_END)
- return 0;
+ if (intmask & AU6601_INT_DATA_END) {
+ if (!host->dma_on && host->blocks) {
+ alcor_trigger_data_transfer(host);
+ return 1;
+ } else {
+ return 0;
+ }
+ }
return 1;
}
@@ -1044,14 +1036,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
mmc->caps2 = MMC_CAP2_NO_SDIO;
mmc->ops = &alcor_sdc_ops;
- /* Hardware cannot do scatter lists */
+ /* The hardware does DMA data transfer of 4096 bytes to/from a single
+ * buffer address. Scatterlists are not supported, but upon DMA
+ * completion (signalled via IRQ), the original vendor driver does
+ * then immediately set up another DMA transfer of the next 4096
+ * bytes.
+ *
+ * This means that we need to handle the I/O in 4096 byte chunks.
+ * Lacking a way to limit the sglist entries to 4096 bytes, we instead
+ * impose that only one segment is provided, with maximum size 4096,
+ * which also happens to be the minimum size. This means that the
+ * single-entry sglist handled by this driver can be handed directly
+ * to the hardware, nice and simple.
+ *
+ * Unfortunately though, that means we only do 4096 bytes I/O per
+ * MMC command. A future improvement would be to make the driver
+ * accept sg lists and entries of any size, and simply iterate
+ * through them 4096 bytes at a time.
+ */
mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
-
- mmc->max_blk_size = mmc->max_seg_size;
- mmc->max_blk_count = mmc->max_segs;
-
- mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
+ mmc->max_req_size = mmc->max_seg_size;
}
static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
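A quick reading of the limits set in the hunk above, not additional driver code (the single-segment, 4096-byte values are the ones the comment itself describes):

	max_segs     = AU6601_MAX_DMA_SEGMENTS   (one scatterlist entry per request)
	max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE (the 4096-byte hardware DMA buffer)
	max_req_size = max_seg_size              (each MMC command moves at most 4 KiB)

so larger I/O reaches the driver as a sequence of 4 KiB requests, matching the "one DMA buffer at a time" behaviour the comment describes.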
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 49e0daf2ef5e..f37003df1e01 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif
-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+static void init_mmcsd_host(struct mmc_davinci_host *host)
{
mmc_davinci_reset_ctrl(host, 1);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index d54612257b06..45f7b9b53d48 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
struct scatterlist *sg;
int i;
- for_each_sg(data->sg, sg, data->sg_len, i) {
- void *buf = kmap_atomic(sg_page(sg) + sg->offset);
- buffer_swap32(buf, sg->length);
- kunmap_atomic(buf);
- }
+ for_each_sg(data->sg, sg, data->sg_len, i)
+ buffer_swap32(sg_virt(sg), sg->length);
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
{
struct mmc_data *data = host->req->data;
struct scatterlist *sg;
- void *buf;
int stat, i;
host->data = data;
@@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
if (data->flags & MMC_DATA_READ) {
for_each_sg(data->sg, sg, data->sg_len, i) {
- buf = kmap_atomic(sg_page(sg) + sg->offset);
- stat = mxcmci_pull(host, buf, sg->length);
- kunmap(buf);
+ stat = mxcmci_pull(host, sg_virt(sg), sg->length);
if (stat)
return stat;
host->datasize += sg->length;
}
} else {
for_each_sg(data->sg, sg, data->sg_len, i) {
- buf = kmap_atomic(sg_page(sg) + sg->offset);
- stat = mxcmci_push(host, buf, sg->length);
- kunmap(buf);
+ stat = mxcmci_push(host, sg_virt(sg), sg->length);
if (stat)
return stat;
host->datasize += sg->length;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c907bf502a12..c1d3f0e38921 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
struct dma_async_tx_descriptor *tx;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
struct dma_slave_config config;
struct dma_chan *chan;
unsigned int nob = data->blocks;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 71e13844df6c..8742e27e4e8b 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
struct renesas_sdhi *priv;
struct resource *res;
int irq, ret, i;
+ u16 ver;
of_data = of_device_get_match_data(&pdev->dev);
@@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ret)
goto efree;
+ ver = sd_ctrl_read16(host, CTL_VERSION);
+ /* GEN2_SDR104 is first known SDHI to use 32bit block count */
+ if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
+ mmc_data->max_blk_count = U16_MAX;
+
ret = tmio_mmc_host_probe(host);
if (ret < 0)
goto edisclk;
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
+ if (ver == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
/* Enable tuning iff we have an SCC and a supported mode */
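On the block-count clamp in the hunk above: capping max_blk_count at U16_MAX limits a single request to 65535 blocks on pre-SDR104 controllers; assuming the usual 512-byte MMC block size that is just under 32 MiB, which is what the older 16-bit block-count register can actually express. (Editorial arithmetic, not part of the patch.)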
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index b1a66ca3821a..9f20fff9781b 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
sdhci_reset(host, mask);
}
+#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
+ SDHCI_INT_TIMEOUT)
+#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
+
+static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (omap_host->is_tuning && host->cmd && !host->data_early &&
+ (intmask & CMD_ERR_MASK)) {
+
+ /*
+ * Since we are not resetting data lines during tuning
+ * operation, data error or data complete interrupts
+ * might still arrive. Mark this request as a failure
+ * but still wait for the data interrupt
+ */
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ host->cmd->error = -EILSEQ;
+
+ host->cmd = NULL;
+
+ /*
+ * Sometimes command error interrupts and command complete
+ * interrupt will arrive together. Clear all command related
+ * interrupts here.
+ */
+ sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
+ intmask &= ~CMD_MASK;
+ }
+
+ return intmask;
+}
+
static struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
@@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = {
.platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
.reset = sdhci_omap_reset,
.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
+ .irq = sdhci_omap_irq,
};
static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
@@ -1056,6 +1094,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
mmc->f_max = 48000000;
}
+ if (!mmc_can_gpio_ro(mmc))
+ mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
pltfm_host->clk = devm_clk_get(dev, "fck");
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 72428b6bfc47..7b7286b4d81e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
continue;
}
- if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+ /*
+ * Check "time_after" together with "!chip_good" so that a scheduling delay
+ * past the timeout is not reported as a failure when the write completed.
+ */
+ if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
break;
if (chip_good(map, adr, datum)) {
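The new comment above describes a common polling idiom: re-test the completion condition together with the timeout, so that a long scheduling delay is not mistaken for a real timeout. A minimal user-space sketch of the idiom follows; device_ready() and wait_ready() are illustrative stand-ins, not driver functions.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool device_ready(void)                  /* stand-in for chip_good() */
{
        static int polls;
        return ++polls > 3;                     /* "completes" on the 4th poll */
}

static int wait_ready(int timeout_s)
{
        time_t deadline = time(NULL) + timeout_s;

        for (;;) {
                /* Fail only when the deadline has passed AND the device is
                 * still not ready; re-checking here avoids reporting a
                 * spurious timeout after a long scheduling delay. */
                if (time(NULL) > deadline && !device_ready())
                        return -1;
                if (device_ready())
                        return 0;
        }
}

int main(void)
{
        printf("wait_ready() = %d\n", wait_ready(1));
        return 0;
}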
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5e4ca082cfcd..7a96d168efc4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -216,8 +216,8 @@ config GENEVE
config GTP
tristate "GPRS Tunneling Protocol datapath (GTP-U)"
- depends on INET && NET_UDP_TUNNEL
- select NET_IP_TUNNEL
+ depends on INET
+ select NET_UDP_TUNNEL
---help---
This allows one to create gtp virtual interfaces that provide
the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+ return sprintf(buf, "%*phC\n",
+ slave->dev->addr_len,
+ slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dce84a2a65c7..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
lane = mv88e6390x_serdes_get_lane(chip, port);
- if (lane < 0)
+ if (lane < 0 && lane != -ENODEV)
return lane;
- if (chip->ports[port].serdes_irq) {
- err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (lane >= 0) {
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
}
- err = mv88e6390x_serdes_power(chip, port, false);
- if (err)
- return err;
+ chip->ports[port].cmode = 0;
if (cmode) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
+ chip->ports[port].cmode = cmode;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return lane;
+
err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
}
}
- chip->ports[port].cmode = cmode;
-
return 0;
}
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 576b37d12a63..c4fa400efdcc 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
}
+static u32
+qca8k_port_to_phy(int port)
+{
+ /* From Andrew Lunn:
+ * Port 0 has no internal phy.
+ * Port 1 has an internal PHY at MDIO address 0.
+ * Port 2 has an internal PHY at MDIO address 1.
+ * ...
+ * Port 5 has an internal PHY at MDIO address 4.
+ * Port 6 has no internal PHY.
+ */
+
+ return port - 1;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
+{
+ u32 phy, val;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ /* The caller is responsible for not passing bad ports,
+ * but we still guard against out-of-range values here.
+ */
+ phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+ QCA8K_MDIO_MASTER_DATA(data);
+
+ qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+ return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
+{
+ u32 phy, val;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ /* The caller is responsible for not passing bad ports,
+ * but we still guard against out-of-range values here.
+ */
+ phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+ if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY))
+ return -ETIMEDOUT;
+
+ val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
+ QCA8K_MDIO_MASTER_DATA_MASK);
+
+ return val;
+}
+
+static int
+qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ return qca8k_mdio_write(priv, port, regnum, data);
+}
+
+static int
+qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_mdio_read(priv, port, regnum);
+
+ if (ret < 0)
+ return 0xffff;
+
+ return ret;
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+ u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+ struct device_node *ports, *port;
+ int err;
+
+ ports = of_get_child_by_name(priv->dev->of_node, "ports");
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ err = of_property_read_u32(port, "reg", &reg);
+ if (err)
+ return err;
+
+ if (!dsa_is_user_port(priv->ds, reg))
+ continue;
+
+ if (of_property_read_bool(port, "phy-handle"))
+ external_mdio_mask |= BIT(reg);
+ else
+ internal_mdio_mask |= BIT(reg);
+ }
+
+ if (!external_mdio_mask && !internal_mdio_mask) {
+ dev_err(priv->dev, "no PHYs are defined.\n");
+ return -EINVAL;
+ }
+
+ /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
+ * the MDIO_MASTER register also _disconnects_ the external MDC
+ * passthrough to the internal PHYs. It's not possible to use both
+ * configurations at the same time!
+ *
+ * Because this came up during the review process:
+ * If the external mdio-bus driver were capable of magically
+ * disabling QCA8K_MDIO_MASTER_EN and of mutex/spin-locking out the
+ * qca8k's accessors for the duration, it would be possible to pull
+ * this off.
+ */
+ if (!!external_mdio_mask && !!internal_mdio_mask) {
+ dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+ return -EINVAL;
+ }
+
+ if (external_mdio_mask) {
+ /* Make sure to disable the internal mdio bus in case
+ * a dt-overlay and driver reload have changed the configuration
+ */
+
+ qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
+ return 0;
+ }
+
+ priv->ops.phy_read = qca8k_phy_read;
+ priv->ops.phy_write = qca8k_phy_write;
+ return 0;
+}
+
static int
qca8k_setup(struct dsa_switch *ds)
{
@@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds)
if (IS_ERR(priv->regmap))
pr_warn("regmap initialization failed");
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
+
/* Initialize CPU port pad mode (xMII type, delays...) */
phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
if (phy_mode < 0) {
@@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
qca8k_port_set_status(priv, port, 1);
}
-static int
-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- return mdiobus_read(priv->bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- return mdiobus_write(priv->bus, phy, regnum, val);
-}
-
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
@@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.setup = qca8k_setup,
.adjust_link = qca8k_adjust_link,
.get_strings = qca8k_get_strings,
- .phy_read = qca8k_phy_read,
- .phy_write = qca8k_phy_write,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.get_mac_eee = qca8k_get_mac_eee,
@@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
return -ENOMEM;
priv->ds->priv = priv;
- priv->ds->ops = &qca8k_switch_ops;
+ priv->ops = qca8k_switch_ops;
+ priv->ds->ops = &priv->ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index d146e54c8a6c..249fd62268e5 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -49,6 +49,18 @@
#define QCA8K_MIB_FLUSH BIT(24)
#define QCA8K_MIB_CPU_KEEP BIT(20)
#define QCA8K_MIB_BUSY BIT(17)
+#define QCA8K_MDIO_MASTER_CTRL 0x3c
+#define QCA8K_MDIO_MASTER_BUSY BIT(31)
+#define QCA8K_MDIO_MASTER_EN BIT(30)
+#define QCA8K_MDIO_MASTER_READ BIT(27)
+#define QCA8K_MDIO_MASTER_WRITE 0
+#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
+#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
+#define QCA8K_MDIO_MASTER_DATA(x) (x)
+#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
+#define QCA8K_MDIO_MASTER_MAX_PORTS 5
+#define QCA8K_MDIO_MASTER_MAX_REG 32
#define QCA8K_GOL_MAC_ADDR0 0x60
#define QCA8K_GOL_MAC_ADDR1 0x64
#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
@@ -169,6 +181,7 @@ struct qca8k_priv {
struct dsa_switch *ds;
struct mutex reg_mutex;
struct device *dev;
+ struct dsa_switch_ops ops;
};
struct qca8k_mib_desc {
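The qca8k change above stops pointing every switch at the shared const ops table and instead copies it into each instance, so that phy_read/phy_write can be overridden only when the internal MDIO bus is in use. A rough user-space sketch of that "copy the const template, patch one hook" pattern, with made-up names:

#include <stdio.h>

struct dev_ops {
        int (*read)(int reg);
        int (*write)(int reg, int val);
};

static int generic_read(int reg)            { return reg * 2; }
static int generic_write(int reg, int val)  { (void)reg; (void)val; return 0; }
static int private_read(int reg)            { return reg * 10; }

static const struct dev_ops generic_ops = {
        .read  = generic_read,
        .write = generic_write,
};

struct my_device {
        struct dev_ops ops;             /* per-instance, writable copy */
        const struct dev_ops *active;   /* what callers actually use   */
};

int main(void)
{
        struct my_device dev;

        dev.ops = generic_ops;          /* start from the shared template */
        dev.ops.read = private_read;    /* override just one hook         */
        dev.active = &dev.ops;

        printf("read(3) = %d\n", dev.active->read(3));
        return 0;
}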
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 808abb6b3671..b15752267c8d 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev)
static void set_rx_mode(struct net_device *dev)
{
int ioaddr = dev->base_addr;
- short new_mode;
+ unsigned short new_mode;
if (dev->flags & IFF_PROMISC) {
if (corkscrew_debug > 3)
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 342ae08ec3c2..d60a86aa8aa8 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
-
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
static enum mac8390_access mac8390_testio(unsigned long membase)
{
- unsigned long outdata = 0xA5A0B5B0;
- unsigned long indata = 0x00000000;
+ u32 outdata = 0xA5A0B5B0;
+ u32 indata = 0;
+
/* Try writing 32 bits */
- memcpy_toio((void __iomem *)membase, &outdata, 4);
- /* Now compare them */
- if (memcmp_withio(&outdata, membase, 4) == 0)
+ nubus_writel(outdata, membase);
+ /* Now read it back */
+ indata = nubus_readl(membase);
+ if (outdata == indata)
return ACCESS_32;
+
+ outdata = 0xC5C0D5D0;
+ indata = 0;
+
/* Write 16 bit output */
word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
+
return ACCESS_UNKNOWN;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 74550ccc7a20..e2ffb159cbe2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
}
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
- if (buff->is_udp_cso || buff->is_tcp_cso)
- __skb_incr_checksum_unnecessary(skb);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ __skb_incr_checksum_unnecessary(skb);
}
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0bb9d7b3a2b6..4c586ba4364b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons)) {
+ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return;
}
@@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
cons = rxcmp->rx_cmp_opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- data = rx_buf->data;
- data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
prefetch(data_ptr);
misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
@@ -1610,11 +1614,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
rc = -EIO;
+ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
goto next_rx;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 328373e0578f..060a6f386104 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp)
pci_set_power_state(tp->pdev, PCI_D3hot);
}
-static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
{
switch (val & MII_TG3_AUX_STAT_SPDMASK) {
case MII_TG3_AUX_STAT_10HALF:
@@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
bool current_link_up;
u32 bmsr, val;
u32 lcl_adv, rmt_adv;
- u16 current_speed;
+ u32 current_speed;
u8 current_duplex;
int i, err;
@@ -5719,7 +5719,7 @@ out:
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
u32 orig_pause_cfg;
- u16 orig_active_speed;
+ u32 orig_active_speed;
u8 orig_active_duplex;
u32 mac_status;
bool current_link_up;
@@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
int err = 0;
u32 bmsr, bmcr;
- u16 current_speed = SPEED_UNKNOWN;
+ u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
u32 local_adv, remote_adv, sgsr;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index a772a33b685c..6953d0546acb 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info {
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
- u16 speed;
+ u32 speed;
u8 duplex;
u8 autoneg;
u8 flowctrl;
@@ -2882,7 +2882,7 @@ struct tg3_link_config {
u8 active_flowctrl;
u8 active_duplex;
- u16 active_speed;
+ u32 active_speed;
u32 rmt_adv;
};
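A plausible reason for widening the tg3 speed fields is that SPEED_UNKNOWN is defined as -1 in the ethtool UAPI, which a u16 silently truncates so later comparisons against it fail. A small standalone demonstration, assuming only that SPEED_UNKNOWN is -1:

#include <stdint.h>
#include <stdio.h>

#define SPEED_UNKNOWN (-1)      /* as in the ethtool UAPI */

int main(void)
{
        uint16_t speed16 = SPEED_UNKNOWN;       /* truncated to 0xffff   */
        uint32_t speed32 = SPEED_UNKNOWN;       /* kept as 0xffffffff    */

        /* prints 0: the u16 copy no longer compares equal to SPEED_UNKNOWN */
        printf("u16 matches: %d\n", speed16 == (uint32_t)SPEED_UNKNOWN);
        /* prints 1: the u32 copy still does */
        printf("u32 matches: %d\n", speed32 == (uint32_t)SPEED_UNKNOWN);
        return 0;
}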
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ad099fd01b45..3da2795e2486 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP) &&
+ gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
*/
@@ -3370,14 +3372,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
*hclk = devm_clk_get(&pdev->dev, "hclk");
}
- if (IS_ERR(*pclk)) {
+ if (IS_ERR_OR_NULL(*pclk)) {
err = PTR_ERR(*pclk);
+ if (!err)
+ err = -ENODEV;
+
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
return err;
}
- if (IS_ERR(*hclk)) {
+ if (IS_ERR_OR_NULL(*hclk)) {
err = PTR_ERR(*hclk);
+ if (!err)
+ err = -ENODEV;
+
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
return err;
}
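The macb change above works around the fact that PTR_ERR() of a NULL pointer is 0, which would otherwise be returned as success. A self-contained user-space sketch of the pitfall, with ERR_PTR/PTR_ERR/IS_ERR_OR_NULL re-implemented locally for the demo:

#include <errno.h>
#include <stdio.h>

/* Local stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR_OR_NULL(). */
static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR_OR_NULL(const void *p)
{
        return !p || (unsigned long)p >= (unsigned long)-4095;
}

/* Mirrors the shape of the fixed macb code: a NULL "clock" must not be
 * reported as success just because PTR_ERR(NULL) happens to be 0. */
static int get_clock(void *clk)
{
        if (IS_ERR_OR_NULL(clk)) {
                long err = PTR_ERR(clk);

                if (!err)               /* NULL: no clock was provided */
                        err = -ENODEV;
                return (int)err;
        }
        return 0;
}

int main(void)
{
        printf("error pointer -> %d\n", get_clock(ERR_PTR(-ENOENT))); /* -2  */
        printf("NULL pointer  -> %d\n", get_clock(NULL));             /* -19 */
        return 0;
}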
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aa2be4807191..28eac9056211 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
- cancel_delayed_work_sync(&nic->link_change_work);
-
/* wait till all queued set_rx_mode tasks completes */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq) {
+ cancel_delayed_work_sync(&nic->link_change_work);
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+ }
mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
/* wait till all queued set_rx_mode tasks completes if any */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq)
+ drain_workqueue(nic->nicvf_rx_mode_wq);
netif_carrier_off(netdev);
@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
/* Send VF config done msg to PF */
nicvf_send_cfg_done(nic);
- INIT_DELAYED_WORK(&nic->link_change_work,
- nicvf_link_status_check_task);
- queue_delayed_work(nic->nicvf_rx_mode_wq,
- &nic->link_change_work, 0);
+ if (nic->nicvf_rx_mode_wq) {
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
+ }
return 0;
cleanup:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
/* Check if page can be recycled */
if (page) {
ref_count = page_ref_count(page);
- /* Check if this page has been used once i.e 'put_page'
- * called after packet transmission i.e internal ref_count
- * and page's ref_count are equal i.e page can be recycled.
+ /* This page can be recycled if internal ref_count and page's
+ * ref_count are equal, indicating that the page has been used
+ * once for packet transmission. For non-XDP mode, internal
+ * ref_count is always '1'.
*/
- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
- pgcache->ref_count--;
- else
- page = NULL;
-
- /* In non-XDP mode, page's ref_count needs to be '1' for it
- * to be recycled.
- */
- if (!rbdr->is_xdp && (ref_count != 1))
+ if (rbdr->is_xdp) {
+ if (ref_count == pgcache->ref_count)
+ pgcache->ref_count--;
+ else
+ page = NULL;
+ } else if (ref_count != 1) {
page = NULL;
+ }
}
if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
while (head < rbdr->pgcnt) {
pgcache = &rbdr->pgcache[head];
if (pgcache->page && page_ref_count(pgcache->page) != 0) {
- if (!rbdr->is_xdp) {
- put_page(pgcache->page);
- continue;
+ if (rbdr->is_xdp) {
+ page_ref_sub(pgcache->page,
+ pgcache->ref_count - 1);
}
- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
put_page(pgcache->page);
}
head++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 3130b43bba52..02959035ed3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
}
/* should never happen! */
- BUG_ON(1);
+ BUG();
return NULL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 88773ca58e6b..b3da81e90132 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
break;
default:
- BUG_ON(1);
+ BUG();
}
return buf_size;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
ppmax = max;
/* pool size must be multiple of unsigned long */
- bmap = BITS_TO_LONGS(ppmax);
+ bmap = ppmax / BITS_PER_TYPE(unsigned long);
+ if (!bmap)
+ return NULL;
+
ppmax = (bmap * sizeof(unsigned long)) << 3;
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
if (reserve_factor) {
ppmax_pool = ppmax / reserve_factor;
pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+ if (!pool) {
+ ppmax_pool = 0;
+ reserve_factor = 0;
+ }
pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
ndev->name, ppmax, ppmax_pool, pool_index_max);
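One reading of the libcxgb change is that BITS_TO_LONGS() rounds up, so a small per-CPU ppmax used to be inflated to a full word's worth of entries, whereas the new truncating division skips the per-CPU pool instead. A tiny arithmetic sketch of that difference, assuming 64-bit longs and an illustrative value of 40:

#include <stdio.h>

#define BITS_PER_LONG    64
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        unsigned int ppmax = 40;        /* small per-cpu maximum, for example */

        unsigned int old_bmap = BITS_TO_LONGS(ppmax);   /* rounds up to 1 */
        unsigned int new_bmap = ppmax / BITS_PER_LONG;  /* truncates to 0 */

        printf("old: bmap=%u -> ppmax inflated to %u\n",
               old_bmap, old_bmap * BITS_PER_LONG);
        printf("new: bmap=%u -> per-cpu pool skipped when zero\n", new_bmap);
        return 0;
}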
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2ba49e959c3f..dc339dc1adb2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
*/
queue_mapping = skb_get_queue_mapping(skb);
fq = &priv->fq[queue_mapping];
+
+ fd_len = dpaa2_fd_get_len(&fd);
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
+
+ /* Everything that happens after this point might race with
+ * the Tx confirmation callback for this frame.
+ */
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
@@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
free_tx_fd(priv, fq, &fd, false);
+ netdev_tx_completed_queue(nq, 1, fd_len);
} else {
- fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
percpu_stats->tx_bytes += fd_len;
-
- nq = netdev_get_tx_queue(net_dev, queue_mapping);
- netdev_tx_sent_queue(nq, fd_len);
}
return NETDEV_TX_OK;
@@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
dpaa2_fd_set_format(&fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
- fq = &priv->fq[smp_processor_id()];
+ fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
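The dpaa2 change moves the byte-queue accounting ahead of the hardware enqueue because the Tx confirmation callback can run as soon as the frame is handed over. A rough user-space sketch of that ordering rule, using stub counters rather than the real BQL API:

#include <stdio.h>

static unsigned int inflight_bytes;     /* stand-in for the BQL counter */

static void tx_sent(unsigned int len)      { inflight_bytes += len; }
static void tx_completed(unsigned int len) { inflight_bytes -= len; }

/* Pretend hardware enqueue: in the real driver the Tx confirmation may run
 * on another CPU as soon as this returns. */
static int hw_enqueue(unsigned int len)
{
        (void)len;
        return 0;                       /* return -1 to simulate a full ring */
}

static int xmit(unsigned int len)
{
        tx_sent(len);                   /* account first ...               */
        if (hw_enqueue(len) < 0) {      /* ... then hand off to hardware   */
                tx_completed(len);      /* undo the accounting on failure  */
                return -1;
        }
        return 0;
}

int main(void)
{
        xmit(1500);
        tx_completed(1500);             /* what the completion path would do */
        printf("bytes still in flight: %u\n", inflight_bytes);
        return 0;
}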
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
- hnae_free_buffers(ring);
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring->desc_num * sizeof(ring->desc[0]),
ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
+ if (is_rx_ring(ring))
+ hnae_free_buffers(ring);
+
hnae_free_desc(ring);
kfree(ring->desc_cb);
ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
};
struct hnae_queue {
- void __iomem *io_base;
+ u8 __iomem *io_base;
phys_addr_t phy_base;
struct hnae_ae_dev *dev; /* the device who use this queue */
struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
static void hns_mac_param_get(struct mac_params *param,
struct hns_mac_cb *mac_cb)
{
- param->vaddr = (void *)mac_cb->vaddr;
+ param->vaddr = mac_cb->vaddr;
param->mac_mode = hns_get_enet_interface(mac_cb);
ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
/*mac para struct ,mac get param from nic or dsaf when initialize*/
struct mac_params {
char addr[ETH_ALEN];
- void *vaddr; /*virtual address*/
+ u8 __iomem *vaddr; /*virtual address*/
struct device *dev;
u8 mac_id;
/**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
enum mac_mode mac_mode;
u8 mac_id;
struct hns_mac_cb *mac_cb;
- void __iomem *io_base;
+ u8 __iomem *io_base;
unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
unsigned int virt_dev_num;
struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..61eea6ac846f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, port);
-
- mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
}
/**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
/* default config dvc to 0 */
mac_data.tbl_ucast_dvc = 0;
mac_data.tbl_ucast_out_port = mac_entry->port_num;
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
0xff,
mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
/* config mc entry with mask */
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* config key mask */
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
soft_mac_entry += entry_index;
soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
} else { /* not zero, just del port, update */
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
&tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
+static int hns_dsaf_get_port_id(u8 port)
+{
+ if (port < DSAF_SERVICE_NW_NUM)
+ return port;
+
+ if (port >= DSAF_BASE_INNER_PORT_NUM)
+ return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+ return -EINVAL;
+}
+
static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
memset(&temp_key, 0x0, sizeof(temp_key));
mask_entry.addr[0] = 0x01;
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
- port, mask_entry.addr);
+ 0xf, mask_entry.addr);
tbl_tcam_mcast.tbl_mcast_item_vld = 1;
tbl_tcam_mcast.tbl_mcast_old_en = 0;
- if (port < DSAF_SERVICE_NW_NUM) {
- mskid = port;
- } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
- mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
- } else {
+ /* set MAC port to handle multicast */
+ mskid = hns_dsaf_get_port_id(port);
+ if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port,
mask_key.high.val, mask_key.low.val);
return;
}
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+ /* set pool bit map to handle multicast */
+ mskid = hns_dsaf_get_port_id(port_num);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev,
+ "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port_num,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
+
memcpy(&temp_key, &mask_key, sizeof(mask_key));
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
(struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
} else {
- u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ u8 __iomem *base_addr = mac_cb->serdes_vaddr +
(mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
}
}
-static void __iomem *
+static u8 __iomem *
hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
{
return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
dsaf_dev->ppe_common[comm_index] = NULL;
}
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
- int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
{
return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
struct hns_ppe_hw_stats hw_stats;
u8 index; /* index in a ppe common device */
- void __iomem *io_base;
+ u8 __iomem *io_base;
int virq;
u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
struct ppe_common_cb {
struct device *dev;
struct dsaf_device *dsaf_dev;
- void __iomem *io_base;
+ u8 __iomem *io_base;
enum ppe_common_mode ppe_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
} else {
ring = &q->tx_ring;
- ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+ ring->io_base = ring_pair_cb->q.io_base +
HNS_RCB_TX_REG_OFFSET;
irq_idx = HNS_RCB_IRQ_IDX_TX;
mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
#define dsaf_set_bit(origin, shift, val) \
dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift, u32 val)
{
u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
#define dsaf_get_bit(origin, shift) \
dsaf_get_field((origin), (1ull << (shift)), (shift))
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift)
{
u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
#define dsaf_write_b(addr, data)\
- writeb((data), (__iomem unsigned char *)(addr))
+ writeb((data), (__iomem u8 *)(addr))
#define dsaf_read_b(addr)\
- readb((__iomem unsigned char *)(addr))
+ readb((__iomem u8 *)(addr))
#define hns_mac_reg_read64(drv, offset) \
- readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+ readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
- dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+ dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 60e7d7ae3787..4cd86ba1f050 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
- ring->stats.tx_pkts++;
- ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
+ /* update tx ring statistics. */
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 1c1f17ec6be2..162cb9afa0e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -22,6 +22,7 @@
#include "hns3_enet.h"
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
+#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
@@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->length = size;
- frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+ frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
@@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
int i;
size = skb_headlen(skb);
- buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+ buf_num = hns3_tx_bd_count(size);
frag_num = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frag_num; i++) {
frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
- bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >>
- HNS3_MAX_BD_SIZE_OFFSET;
+ bdnum_for_frag = hns3_tx_bd_count(size);
if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
@@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
}
if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >>
- HNS3_MAX_BD_SIZE_OFFSET;
+ buf_num = hns3_tx_bd_count(skb->len);
if (ring_space(ring) < buf_num)
return -EBUSY;
/* manual split the send packet */
@@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
buf_num = skb_shinfo(skb)->nr_frags + 1;
if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+ buf_num = hns3_tx_bd_count(skb->len);
if (ring_space(ring) < buf_num)
return -EBUSY;
/* manual split the send packet */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1db0bd41d209..75669cd0c311 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -193,7 +193,6 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_SIZE_OFFSET 16
#define HNS3_MAX_BD_PER_FRAG 8
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
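The new hns3_tx_bd_count() helper exists because HNS3_MAX_BD_SIZE is 65535, not a power of two, so the old ">> 16" under-counts descriptors once a fragment crosses a multiple of 65535. A short standalone check of the arithmetic:

#include <stdio.h>

#define HNS3_MAX_BD_SIZE   65535
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int size = 65536;      /* one byte past a single BD */

        unsigned int by_shift = (size + HNS3_MAX_BD_SIZE - 1) >> 16;   /* 1, wrong */
        unsigned int by_div   = DIV_ROUND_UP(size, HNS3_MAX_BD_SIZE);  /* 2        */

        printf("size=%u: shift=%u, DIV_ROUND_UP=%u\n", size, by_shift, by_div);
        return 0;
}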
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
};
struct hns_mdio_device {
- void *vbase; /* mdio reg base address */
+ u8 __iomem *vbase; /* mdio reg base address */
struct regmap *subctrl_vbase;
struct hns_mdio_sc_reg sc_reg;
};
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
#define MDIO_SC_CLK_ST 0x531C
#define MDIO_SC_RESET_ST 0x5A1C
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- writel_relaxed(value, reg_addr + reg);
+ writel_relaxed(value, base + reg);
}
#define MDIO_WRITE_REG(a, reg, value) \
mdio_write_reg((a)->vbase, (reg), (value))
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- return readl_relaxed(reg_addr + reg);
+ return readl_relaxed(base + reg);
}
#define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
u32 val)
{
u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
{
u32 origin;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3baabdc89726..90b62c1412c8 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
if (ehea_add_adapter_mr(adapter)) {
pr_err("creating MR failed\n");
+ of_node_put(eth_dn);
return -EIO;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ecbb1adcf3b..51cfe95f3e24 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_completion(&adapter->init_done);
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
- init_completion(&adapter->init_done);
+ reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
- init_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ init_completion(&adapter->init_done);
adapter->resetting = false;
adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5a0419421511..ecef949f3baa 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
/* create driver workqueue */
fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
fm10k_driver_name);
+ if (!fm10k_workqueue)
+ return -ENOMEM;
fm10k_dbg_init();
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..d3cc3427caad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -790,6 +790,8 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
+
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
- bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
- int qid = ring->queue_index;
-
- if (ring_is_xdp(ring))
- qid -= ring->vsi->alloc_queue_pairs;
-
- if (!xdp_on)
- return NULL;
-
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7874d0ec7fb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
- | (wol->wolopts != WAKE_FILTER))
+ if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
/* is this a new value? */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..b1c265012c8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC UMEM if XDP and ZC are enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+ bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+ int qid = ring->queue_index;
+
+ if (ring_is_xdp(ring))
+ qid -= ring->vsi->alloc_queue_pairs;
+
+ if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
+/**
* i40e_configure_tx_ring - Configure a transmit ring context and rest
* @ring: The Tx ring to configure
*
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
+ if (type == I40E_VSI_MAIN) {
+ vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_rings;
+ }
+
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
goto unlock_pf;
err_rings:
+ bitmap_free(vsi->af_xdp_zc_qps);
pf->next_vsi = i - 1;
kfree(vsi);
unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ bitmap_free(vsi->af_xdp_zc_qps);
i40e_vsi_free_arrays(vsi, true);
i40e_clear_rss_config_user(vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..31575c0bb884 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now;
+ struct timespec64 now, then;
+ then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now, NULL);
- timespec64_add_ns(&now, delta);
+ now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
mutex_unlock(&pf->tmreg_lock);
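The i40e PTP change converts the signed delta into a timespec before adding it, since an "add nanoseconds" helper that takes an unsigned count cannot represent negative adjustments. A simplified user-space sketch with local stand-ins for the timespec64 helpers (not the kernel API):

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/* Local stand-ins for timespec64 / ns_to_timespec64 / timespec64_add. */
struct ts { int64_t sec; int64_t nsec; };

static struct ts ns_to_ts(int64_t ns)
{
        struct ts t = { ns / NSEC_PER_SEC, ns % NSEC_PER_SEC };

        if (t.nsec < 0) {               /* keep nsec in [0, NSEC_PER_SEC) */
                t.sec -= 1;
                t.nsec += NSEC_PER_SEC;
        }
        return t;
}

static struct ts ts_add(struct ts a, struct ts b)
{
        return ns_to_ts((a.sec + b.sec) * NSEC_PER_SEC + a.nsec + b.nsec);
}

int main(void)
{
        struct ts now  = { 100, 0 };
        struct ts then = ns_to_ts(-1500000000LL);       /* a -1.5 s adjustment */
        struct ts adj  = ts_add(now, then);

        printf("adjusted time: %" PRId64 ".%09" PRId64 "\n", adj.sec, adj.nsec);
        return 0;
}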
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
+ clear_bit(qid, vsi->af_xdp_zc_qps);
i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
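The i40e change tracks zero-copy state per queue pair in a bitmap that is allocated with the VSI, set or cleared when AF_XDP zero-copy is enabled or disabled, and tested on the fast path before looking up the UMEM. A user-space sketch of the same lifecycle, with hand-rolled bit helpers rather than the kernel bitmap API:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long *bitmap_alloc_zero(unsigned int nbits)
{
        size_t words = (nbits + BITS_PER_WORD - 1) / BITS_PER_WORD;

        return calloc(words, sizeof(unsigned long));
}

static void set_bit(unsigned int n, unsigned long *map)
{
        map[n / BITS_PER_WORD] |= 1UL << (n % BITS_PER_WORD);
}

static void clear_bit(unsigned int n, unsigned long *map)
{
        map[n / BITS_PER_WORD] &= ~(1UL << (n % BITS_PER_WORD));
}

static int test_bit(unsigned int n, const unsigned long *map)
{
        return (map[n / BITS_PER_WORD] >> (n % BITS_PER_WORD)) & 1;
}

int main(void)
{
        unsigned long *zc_qps = bitmap_alloc_zero(16);  /* one bit per queue pair */

        set_bit(3, zc_qps);                     /* enable zero-copy on qid 3 */
        printf("qid 3 zero-copy? %d\n", test_bit(3, zc_qps));
        printf("qid 4 zero-copy? %d\n", test_bit(4, zc_qps));
        clear_bit(3, zc_qps);                   /* disable again */
        free(zc_qps);
        return 0;
}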
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 69b230c53fed..3269d8e94744 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
+ bool wake;
rtnl_lock();
netif_device_detach(netdev);
@@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- if (!runtime) {
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
- }
-#endif
-
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
ctrl = rd32(E1000_CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
wr32(E1000_CTRL, ctrl);
@@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
wr32(E1000_WUFC, 0);
}
- *enable_wake = wufc || adapter->en_mng_pt;
- if (!*enable_wake)
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
+ if (enable_wake)
+ *enable_wake = wake;
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
static int __maybe_unused igb_suspend(struct device *dev)
{
- int retval;
- bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- retval = __igb_shutdown(pdev, &wake, 0);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __igb_shutdown(pdev, &wake, 1);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
struct mii_bus *bus;
+ int err = -ENODEV;
- adapter->mii_bus = devm_mdiobus_alloc(dev);
- if (!adapter->mii_bus)
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
return -ENOMEM;
- bus = adapter->mii_bus;
-
switch (hw->device_id) {
/* C3000 SoCs */
case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*/
hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
- return mdiobus_register(bus);
+ err = mdiobus_register(bus);
+ if (!err) {
+ adapter->mii_bus = bus;
+ return 0;
+ }
ixgbe_no_mii_bus:
devm_mdiobus_free(dev, bus);
- adapter->mii_bus = NULL;
- return -ENODEV;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
if (!eproto)
return -EINVAL;
- if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
- return -EOPNOTSUPP;
-
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index eac245a93f91..4ab0d030b544 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
return err;
}
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301 + 2.16 * len [m]) * speed [Gbps] + 2.72 * MTU [B])
+ * minimum speed value is 40Gbps
+ */
static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
{
u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
- if (err) {
- mlx5_core_warn(priv->mdev, "cannot get port speed\n");
- return 0;
- }
+ if (err)
+ speed = SPEED_40000;
+ speed = max_t(u32, speed, SPEED_40000);
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
}
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
- u32 xoff, unsigned int mtu)
+ u32 xoff, unsigned int max_mtu)
{
int i;
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
if (port_buffer->buffer[i].size <
- (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+ (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
return -ENOMEM;
port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
- port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
+ port_buffer->buffer[i].xon =
+ port_buffer->buffer[i].xoff - max_mtu;
}
return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
/**
* update_buffer_lossy()
- * mtu: device's MTU
+ * max_mtu: netdev's max_mtu
* pfc_en: <input> current pfc configuration
* buffer: <input> current prio to buffer mapping
* xoff: <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* Return 0 if no error.
* Set change to true if buffer configuration is modified.
*/
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
struct mlx5e_port_buffer *port_buffer,
bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
}
if (changed) {
- err = update_xoff_threshold(port_buffer, xoff, mtu);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu);
if (err)
return err;
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
return 0;
}
+#define MINIMUM_MAX_MTU 9216
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
bool update_prio2buffer = false;
u8 buffer[MLX5E_MAX_PRIORITY];
bool update_buffer = false;
+ unsigned int max_mtu;
u32 total_used = 0;
u8 curr_pfc_en;
int err;
int i;
mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+ max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
err = mlx5e_port_query_buffer(priv, &port_buffer);
if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
&port_buffer, &update_buffer);
if (err)
return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
- &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+ xoff, &port_buffer, &update_buffer);
if (err)
return err;
}
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return -EINVAL;
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
/* Need to update buffer configuration if xoff value is changed */
if (!update_buffer && xoff != priv->dcbx.xoff) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
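
As an aside, the xoff arithmetic in calculate_xoff() above can be exercised with a small stand-alone sketch. Everything below is illustrative user-space code only (function name and sample values are made up), mirroring the integer math and the 40 Gbps speed floor introduced by the patch:

#include <stdio.h>

/* Stand-alone sketch (not driver code) of
 * xoff = (301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B],
 * computed with the patch's integer arithmetic and 40 Gbps floor.
 */
static unsigned int calc_xoff(unsigned int speed_mbps,
			      unsigned int cable_len_m,
			      unsigned int mtu)
{
	if (speed_mbps < 40000)		/* max_t(u32, speed, SPEED_40000) */
		speed_mbps = 40000;

	return (301 + 216 * cable_len_m / 100) * speed_mbps / 1000 +
	       272 * mtu / 100;
}

int main(void)
{
	/* e.g. 7 m cable, 100 Gbps link, 9216 byte max MTU */
	printf("xoff = %u bytes\n", calc_xoff(100000, 7, 9216));
	return 0;
}
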
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
if (err)
return err;
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return 0;
}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
}
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+ mutex_init(&mdev->mlx5e_res.td.list_lock);
return 0;
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
- int err = -ENOMEM;
+ int err = 0;
u32 tirn = 0;
int inlen;
void *in;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ if (!in) {
+ err = -ENOMEM;
goto out;
+ }
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a0987cc5fe4a..5efce4a3ff79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
- unsigned long *advertising_modes,
- u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+ u32 eth_proto_cap, bool ext)
{
unsigned long proto_cap = eth_proto_cap;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+ table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+ max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+ ARRAY_SIZE(ptys2legacy_ethtool_table);
+
for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(advertising_modes, advertising_modes,
table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
- u8 tx_pause, u8 rx_pause,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+ struct ethtool_link_ksettings *link_ksettings,
+ bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+ ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+ bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
- ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
u8 an_disable_admin;
u8 an_status;
u8 connector_type;
+ bool admin_ext;
bool ext;
int err;
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
eth_proto_capability);
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_admin);
+ /* Fields: eth_proto_admin and ext_eth_proto_admin are
+ * mutually exclusive. Hence try reading legacy advertising
+ * when extended advertising is zero.
+ * admin_ext indicates how eth_proto_admin should be
+ * interpreted
+ */
+ admin_ext = ext;
+ if (ext && !eth_proto_admin) {
+ eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+ eth_proto_admin);
+ admin_ext = false;
+ }
+
eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_oper);
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
get_supported(mdev, eth_proto_cap, link_ksettings);
- get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+ get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+ admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
- ext_requested = (link_ksettings->link_modes.advertising[0] >
- MLX5E_PTYS_EXT);
+ ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+ MLX5E_PTYS_EXT ||
+ link_ksettings->link_modes.advertising[1]);
ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
- /*when ptys_extended_ethernet is set legacy link modes are deprecated */
- if (ext_requested != ext_supported)
- return -EPROTONOSUPPORT;
+ ext_requested &= ext_supported;
speed = link_ksettings->base.speed;
ethtool2ptys_adver_func = ext_requested ?
mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
- err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
mlx5_toggle_port_link(mdev);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b4967a0ff8c7..d75dc44eb2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
return true;
}
+struct ip_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+ __be16 payload_len;
+ __u8 nexthdr;
+ __u8 hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+ u32 mask, offset;
+ u8 htype;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+ mask = ~act->mangle.mask;
+ /* For IPv4 & IPv6 header check 4 byte word,
+ * to determine that modified fields
+ * are NOT ttl & hop_limit only.
+ */
+ if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+ struct ip_ttl_word *ttl_word =
+ (struct ip_ttl_word *)&mask;
+
+ if (offset != offsetof(struct iphdr, ttl) ||
+ ttl_word->protocol ||
+ ttl_word->check) {
+ return true;
+ }
+ } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ struct ipv6_hoplimit_word *hoplimit_word =
+ (struct ipv6_hoplimit_word *)&mask;
+
+ if (offset != offsetof(struct ipv6hdr, payload_len) ||
+ hoplimit_word->payload_len ||
+ hoplimit_word->nexthdr) {
+ return true;
+ }
+ }
+ return false;
+}
+
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
u32 actions,
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct flow_action_entry *act;
bool modify_ip_header;
- u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
+ u8 ip_proto;
int i;
if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
act->id != FLOW_ACTION_ADD)
continue;
- htype = act->mangle.htype;
- if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
- htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ if (is_action_keys_supported(act)) {
modify_ip_header = true;
break;
}
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return 0;
}
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
- struct ip_tunnel_key *b)
+struct encap_key {
+ struct ip_tunnel_key *ip_tun_key;
+ int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+ struct encap_key *b)
{
- return memcmp(a, b, sizeof(*a));
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+ a->tunnel_type != b->tunnel_type;
}
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
{
- return jhash(key, sizeof(*key), 0);
+ return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+ key->tunnel_type);
}
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct ip_tunnel_info *tun_info;
- struct ip_tunnel_key *key;
+ struct encap_key key, e_key;
struct mlx5e_encap_entry *e;
unsigned short family;
uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = &parse_attr->tun_info[out_index];
family = ip_tunnel_info_af(tun_info);
- key = &tun_info->key;
+ key.ip_tun_key = &tun_info->key;
+ key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
- hash_key = hash_encap_info(key);
+ hash_key = hash_encap_info(&key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- if (!cmp_encap_info(&e->tun_info.key, key)) {
+ e_key.ip_tun_key = &e->tun_info.key;
+ e_key.tunnel_type = e->tunnel_type;
+ if (!cmp_encap_info(&e_key, &key)) {
found = true;
break;
}
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
parse_attr, hdrs, extack);
if (err)
return err;
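
As a side note, the mask-overlay trick used by is_action_keys_supported() above can be illustrated in isolation: the inverted 32-bit pedit mask at the ttl offset is reinterpreted as a small struct so the non-ttl bytes can be tested directly. The sketch below is user-space only, assumes a little-endian host, and uses made-up values:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Same layout as the ip_ttl_word the patch overlays on the mangle mask. */
struct ip_ttl_word {
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
};

/* Return 1 if the rewrite touches anything besides the ttl byte. */
static int touches_more_than_ttl(uint32_t inverted_mask)
{
	struct ip_ttl_word w;

	memcpy(&w, &inverted_mask, sizeof(w));
	return w.protocol || w.check;
}

int main(void)
{
	uint32_t ttl_only = 0x000000ff;		/* little-endian: only byte 0 set */
	uint32_t ttl_and_check = 0xffff00ff;	/* also covers the checksum bytes */

	printf("ttl only    -> %d\n", touches_more_than_ttl(ttl_only));
	printf("ttl + check -> %d\n", touches_more_than_ttl(ttl_and_check));
	return 0;
}
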
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ecd2c747f726..8a67fd197b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
int err;
+ memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
err = esw_create_legacy_vepa_table(esw);
if (err)
return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
/* Star rule to forward all traffic to uplink vport */
memset(spec, 0, sizeof(*spec));
+ memset(&dest, 0, sizeof(dest));
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = MLX5_VPORT_UPLINK;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f2260391be5b..9b2d78ee22b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
+ memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
err = esw_create_offloads_fdb_tables(esw, nvports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..8de64e88c670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
void *cmd;
int ret;
+ rcu_read_lock();
+ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+ rcu_read_unlock();
+
+ if (!flow) {
+ WARN_ONCE(1, "Received NULL pointer for handle\n");
+ return -EINVAL;
+ }
+
buf = kzalloc(size, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
cmd = (buf + 1);
- rcu_read_lock();
- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- rcu_read_unlock();
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
buf->complete = mlx_tls_kfree_complete;
ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+ if (ret < 0)
+ kfree(buf);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 70cc906a102b..76716419370d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
.size = 8,
.limit = 4
},
- .mr_cache[16] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[17] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[18] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[19] = {
- .size = 4,
- .limit = 2
- },
- .mr_cache[20] = {
- .size = 4,
- .limit = 2
- },
},
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 370ca94b6775..b8ba74de9555 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -40,6 +40,9 @@
#include "mlx5_core.h"
#include "lib/eq.h"
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
+
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
@@ -227,20 +230,49 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
+static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct, bool need_cleanup)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ goto destroy;
+ } else {
+ mlx5_core_warn(
+ dev, "failed drain DCT 0x%x with error 0x%x\n",
+ qp->qpn, err);
+ return err;
+ }
+ }
+ wait_for_completion(&dct->drained);
+destroy:
+ if (need_cleanup)
+ destroy_resource_common(dev, &dct->mqp);
+ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(destroy_dct_in, in, uid, qp->uid);
+ err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+ return err;
+}
+
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct,
- u32 *in, int inlen)
+ u32 *in, int inlen,
+ u32 *out, int outlen)
{
- u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
- u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
- u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
struct mlx5_core_qp *qp = &dct->mqp;
int err;
init_completion(&dct->drained);
MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err) {
mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
return err;
@@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
return 0;
err_cmd:
- MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
- MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
- MLX5_SET(destroy_dct_in, din, uid, qp->uid);
- mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
- (void *)&out, sizeof(dout));
+ _mlx5_core_destroy_dct(dev, dct, false);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
@@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct)
{
- u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
- struct mlx5_core_qp *qp = &dct->mqp;
- int err;
-
- err = mlx5_core_drain_dct(dev, dct);
- if (err) {
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- goto destroy;
- } else {
- mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
- return err;
- }
- }
- wait_for_completion(&dct->drained);
-destroy:
- destroy_resource_common(dev, &dct->mqp);
- MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
- MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
- MLX5_SET(destroy_dct_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
- return err;
+ return _mlx5_core_destroy_dct(dev, dct, true);
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 7a15e932ed2f..c1c1965d7acc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
return 0;
default:
/* Do not consider thresholds for zero temperature. */
- if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) {
+ if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
*temp = 0;
return 0;
}
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index bd6e9014bc74..7849119d407a 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -142,6 +142,12 @@ struct ks8851_net {
static int msg_enable;
+/* SPI frame opcodes */
+#define KS_SPIOP_RD (0x00)
+#define KS_SPIOP_WR (0x40)
+#define KS_SPIOP_RXFIFO (0x80)
+#define KS_SPIOP_TXFIFO (0xC0)
+
/* shift for byte-enable data */
#define BYTE_EN(_x) ((_x) << 2)
@@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
/* set dma read address */
ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
- /* start the packet dma process, and set auto-dequeue rx */
- ks8851_wrreg16(ks, KS_RXQCR,
- ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+ /* start DMA access */
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
if (rxlen > 4) {
unsigned int rxalign;
@@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
}
}
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ /* end DMA access and dequeue packet */
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
}
}
@@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ int ret;
+
+ ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev->name, ks);
+ if (ret < 0) {
+ netdev_err(dev, "failed to get irq\n");
+ return ret;
+ }
/* lock the card, even if we may not actually be doing anything
* else at the moment */
@@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
+ mii_check_link(&ks->mii);
return 0;
}
@@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev)
dev_kfree_skb(txb);
}
+ free_irq(dev->irq, ks);
+
return 0;
}
@@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi)
spi_set_drvdata(spi, ks);
+ netif_carrier_off(ks->netdev);
ndev->if_port = IF_PORT_100BASET;
ndev->netdev_ops = &ks8851_netdev_ops;
ndev->irq = spi->irq;
@@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi)
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
- ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- ndev->name, ks);
- if (ret < 0) {
- dev_err(&spi->dev, "failed to get irq\n");
- goto err_irq;
- }
-
ret = register_netdev(ndev);
if (ret) {
dev_err(&spi->dev, "failed to register network device\n");
@@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi)
return 0;
-
err_netdev:
- free_irq(ndev->irq, ks);
-
-err_irq:
+err_id:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
-err_id:
regulator_disable(ks->vdd_reg);
err_reg:
regulator_disable(ks->vdd_io);
@@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi)
dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
- free_irq(spi->irq, priv);
if (gpio_is_valid(priv->gpio))
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 852256ef1f22..23da1e3ee429 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -11,9 +11,15 @@
*/
#define KS_CCR 0x08
+#define CCR_LE (1 << 10) /* KSZ8851-16MLL */
#define CCR_EEPROM (1 << 9)
-#define CCR_SPI (1 << 8)
-#define CCR_32PIN (1 << 0)
+#define CCR_SPI (1 << 8) /* KSZ8851SNL */
+#define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */
+#define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */
+#define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */
+#define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */
+#define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */
+#define CCR_32PIN (1 << 0) /* KSZ8851SNL */
/* MAC address registers */
#define KS_MAR(_m) (0x15 - (_m))
@@ -112,13 +118,13 @@
#define RXCR1_RXE (1 << 0)
#define KS_RXCR2 0x76
-#define RXCR2_SRDBL_MASK (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT (5)
-#define RXCR2_SRDBL_4B (0x0 << 5)
-#define RXCR2_SRDBL_8B (0x1 << 5)
-#define RXCR2_SRDBL_16B (0x2 << 5)
-#define RXCR2_SRDBL_32B (0x3 << 5)
-#define RXCR2_SRDBL_FRAME (0x4 << 5)
+#define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */
+#define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */
#define RXCR2_IUFFP (1 << 4)
#define RXCR2_RXIUFCEZ (1 << 3)
#define RXCR2_UDPLFE (1 << 2)
@@ -143,8 +149,10 @@
#define RXFSHR_RXCE (1 << 0)
#define KS_RXFHBCR 0x7E
+#define RXFHBCR_CNT_MASK (0xfff << 0)
+
#define KS_TXQCR 0x80
-#define TXQCR_AETFE (1 << 2)
+#define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */
#define TXQCR_TXQMAM (1 << 1)
#define TXQCR_METFE (1 << 0)
@@ -167,6 +175,10 @@
#define KS_RXFDPR 0x86
#define RXFDPR_RXFPAI (1 << 14)
+#define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */
+#define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */
+#define RXFDPR_RXFP_MASK (0x7ff << 0)
+#define RXFDPR_RXFP_SHIFT (0)
#define KS_RXDTTR 0x8C
#define KS_RXDBCTR 0x8E
@@ -184,7 +196,7 @@
#define IRQ_RXMPDI (1 << 4)
#define IRQ_LDI (1 << 3)
#define IRQ_EDI (1 << 2)
-#define IRQ_SPIBEI (1 << 1)
+#define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */
#define IRQ_DEDI (1 << 0)
#define KS_RXFCTR 0x9C
@@ -257,42 +269,37 @@
#define KS_P1ANLPR 0xEE
#define KS_P1SCLMD 0xF4
-#define P1SCLMD_LEDOFF (1 << 15)
-#define P1SCLMD_TXIDS (1 << 14)
-#define P1SCLMD_RESTARTAN (1 << 13)
-#define P1SCLMD_DISAUTOMDIX (1 << 10)
-#define P1SCLMD_FORCEMDIX (1 << 9)
-#define P1SCLMD_AUTONEGEN (1 << 7)
-#define P1SCLMD_FORCE100 (1 << 6)
-#define P1SCLMD_FORCEFDX (1 << 5)
-#define P1SCLMD_ADV_FLOW (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX (1 << 0)
#define KS_P1CR 0xF6
-#define P1CR_HP_MDIX (1 << 15)
-#define P1CR_REV_POL (1 << 13)
-#define P1CR_OP_100M (1 << 10)
-#define P1CR_OP_FDX (1 << 9)
-#define P1CR_OP_MDI (1 << 7)
-#define P1CR_AN_DONE (1 << 6)
-#define P1CR_LINK_GOOD (1 << 5)
-#define P1CR_PNTR_FLOW (1 << 4)
-#define P1CR_PNTR_100BT_FDX (1 << 3)
-#define P1CR_PNTR_100BT_HDX (1 << 2)
-#define P1CR_PNTR_10BT_FDX (1 << 1)
-#define P1CR_PNTR_10BT_HDX (1 << 0)
+#define P1CR_LEDOFF (1 << 15)
+#define P1CR_TXIDS (1 << 14)
+#define P1CR_RESTARTAN (1 << 13)
+#define P1CR_DISAUTOMDIX (1 << 10)
+#define P1CR_FORCEMDIX (1 << 9)
+#define P1CR_AUTONEGEN (1 << 7)
+#define P1CR_FORCE100 (1 << 6)
+#define P1CR_FORCEFDX (1 << 5)
+#define P1CR_ADV_FLOW (1 << 4)
+#define P1CR_ADV_100BT_FDX (1 << 3)
+#define P1CR_ADV_100BT_HDX (1 << 2)
+#define P1CR_ADV_10BT_FDX (1 << 1)
+#define P1CR_ADV_10BT_HDX (1 << 0)
+
+#define KS_P1SR 0xF8
+#define P1SR_HP_MDIX (1 << 15)
+#define P1SR_REV_POL (1 << 13)
+#define P1SR_OP_100M (1 << 10)
+#define P1SR_OP_FDX (1 << 9)
+#define P1SR_OP_MDI (1 << 7)
+#define P1SR_AN_DONE (1 << 6)
+#define P1SR_LINK_GOOD (1 << 5)
+#define P1SR_PNTR_FLOW (1 << 4)
+#define P1SR_PNTR_100BT_FDX (1 << 3)
+#define P1SR_PNTR_100BT_HDX (1 << 2)
+#define P1SR_PNTR_10BT_FDX (1 << 1)
+#define P1SR_PNTR_10BT_HDX (1 << 0)
/* TX Frame control */
-
#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD (0x00)
-#define KS_SPIOP_WR (0x40)
-#define KS_SPIOP_RXFIFO (0x80)
-#define KS_SPIOP_TXFIFO (0xC0)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 35f8c9ef204d..c946841c0a06 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,6 +40,8 @@
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include "ks8851.h"
+
#define DRV_NAME "ks8851_mll"
static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
@@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define TX_BUF_SIZE 2000
#define RX_BUF_SIZE 2000
-#define KS_CCR 0x08
-#define CCR_EEPROM (1 << 9)
-#define CCR_SPI (1 << 8)
-#define CCR_8BIT (1 << 7)
-#define CCR_16BIT (1 << 6)
-#define CCR_32BIT (1 << 5)
-#define CCR_SHARED (1 << 4)
-#define CCR_32PIN (1 << 0)
-
-/* MAC address registers */
-#define KS_MARL 0x10
-#define KS_MARM 0x12
-#define KS_MARH 0x14
-
-#define KS_OBCR 0x20
-#define OBCR_ODS_16MA (1 << 6)
-
-#define KS_EEPCR 0x22
-#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB (1 << 3)
-#define EEPCR_EEDO (1 << 2)
-#define EEPCR_EESCK (1 << 1)
-#define EEPCR_EECS (1 << 0)
-
-#define KS_MBIR 0x24
-#define MBIR_TXMBF (1 << 12)
-#define MBIR_TXMBFA (1 << 11)
-#define MBIR_RXMBF (1 << 4)
-#define MBIR_RXMBFA (1 << 3)
-
-#define KS_GRR 0x26
-#define GRR_QMU (1 << 1)
-#define GRR_GSR (1 << 0)
-
-#define KS_WFCR 0x2A
-#define WFCR_MPRXE (1 << 7)
-#define WFCR_WF3E (1 << 3)
-#define WFCR_WF2E (1 << 2)
-#define WFCR_WF1E (1 << 1)
-#define WFCR_WF0E (1 << 0)
-
-#define KS_WF0CRC0 0x30
-#define KS_WF0CRC1 0x32
-#define KS_WF0BM0 0x34
-#define KS_WF0BM1 0x36
-#define KS_WF0BM2 0x38
-#define KS_WF0BM3 0x3A
-
-#define KS_WF1CRC0 0x40
-#define KS_WF1CRC1 0x42
-#define KS_WF1BM0 0x44
-#define KS_WF1BM1 0x46
-#define KS_WF1BM2 0x48
-#define KS_WF1BM3 0x4A
-
-#define KS_WF2CRC0 0x50
-#define KS_WF2CRC1 0x52
-#define KS_WF2BM0 0x54
-#define KS_WF2BM1 0x56
-#define KS_WF2BM2 0x58
-#define KS_WF2BM3 0x5A
-
-#define KS_WF3CRC0 0x60
-#define KS_WF3CRC1 0x62
-#define KS_WF3BM0 0x64
-#define KS_WF3BM1 0x66
-#define KS_WF3BM2 0x68
-#define KS_WF3BM3 0x6A
-
-#define KS_TXCR 0x70
-#define TXCR_TCGICMP (1 << 8)
-#define TXCR_TCGUDP (1 << 7)
-#define TXCR_TCGTCP (1 << 6)
-#define TXCR_TCGIP (1 << 5)
-#define TXCR_FTXQ (1 << 4)
-#define TXCR_TXFCE (1 << 3)
-#define TXCR_TXPE (1 << 2)
-#define TXCR_TXCRC (1 << 1)
-#define TXCR_TXE (1 << 0)
-
-#define KS_TXSR 0x72
-#define TXSR_TXLC (1 << 13)
-#define TXSR_TXMC (1 << 12)
-#define TXSR_TXFID_MASK (0x3f << 0)
-#define TXSR_TXFID_SHIFT (0)
-#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
-
-
-#define KS_RXCR1 0x74
-#define RXCR1_FRXQ (1 << 15)
-#define RXCR1_RXUDPFCC (1 << 14)
-#define RXCR1_RXTCPFCC (1 << 13)
-#define RXCR1_RXIPFCC (1 << 12)
-#define RXCR1_RXPAFMA (1 << 11)
-#define RXCR1_RXFCE (1 << 10)
-#define RXCR1_RXEFE (1 << 9)
-#define RXCR1_RXMAFMA (1 << 8)
-#define RXCR1_RXBE (1 << 7)
-#define RXCR1_RXME (1 << 6)
-#define RXCR1_RXUE (1 << 5)
-#define RXCR1_RXAE (1 << 4)
-#define RXCR1_RXINVF (1 << 1)
-#define RXCR1_RXE (1 << 0)
#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
RXCR1_RXMAFMA | RXCR1_RXPAFMA)
-
-#define KS_RXCR2 0x76
-#define RXCR2_SRDBL_MASK (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT (5)
-#define RXCR2_SRDBL_4B (0x0 << 5)
-#define RXCR2_SRDBL_8B (0x1 << 5)
-#define RXCR2_SRDBL_16B (0x2 << 5)
-#define RXCR2_SRDBL_32B (0x3 << 5)
-/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
-#define RXCR2_IUFFP (1 << 4)
-#define RXCR2_RXIUFCEZ (1 << 3)
-#define RXCR2_UDPLFE (1 << 2)
-#define RXCR2_RXICMPFCC (1 << 1)
-#define RXCR2_RXSAF (1 << 0)
-
-#define KS_TXMIR 0x78
-
-#define KS_RXFHSR 0x7C
-#define RXFSHR_RXFV (1 << 15)
-#define RXFSHR_RXICMPFCS (1 << 13)
-#define RXFSHR_RXIPFCS (1 << 12)
-#define RXFSHR_RXTCPFCS (1 << 11)
-#define RXFSHR_RXUDPFCS (1 << 10)
-#define RXFSHR_RXBF (1 << 7)
-#define RXFSHR_RXMF (1 << 6)
-#define RXFSHR_RXUF (1 << 5)
-#define RXFSHR_RXMR (1 << 4)
-#define RXFSHR_RXFT (1 << 3)
-#define RXFSHR_RXFTL (1 << 2)
-#define RXFSHR_RXRF (1 << 1)
-#define RXFSHR_RXCE (1 << 0)
-#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
- RXFSHR_RXFTL | RXFSHR_RXMR |\
- RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
- RXFSHR_RXTCPFCS)
-#define KS_RXFHBCR 0x7E
-#define RXFHBCR_CNT_MASK 0x0FFF
-
-#define KS_TXQCR 0x80
-#define TXQCR_AETFE (1 << 2)
-#define TXQCR_TXQMAM (1 << 1)
-#define TXQCR_METFE (1 << 0)
-
-#define KS_RXQCR 0x82
-#define RXQCR_RXDTTS (1 << 12)
-#define RXQCR_RXDBCTS (1 << 11)
-#define RXQCR_RXFCTS (1 << 10)
-#define RXQCR_RXIPHTOE (1 << 9)
-#define RXQCR_RXDTTE (1 << 7)
-#define RXQCR_RXDBCTE (1 << 6)
-#define RXQCR_RXFCTE (1 << 5)
-#define RXQCR_ADRFE (1 << 4)
-#define RXQCR_SDA (1 << 3)
-#define RXQCR_RRXEF (1 << 0)
#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
-#define KS_TXFDPR 0x84
-#define TXFDPR_TXFPAI (1 << 14)
-#define TXFDPR_TXFP_MASK (0x7ff << 0)
-#define TXFDPR_TXFP_SHIFT (0)
-
-#define KS_RXFDPR 0x86
-#define RXFDPR_RXFPAI (1 << 14)
-
-#define KS_RXDTTR 0x8C
-#define KS_RXDBCTR 0x8E
-
-#define KS_IER 0x90
-#define KS_ISR 0x92
-#define IRQ_LCI (1 << 15)
-#define IRQ_TXI (1 << 14)
-#define IRQ_RXI (1 << 13)
-#define IRQ_RXOI (1 << 11)
-#define IRQ_TXPSI (1 << 9)
-#define IRQ_RXPSI (1 << 8)
-#define IRQ_TXSAI (1 << 6)
-#define IRQ_RXWFDI (1 << 5)
-#define IRQ_RXMPDI (1 << 4)
-#define IRQ_LDI (1 << 3)
-#define IRQ_EDI (1 << 2)
-#define IRQ_SPIBEI (1 << 1)
-#define IRQ_DEDI (1 << 0)
-
-#define KS_RXFCTR 0x9C
-#define RXFCTR_THRESHOLD_MASK 0x00FF
-
-#define KS_RXFC 0x9D
-#define RXFCTR_RXFC_MASK (0xff << 8)
-#define RXFCTR_RXFC_SHIFT (8)
-#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
-#define RXFCTR_RXFCT_MASK (0xff << 0)
-#define RXFCTR_RXFCT_SHIFT (0)
-
-#define KS_TXNTFSR 0x9E
-
-#define KS_MAHTR0 0xA0
-#define KS_MAHTR1 0xA2
-#define KS_MAHTR2 0xA4
-#define KS_MAHTR3 0xA6
-
-#define KS_FCLWR 0xB0
-#define KS_FCHWR 0xB2
-#define KS_FCOWR 0xB4
-
-#define KS_CIDER 0xC0
-#define CIDER_ID 0x8870
-#define CIDER_REV_MASK (0x7 << 1)
-#define CIDER_REV_SHIFT (1)
-#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
-
-#define KS_CGCR 0xC6
-#define KS_IACR 0xC8
-#define IACR_RDEN (1 << 12)
-#define IACR_TSEL_MASK (0x3 << 10)
-#define IACR_TSEL_SHIFT (10)
-#define IACR_TSEL_MIB (0x3 << 10)
-#define IACR_ADDR_MASK (0x1f << 0)
-#define IACR_ADDR_SHIFT (0)
-
-#define KS_IADLR 0xD0
-#define KS_IAHDR 0xD2
-
-#define KS_PMECR 0xD4
-#define PMECR_PME_DELAY (1 << 14)
-#define PMECR_PME_POL (1 << 12)
-#define PMECR_WOL_WAKEUP (1 << 11)
-#define PMECR_WOL_MAGICPKT (1 << 10)
-#define PMECR_WOL_LINKUP (1 << 9)
-#define PMECR_WOL_ENERGY (1 << 8)
-#define PMECR_AUTO_WAKE_EN (1 << 7)
-#define PMECR_WAKEUP_NORMAL (1 << 6)
-#define PMECR_WKEVT_MASK (0xf << 2)
-#define PMECR_WKEVT_SHIFT (2)
-#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
-#define PMECR_WKEVT_ENERGY (0x1 << 2)
-#define PMECR_WKEVT_LINK (0x2 << 2)
-#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
-#define PMECR_WKEVT_FRAME (0x8 << 2)
-#define PMECR_PM_MASK (0x3 << 0)
-#define PMECR_PM_SHIFT (0)
-#define PMECR_PM_NORMAL (0x0 << 0)
-#define PMECR_PM_ENERGY (0x1 << 0)
-#define PMECR_PM_SOFTDOWN (0x2 << 0)
-#define PMECR_PM_POWERSAVE (0x3 << 0)
-
-/* Standard MII PHY data */
-#define KS_P1MBCR 0xE4
-#define P1MBCR_FORCE_FDX (1 << 8)
-
-#define KS_P1MBSR 0xE6
-#define P1MBSR_AN_COMPLETE (1 << 5)
-#define P1MBSR_AN_CAPABLE (1 << 3)
-#define P1MBSR_LINK_UP (1 << 2)
-
-#define KS_PHY1ILR 0xE8
-#define KS_PHY1IHR 0xEA
-#define KS_P1ANAR 0xEC
-#define KS_P1ANLPR 0xEE
-
-#define KS_P1SCLMD 0xF4
-#define P1SCLMD_LEDOFF (1 << 15)
-#define P1SCLMD_TXIDS (1 << 14)
-#define P1SCLMD_RESTARTAN (1 << 13)
-#define P1SCLMD_DISAUTOMDIX (1 << 10)
-#define P1SCLMD_FORCEMDIX (1 << 9)
-#define P1SCLMD_AUTONEGEN (1 << 7)
-#define P1SCLMD_FORCE100 (1 << 6)
-#define P1SCLMD_FORCEFDX (1 << 5)
-#define P1SCLMD_ADV_FLOW (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX (1 << 0)
-
-#define KS_P1CR 0xF6
-#define P1CR_HP_MDIX (1 << 15)
-#define P1CR_REV_POL (1 << 13)
-#define P1CR_OP_100M (1 << 10)
-#define P1CR_OP_FDX (1 << 9)
-#define P1CR_OP_MDI (1 << 7)
-#define P1CR_AN_DONE (1 << 6)
-#define P1CR_LINK_GOOD (1 << 5)
-#define P1CR_PNTR_FLOW (1 << 4)
-#define P1CR_PNTR_100BT_FDX (1 << 3)
-#define P1CR_PNTR_100BT_HDX (1 << 2)
-#define P1CR_PNTR_10BT_FDX (1 << 1)
-#define P1CR_PNTR_10BT_HDX (1 << 0)
-
-/* TX Frame control */
-
-#define TXFR_TXIC (1 << 15)
-#define TXFR_TXFID_MASK (0x3f << 0)
-#define TXFR_TXFID_SHIFT (0)
-
-#define KS_P1SR 0xF8
-#define P1SR_HP_MDIX (1 << 15)
-#define P1SR_REV_POL (1 << 13)
-#define P1SR_OP_100M (1 << 10)
-#define P1SR_OP_FDX (1 << 9)
-#define P1SR_OP_MDI (1 << 7)
-#define P1SR_AN_DONE (1 << 6)
-#define P1SR_LINK_GOOD (1 << 5)
-#define P1SR_PNTR_FLOW (1 << 4)
-#define P1SR_PNTR_100BT_FDX (1 << 3)
-#define P1SR_PNTR_100BT_HDX (1 << 2)
-#define P1SR_PNTR_10BT_FDX (1 << 1)
-#define P1SR_PNTR_10BT_HDX (1 << 0)
-
#define ENUM_BUS_NONE 0
#define ENUM_BUS_8BIT 1
#define ENUM_BUS_16BIT 2
@@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks)
ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
- ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
+ ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
/* Setup RxQ Command Control (RXQCR) */
ks->rc_rxqcr = RXQCR_CMD_CNTL;
@@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks)
*/
w = ks_rdreg16(ks, KS_P1MBCR);
- w &= ~P1MBCR_FORCE_FDX;
+ w &= ~BMCR_FULLDPLX;
ks_wrreg16(ks, KS_P1MBCR, w);
w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
@@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev)
ks_setup_int(ks);
data = ks_rdreg16(ks, KS_OBCR);
- ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
+ ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
/* overwriting the default MAC address */
if (pdev->dev.of_node) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index eeda4ed98333..e336f6ee94f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
tmp_push_vlan_tci =
FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
- NFP_FL_PUSH_VLAN_CFI;
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..0ed51e79db00 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
@@ -82,7 +82,6 @@
#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..9b8b843d0340 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- if (match.key->vlan_id || match.key->vlan_priority) {
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.key->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.key->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- ext->tci = cpu_to_be16(tmp_tci);
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.mask->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.mask->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- msk->tci = cpu_to_be16(tmp_tci);
- }
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.key->vlan_id);
+ ext->tci = cpu_to_be16(tmp_tci);
+
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.mask->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.mask->vlan_id);
+ msk->tci = cpu_to_be16(tmp_tci);
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d2c803bb4e56..94d228c04496 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = dev_queue_xmit(skb);
nfp_repr_inc_tx_stats(netdev, len, ret);
- return ret;
+ return NETDEV_TX_OK;
}
static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
- netdev->priv_flags |= IFF_NO_QUEUE;
+ netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
if (nfp_app_has_tc(app)) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 0c443ea98479..374a4d4371f9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
u16 board_type;
u16 supported_type;
- u16 link_speed;
+ u32 link_speed;
u16 link_duplex;
u16 link_autoneg;
u16 module_type;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 3b0adda7cc9c..a4cd6f2cfb86 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+ if (!skb)
+ break;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index cfb67b746595..58e0ca9093d3 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev)
write_reg_high(ioaddr, IMR, ISRh_RxErr);
lp->tx_unit_busy = 0;
- lp->pac_cnt_in_tx_buf = 0;
+ lp->pac_cnt_in_tx_buf = 0;
lp->saved_tx_size = 0;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c29dde064078..ed651dde6ef9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
+#include <linux/pci-aspm.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@@ -678,6 +679,7 @@ struct rtl8169_private {
struct work_struct work;
} wk;
+ unsigned irq_enabled:1;
unsigned supports_gmii:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
@@ -1293,6 +1295,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
static void rtl_irq_disable(struct rtl8169_private *tp)
{
RTL_W16(tp, IntrMask, 0);
+ tp->irq_enabled = 0;
}
#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
@@ -1301,6 +1304,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
static void rtl_irq_enable(struct rtl8169_private *tp)
{
+ tp->irq_enabled = 1;
RTL_W16(tp, IntrMask, tp->irq_mask);
}
@@ -5457,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
tp->cp_cmd |= PktCntrDisable | INTT_1;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_W16(tp, IntrMitigate, 0x5151);
+ RTL_W16(tp, IntrMitigate, 0x5100);
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@ -6520,9 +6524,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
u16 status = RTL_R16(tp, IntrStatus);
- u16 irq_mask = RTL_R16(tp, IntrMask);
- if (status == 0xffff || !(status & irq_mask))
+ if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -6540,7 +6543,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
}
- if (status & RTL_EVENT_NAPI) {
+ if (status & (RTL_EVENT_NAPI | LinkChg)) {
rtl_irq_disable(tp);
napi_schedule_irqoff(&tp->napi);
}
@@ -7350,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+ /* Disable ASPM completely as it causes random devices to stop working
+ * as well as full system hangs for some PCIe device users.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6073387511f8..67f9bb6e941b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev)
status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
/* Link ON & Not select default PHY & not ghost PHY */
- if ((status & MII_STAT_LINK) && !default_phy &&
- (phy->phy_types != UNKNOWN))
- default_phy = phy;
- else {
+ if ((status & MII_STAT_LINK) && !default_phy &&
+ (phy->phy_types != UNKNOWN)) {
+ default_phy = phy;
+ } else {
status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
@@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev)
phy_home = phy;
else if(phy->phy_types == LAN)
phy_lan = phy;
- }
+ }
}
if (!default_phy && phy_home)
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
/* Specific functions used for Ring mode */
/* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+ int bfsize)
{
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
- << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK);
+ if (bfsize == BUF_SIZE_16KiB)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
if (end)
p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
}
/* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
- << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK);
+ if (bfsize >= BUF_SIZE_2KiB) {
+ int bfsize2;
+
+ bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK);
+ }
if (end)
p->des1 |= cpu_to_le32(RDES1_END_RING);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwmac4_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwxgmac2_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
- !!(rdes0 & RDES0_FRAME_TYPE),
- !!(rdes0 & ERDES0_RX_MAC_ADDR));
+ if (likely(ret == good_frame))
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+ p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
else
- ehn_desc_rx_set_on_ring(p, end);
+ ehn_desc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
- int end);
+ int end, int bfsize);
/* DMA TX descriptor ring initialization */
void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
/* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..b7dd4e3c760d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- pr_warn("%s: Oversized frame spanned multiple buffers\n",
- __func__);
stats->rx_length_errors++;
return discard_frame;
}
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
- int end)
+ int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
else
- ndesc_rx_set_on_ring(p, end);
+ ndesc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
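
For clarity, the buffer1/buffer2 split that the new bfsize parameter implements for normal descriptors can be checked with a quick stand-alone sketch (illustrative only; the constant mirrors the patch, the helper name is made up):

#include <stdio.h>

#define BUF_SIZE_2KiB 2048

/* Mirror of the arithmetic above: buffer1 holds up to 2 KiB - 1 bytes,
 * buffer2 holds the remainder of the DMA buffer, if any.
 */
static void split_rx_buffer(int bfsize)
{
	int bfsize1 = bfsize < BUF_SIZE_2KiB - 1 ? bfsize : BUF_SIZE_2KiB - 1;
	int bfsize2 = 0;

	if (bfsize >= BUF_SIZE_2KiB) {
		bfsize2 = bfsize - BUF_SIZE_2KiB + 1;
		if (bfsize2 > BUF_SIZE_2KiB - 1)
			bfsize2 = BUF_SIZE_2KiB - 1;
	}
	printf("bfsize=%d -> buffer1=%d buffer2=%d\n",
	       bfsize, bfsize1, bfsize2);
}

int main(void)
{
	split_rx_buffer(1536);	/* MTU-sized buffer: fits entirely in buffer1 */
	split_rx_buffer(4096);	/* larger buffer: spills into buffer2 */
	return 0;
}
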
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d8c5bc412219..4d9bcb4d0378 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
- STMMAC_RING_MODE, 1, false, skb->len);
+ STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
- STMMAC_RING_MODE, 1, true, skb->len);
+ STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
+ skb->len);
} else {
des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE, 1, true, skb->len);
+ STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
+ skb->len);
}
tx_q->cur_tx = entry;
@@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
static void refill_desc3(void *priv_ptr, struct dma_desc *p)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+ struct stmmac_rx_queue *rx_q = priv_ptr;
+ struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 97c5e1aad88f..a26e36dbb5df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
}
/**
@@ -3216,14 +3218,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
csum_insertion, priv->mode, 1, last_segment,
skb->len);
-
- /* The own bit must be the last setting done when preparing the
- * descriptor, and a barrier is then needed to make sure that
- * all is coherent before granting the DMA engine.
- */
- wmb();
+ } else {
+ stmmac_set_tx_owner(priv, first);
}
+ /* The own bit must be the last setting done when preparing the
+ * descriptor, and a barrier is then needed to make sure that
+ * all is coherent before granting the DMA engine.
+ */
+ wmb();
+
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -3350,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
- unsigned int entry = rx_q->cur_rx;
+ unsigned int next_entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
- unsigned int next_entry;
unsigned int count = 0;
bool xmac;
@@ -3370,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- int status;
+ int entry, status;
struct dma_desc *p;
struct dma_desc *np;
+ entry = next_entry;
+
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
@@ -3429,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "len %d larger than size (%d)\n",
+ frame_len, priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
- break;
+ continue;
}
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3468,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
dev_warn(priv->device,
"packet dropped\n");
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
dma_sync_single_for_cpu(priv->device,
@@ -3488,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
} else {
skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
- netdev_err(priv->dev,
- "%s: Inconsistent Rx chain\n",
- priv->dev->name);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "%s: Inconsistent Rx chain\n",
+ priv->dev->name);
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
prefetch(skb->data - NET_IP_ALIGN);
rx_q->rx_skbuff[entry] = NULL;
@@ -3527,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
}
- entry = next_entry;
}
stmmac_rx_refill(priv, queue);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 5174d318901e..0a920c5936b2 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
- if (ret)
+ if (ret) {
+ of_node_put(interfaces);
return ret;
+ }
ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
- if (ret)
+ if (ret) {
+ of_node_put(interfaces);
return ret;
+ }
/* Create network interfaces */
INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ec7e7ec24ff9..4041c75997ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
ret = of_address_to_resource(np, 0, &dmares);
if (ret) {
dev_err(&pdev->dev, "unable to get DMA resource\n");
+ of_node_put(np);
goto free_netdev;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
+ of_node_put(np);
goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool destroy;
+ bool tx_disable; /* if true, do not wake up queue again */
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 813d195bbd57..e0dce373cdd9 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
+ net_device->tx_disable = false;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
} else {
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
- if (netif_tx_queue_stopped(txq) &&
+ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
(hv_get_avail_to_write_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
- if (atomic_read(&nvchan->queue_sends) < 1) {
+ if (atomic_read(&nvchan->queue_sends) < 1 &&
+ !net_device->tx_disable) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf4897043e83..b20fb0fb595b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
rcu_read_unlock();
}
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ nvscdev->tx_disable = false;
+ virt_wmb(); /* ensure queue wake up mechanism is on */
+
+ netif_tx_wake_all_queues(ndev);
+}
+
static int netvsc_open(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
rdev = nvdev->extension;
if (!rdev->link_state) {
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(nvdev, net);
}
if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
}
}
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ if (nvscdev) {
+ nvscdev->tx_disable = true;
+ virt_wmb(); /* ensure txq will not wake up after stop */
+ }
+
+ netif_tx_disable(ndev);
+}
+
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
- netif_tx_disable(net);
+ netvsc_tx_disable(nvdev, net);
/* No need to close rndis filter if it is removed already */
if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
- netif_tx_disable(ndev);
+ netvsc_tx_disable(nvdev, ndev);
ret = rndis_filter_close(nvdev);
if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
if (rdev->link_state) {
rdev->link_state = false;
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(net_device, net);
} else {
notify = true;
}
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
}
kfree(event);
break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add(&event->list, &ndev_ctx->reconfig_events);
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index cd1d8faccca5..cd6b95e673a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
WQ_MEM_RECLAIM);
+ if (unlikely(!lp->wqueue)) {
+ ret = -ENOMEM;
+ goto err_hw_init;
+ }
ret = adf7242_hw_init(lp);
if (ret)
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b6743f03dce0..3b88846de31b 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
goto out_err;
}
- genlmsg_reply(skb, info);
+ res = genlmsg_reply(skb, info);
break;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 071869db44cf..520657945b82 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
help
MDIO devices and driver infrastructure code.
+if MDIO_DEVICE
+
config MDIO_BUS
tristate
default m if PHYLIB=m
@@ -179,6 +181,7 @@ config MDIO_XGENE
APM X-Gene SoC's.
endif
+endif
config PHYLINK
tristate
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9605d4fe540b..cb86a3e90c7d 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
bcm54xx_phydsp_config(phydev);
+ /* Encode link speed into LED1 and LED3 pair (green/amber).
+ * Also flash these two LEDs on activity. This means configuring
+ * them for MULTICOLOR and encoding link/activity into them.
+ */
+ val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
+ BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
+ bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
+
+ val = BCM_LED_MULTICOLOR_IN_PHASE |
+ BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
+ BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
+ bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
+
return 0;
}
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index bbd8c22067f3..97d45bd5b38e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -15,6 +15,8 @@
#include <linux/netdevice.h>
#define DP83822_PHY_ID 0x2000a240
+#define DP83825I_PHY_ID 0x2000a150
+
#define DP83822_DEVADDR 0x1f
#define MII_DP83822_PHYSCR 0x11
@@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
return 0;
}
+#define DP83822_PHY_DRIVER(_id, _name) \
+ { \
+ PHY_ID_MATCH_MODEL(_id), \
+ .name = (_name), \
+ .features = PHY_BASIC_FEATURES, \
+ .soft_reset = dp83822_phy_reset, \
+ .config_init = dp83822_config_init, \
+ .get_wol = dp83822_get_wol, \
+ .set_wol = dp83822_set_wol, \
+ .ack_interrupt = dp83822_ack_interrupt, \
+ .config_intr = dp83822_config_intr, \
+ .suspend = dp83822_suspend, \
+ .resume = dp83822_resume, \
+ }
+
static struct phy_driver dp83822_driver[] = {
- {
- .phy_id = DP83822_PHY_ID,
- .phy_id_mask = 0xfffffff0,
- .name = "TI DP83822",
- .features = PHY_BASIC_FEATURES,
- .config_init = dp83822_config_init,
- .soft_reset = dp83822_phy_reset,
- .get_wol = dp83822_get_wol,
- .set_wol = dp83822_set_wol,
- .ack_interrupt = dp83822_ack_interrupt,
- .config_intr = dp83822_config_intr,
- .suspend = dp83822_suspend,
- .resume = dp83822_resume,
- },
+ DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
+ DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
};
module_phy_driver(dp83822_driver);
static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
+ { DP83825I_PHY_ID, 0xfffffff0 },
{ },
};
MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index a238388eb1a5..0eec2913c289 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
static int meson_gxl_config_intr(struct phy_device *phydev)
{
u16 val;
+ int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
val = INTSRC_ANEG_PR
@@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
val = 0;
}
+ /* Ack any pending IRQ */
+ ret = meson_gxl_ack_interrupt(phydev);
+ if (ret)
+ return ret;
+
return phy_write(phydev, INTSRC_MASK, val);
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 49fdd1ee798e..77068c545de0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
{
int ret;
- ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
if (ret < 0)
return ret;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1d68921723dc..e9ca1c088d0b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
- if (!(tun->dev->flags & IFF_UP))
- return -EIO;
-
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
return -EINVAL;
@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (err) {
+ err = -EFAULT;
+drop:
this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
if (frags) {
@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
mutex_unlock(&tfile->napi_mutex);
}
- return -EFAULT;
+ return err;
}
}
@@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
!tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
+ rcu_read_lock();
+ if (unlikely(!(tun->dev->flags & IFF_UP))) {
+ err = -EIO;
+ rcu_read_unlock();
+ goto drop;
+ }
+
if (frags) {
/* Exercise flow dissector code path. */
u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
napi_free_frags(&tfile->napi);
+ rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
WARN_ON(1);
return -ENOMEM;
@@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
} else {
netif_rx_ni(skb);
}
+ rcu_read_unlock();
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 820a2fe7d027..aff995be2a31 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
.tx_fixup = aqc111_tx_fixup,
};
+static const struct driver_info qnap_info = {
+ .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
{AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
{AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
+ {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
{ },/* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5512a1038721..3e9b2c319e45 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -851,6 +851,14 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..9195f3476b1d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..cd15c32b2e43 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,9 +1273,14 @@ static void vrf_setup(struct net_device *dev)
/* default to no qdisc; user can add if desired */
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_RX_HANDLER;
- dev->min_mtu = 0;
- dev->max_mtu = 0;
+ /* VRF devices do not care about MTU, but if the MTU is set
+ * too low then the ipv4 and ipv6 protocols are disabled,
+ * which breaks networking.
+ */
+ dev->min_mtu = IPV6_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 077f1b9f2761..d76dfed8d9bb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
- if (!net_eq(dev_net(vxlan->dev), net)) {
- gro_cells_destroy(&vxlan->gro_cells);
+ if (!net_eq(dev_net(vxlan->dev), net))
unregister_netdevice_queue(vxlan->dev, head);
- }
}
for (h = 0; h < PORT_HASH_SIZE; ++h)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index e9822a3ec373..94132cfd1f56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
struct cfg80211_pmsr_result *res)
{
- s64 rtt_avg = res->ftm.rtt_avg * 100;
-
- do_div(rtt_avg, 6666);
+ s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
IWL_DEBUG_INFO(mvm, "entry %d\n", index);
IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6eedc0ec7661..76629b98c78d 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
+ iowrite32(q->desc_dma, &q->regs->desc_base);
+ iowrite32(q->ndesc, &q->regs->ring_size);
q->head = ioread32(&q->regs->dma_idx);
q->tail = q->head;
iowrite32(q->head, &q->regs->cpu_idx);
@@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
else
mt76_dma_sync_idx(dev, q);
- wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ wake = wake && q->stopped &&
+ qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ if (wake)
+ q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index a033745adb2f..316167404729 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -679,19 +679,15 @@ out:
return ret;
}
-static void
-mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
- int idx = wcid->idx;
- int i;
+ int i, idx = wcid->idx;
rcu_assign_pointer(dev->wcid[idx], NULL);
synchronize_rcu();
- mutex_lock(&dev->mutex);
-
if (dev->drv->sta_remove)
dev->drv->sta_remove(dev, vif, sta);
@@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
mt76_wcid_free(dev->wcid_mask, idx);
+}
+EXPORT_SYMBOL_GPL(__mt76_sta_remove);
+static void
+mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ mutex_lock(&dev->mutex);
+ __mt76_sta_remove(dev, vif, sta);
mutex_unlock(&dev->mutex);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 5dfb0601f101..bcbfd3c4a44b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -126,6 +126,7 @@ struct mt76_queue {
int ndesc;
int queued;
int buf_size;
+ bool stopped;
u8 buf_offset;
u8 hw_idx;
@@ -143,6 +144,7 @@ struct mt76_mcu_ops {
const struct mt76_reg_pair *rp, int len);
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *rp, int len);
+ int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
@@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state);
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index afcd86f735b4..4dcb465095d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
- if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
- __sw_hweight8(dev->beacon_mask))
+ if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
dev->beacon_check++;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index d69e82c66ab2..b3ae0aaea62a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -27,12 +27,16 @@ static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
__le32 *txd = (__le32 *)skb->data;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
+ void *priv;
int idx;
u32 val;
+ u8 tid;
- if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
+ if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
val = le32_to_cpu(txd[1]);
@@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
if (!wcid)
goto free;
- msta = container_of(wcid, struct mt7603_sta, wcid);
+ priv = msta = container_of(wcid, struct mt7603_sta, wcid);
val = le32_to_cpu(txd[0]);
skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
+ val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+ val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+ txd[0] = cpu_to_le32(val);
+
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ ieee80211_sta_set_buffered(sta, tid, true);
+
spin_lock_bh(&dev->ps_lock);
__skb_queue_tail(&msta->psq, skb);
if (skb_queue_len(&msta->psq) >= 64) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 15cc8f33b34d..d54dda67d036 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -112,7 +112,7 @@ static void
mt7603_phy_init(struct mt7603_dev *dev)
{
int rx_chains = dev->mt76.antenna_mask;
- int tx_chains = __sw_hweight8(rx_chains) - 1;
+ int tx_chains = hweight8(rx_chains) - 1;
mt76_rmw(dev, MT_WF_RMAC_RMCR,
(MT_WF_RMAC_RMCR_SMPS_MODE |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 0a0115861b51..5e31d7da96fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1072,7 +1072,7 @@ out:
case MT_PHY_TYPE_HT:
final_rate_flags |= IEEE80211_TX_RC_MCS;
final_rate &= GENMASK(5, 0);
- if (i > 15)
+ if (final_rate > 15)
return false;
break;
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index b10775ed92e6..cc0fe0933b2d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7603.h"
+#include "mac.h"
#include "eeprom.h"
static int
@@ -386,6 +387,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
}
static void
+mt7603_ps_set_more_data(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+static void
mt7603_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
@@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
__skb_queue_head_init(&list);
+ mt7603_wtbl_set_ps(dev, msta, false);
+
spin_lock_bh(&dev->ps_lock);
skb_queue_walk_safe(&msta->psq, skb, tmp) {
if (!nframes)
@@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
skb_set_queue_mapping(skb, MT_TXQ_PSD);
__skb_unlink(skb, &msta->psq);
+ mt7603_ps_set_more_data(skb);
__skb_queue_tail(&list, skb);
nframes--;
}
spin_unlock_bh(&dev->ps_lock);
+ if (!skb_queue_empty(&list))
+ ieee80211_sta_eosp(sta);
+
mt7603_ps_tx_list(dev, &list);
if (nframes)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 4b0713f1fd5e..d06905ea8cc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
struct ieee80211_hw *hw = mt76_hw(dev);
- int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
+ int n_chains = hweight8(dev->mt76.antenna_mask);
struct {
u8 control_chan;
u8 center_chan;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index e13fea80d970..b920be1f5718 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
}
mem_base = devm_ioremap_resource(&pdev->dev, res);
- if (!mem_base) {
+ if (IS_ERR(mem_base)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
- return -EINVAL;
+ return PTR_ERR(mem_base);
}
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 0290ba5869a5..736f81752b5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
{ MT_MM20_PROT_CFG, 0x01742004 },
{ MT_MM40_PROT_CFG, 0x03f42084 },
{ MT_TXOP_CTRL_CFG, 0x0000583f },
- { MT_TX_RTS_CFG, 0x00092b20 },
+ { MT_TX_RTS_CFG, 0x00ffff20 },
{ MT_EXP_ACK_TIME, 0x002400ca },
{ MT_TXOP_HLDR_ET, 0x00000002 },
{ MT_XIFS_TIME_CFG, 0x33a41010 },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 91718647da02..e5a06f74a6f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
- u32 asic_rev, mac_rev;
+ u32 mac_rev;
int ret;
mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
@@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
goto err;
}
- asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
mac_rev = mt76_rr(dev, MT_MAC_CSR0);
dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
- asic_rev, mac_rev);
+ mdev->rev, mac_rev);
+ if (!is_mt76x0(dev)) {
+ ret = -ENODEV;
+ goto err;
+ }
/* Note: vendor driver skips this check for MT76X0U */
if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 6915cce5def9..07061eb4d1e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -51,6 +51,7 @@ struct mt76x02_calibration {
u16 false_cca;
s8 avg_rssi_all;
s8 agc_gain_adjust;
+ s8 agc_lowest_gain;
s8 low_gain;
s8 temp_vco;
@@ -114,8 +115,11 @@ struct mt76x02_dev {
struct mt76x02_dfs_pattern_detector dfs_pd;
/* edcca monitor */
+ unsigned long ed_trigger_timeout;
bool ed_tx_blocked;
bool ed_monitor;
+ u8 ed_monitor_enabled;
+ u8 ed_monitor_learning;
u8 ed_trigger;
u8 ed_silent;
ktime_t ed_time;
@@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
void mt76x02_init_debugfs(struct mt76x02_dev *dev);
+static inline bool is_mt76x0(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7610 ||
+ mt76_chip(&dev->mt76) == 0x7630 ||
+ mt76_chip(&dev->mt76) == 0x7650;
+}
+
static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612 ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index 7580c5c986ff..b1d6fd4861e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
return 0;
}
+static int
+mt76_edcca_set(void *data, u64 val)
+{
+ struct mt76x02_dev *dev = data;
+ enum nl80211_dfs_regions region = dev->dfs_pd.region;
+
+ dev->ed_monitor_enabled = !!val;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ region == NL80211_DFS_ETSI;
+ mt76x02_edcca_init(dev, true);
+
+ return 0;
+}
+
+static int
+mt76_edcca_get(void *data, u64 *val)
+{
+ struct mt76x02_dev *dev = data;
+
+ *val = dev->ed_monitor_enabled;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
+ "%lld\n");
+
void mt76x02_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
@@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
+ debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index e4649103efd4..17d12d212d1b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
- dev->ed_monitor = region == NL80211_DFS_ETSI;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev, true);
dfs_pd->region = region;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 91ff6598eccf..9ed231abe916 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x02_cipher_type cipher;
+ u8 key_data[32];
+ u32 iv, eiv;
+ u64 pn;
+
+ cipher = mt76x02_mac_get_key_info(key, key_data);
+ iv = mt76_rr(dev, MT_WCID_IV(idx));
+ eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
+
+ pn = (u64)eiv << 16;
+ if (cipher == MT_CIPHER_TKIP) {
+ pn |= (iv >> 16) & 0xff;
+ pn |= (iv & 0xff) << 8;
+ } else if (cipher >= MT_CIPHER_AES_CCMP) {
+ pn |= iv & 0xffff;
+ } else {
+ return;
+ }
+
+ atomic64_set(&key->tx_pn, pn);
+}
+
+
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
u8 iv_data[8];
+ u64 pn;
cipher = mt76x02_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
@@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
if (key) {
mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+
+ pn = atomic64_read(&key->tx_pn);
+
iv_data[3] = key->keyidx << 6;
- if (cipher >= MT_CIPHER_TKIP)
+ if (cipher >= MT_CIPHER_TKIP) {
iv_data[3] |= 0x20;
+ put_unaligned_le32(pn >> 16, &iv_data[4]);
+ }
+
+ if (cipher == MT_CIPHER_TKIP) {
+ iv_data[0] = (pn >> 8) & 0xff;
+ iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
+ iv_data[2] = pn & 0xff;
+ } else if (cipher >= MT_CIPHER_AES_CCMP) {
+ put_unaligned_le16((pn & 0xffff), &iv_data[0]);
+ }
}
mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
@@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
}
}
mt76x02_edcca_tx_enable(dev, true);
+ dev->ed_monitor_learning = true;
/* clear previous CCA timer value */
mt76_rr(dev, MT_ED_CCA_TIMER);
@@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
#define MT_EDCCA_TH 92
#define MT_EDCCA_BLOCK_TH 2
+#define MT_EDCCA_LEARN_TH 50
+#define MT_EDCCA_LEARN_CCA 180
+#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
+
static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
ktime_t cur_time;
@@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
dev->ed_trigger = 0;
}
- if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
- !dev->ed_tx_blocked)
+ if (dev->cal.agc_lowest_gain &&
+ dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
+ dev->ed_trigger > MT_EDCCA_LEARN_TH) {
+ dev->ed_monitor_learning = false;
+ dev->ed_trigger_timeout = jiffies + 20 * HZ;
+ } else if (!dev->ed_monitor_learning &&
+ time_is_after_jiffies(dev->ed_trigger_timeout)) {
+ dev->ed_monitor_learning = true;
+ mt76x02_edcca_tx_enable(dev, true);
+ }
+
+ if (dev->ed_monitor_learning)
+ return;
+
+ if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
mt76x02_edcca_tx_enable(dev, false);
- else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
- dev->ed_tx_blocked)
+ else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
mt76x02_edcca_tx_enable(dev, true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 6b1f25d2f64c..caeeef96c42f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key);
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
u8 *mac);
void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 1229f19f2b02..daaed1220147 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include "mt76x02.h"
+#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"
struct beacon_bc_data {
@@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
return i < 4;
}
+static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, void *data)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76_wcid *wcid;
+
+ if (!sta)
+ return;
+
+ wcid = (struct mt76_wcid *) sta->drv_priv;
+
+ if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
+ return;
+
+ mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
+}
+
+static void mt76x02_reset_state(struct mt76x02_dev *dev)
+{
+ int i;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
+ rcu_read_unlock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct mt76x02_sta *msta;
+ struct mt76_wcid *wcid;
+ void *priv;
+
+ wcid = rcu_dereference_protected(dev->mt76.wcid[i],
+ lockdep_is_held(&dev->mt76.mutex));
+ if (!wcid)
+ continue;
+
+ priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+
+ priv = msta->vif;
+ vif = container_of(priv, struct ieee80211_vif, drv_priv);
+
+ __mt76_sta_remove(&dev->mt76, vif, sta);
+ memset(msta, 0, sizeof(*msta));
+ }
+
+ dev->vif_mask = 0;
+ dev->beacon_mask = 0;
+}
+
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
u32 mask = dev->mt76.mmio.irqmask;
+ bool restart = dev->mt76.mcu_ops->mcu_restart;
int i;
ieee80211_stop_queues(dev->mt76.hw);
@@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_lock(&dev->mt76.mutex);
+ if (restart)
+ mt76x02_reset_state(dev);
+
if (dev->beacon_mask)
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
@@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
/* let fw reset DMA */
mt76_set(dev, 0x734, 0x3);
+ if (restart)
+ dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
+
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
mt76_queue_rx_reset(dev, i);
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
- mt76_set(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ mt76x02_mac_start(dev);
+
if (dev->ed_monitor)
mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
- if (dev->beacon_mask)
+ if (dev->beacon_mask && !restart)
mt76_set(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN);
@@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
napi_schedule(&dev->mt76.napi[i]);
}
- ieee80211_wake_queues(dev->mt76.hw);
-
- mt76_txq_schedule_all(&dev->mt76);
+ if (restart) {
+ mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+ ieee80211_restart_hw(dev->mt76.hw);
+ } else {
+ ieee80211_wake_queues(dev->mt76.hw);
+ mt76_txq_schedule_all(&dev->mt76);
+ }
}
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index a020c757ba5c..a54b63a96eae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
ret = true;
}
+ dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
+
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 43f07461c8d3..6fb52b596d42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
mt76x02_insert_hdr_pad(skb);
- txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+ txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+ skb_push(skb, sizeof(struct mt76x02_txwi));
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index a48c261b0c63..cd072ac614f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
int idx = 0;
+ memset(msta, 0, sizeof(*msta));
+
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
if (idx < 0)
return -ENOSPC;
@@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76_txq *mtxq;
+ memset(mvif, 0, sizeof(*mvif));
+
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
mvif->group_wcid.hw_key_idx = -1;
@@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct mt76x02_dev *dev = hw->priv;
unsigned int idx = 0;
+ /* Allow changing the address in HW when creating the first interface. */
+ if (!dev->vif_mask &&
+ (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
+ memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
+ mt76x02_mac_setaddr(dev, vif->addr);
+
if (vif->addr[0] & BIT(1))
idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
@@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (dev->vif_mask & BIT(idx))
return -EBUSY;
- /* Allow to change address in HW if we create first interface. */
- if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
- mt76x02_mac_setaddr(dev, vif->addr);
-
dev->vif_mask |= BIT(idx);
mt76x02_vif_init(dev, vif, idx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index f8534362e2c8..a30ef2c5a9db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_TX_SW_CFG1, 0x00010000 },
{ MT_TX_SW_CFG2, 0x00000000 },
{ MT_TXOP_CTRL_CFG, 0x0400583f },
- { MT_TX_RTS_CFG, 0x00100020 },
+ { MT_TX_RTS_CFG, 0x00ffff20 },
{ MT_TX_TIMEOUT_CFG, 0x000a2290 },
{ MT_TX_RETRY_CFG, 0x47f01f0f },
{ MT_EXP_ACK_TIME, 0x002c00dc },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index 6c619f1c65c9..d7abe3d73bad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
void mt76x2_cleanup(struct mt76x02_dev *dev);
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 984d9c4c2e1a..d3927a13e92e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
}
}
-static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
const u8 *macaddr = dev->mt76.macaddr;
u32 val;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
index 03e24ae7f66c..605dc66ae83b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -165,9 +165,30 @@ error:
return -ENOENT;
}
+static int
+mt76pci_mcu_restart(struct mt76_dev *mdev)
+{
+ struct mt76x02_dev *dev;
+ int ret;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ mt76x02_mcu_cleanup(dev);
+ mt76x2_mac_reset(dev, true);
+
+ ret = mt76pci_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ return 0;
+}
+
int mt76x2_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+ .mcu_restart = mt76pci_mcu_restart,
.mcu_send_msg = mt76x02_mcu_msg_send,
};
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index 1848e8ab2e21..769a9b972044 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
- if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ val = 0x1836 << 16;
+ if (!mt76x2_has_ext_lna(dev) &&
+ dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
- else
- val = 0x1836 << 16;
+
+ if (mt76x2_has_ext_lna(dev) &&
+ dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
+ dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+ val = 0x0f36 << 16;
val |= 0xf8;
@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
u8 low_gain_delta, gain_delta;
+ u32 agc_35, agc_37;
bool gain_change;
int low_gain;
u32 val;
@@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
else
low_gain_delta = 14;
+ agc_37 = 0x2121262c;
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ agc_35 = 0x11111516;
+ else if (low_gain == 2)
+ agc_35 = agc_37 = 0x08080808;
+ else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ agc_35 = 0x10101014;
+ else
+ agc_35 = 0x11111116;
+
if (low_gain == 2) {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
@@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
dev->cal.agc_gain_adjust = 0;
} else {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
- mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
- else
- mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
- mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
dev->cal.agc_gain_adjust = low_gain_delta;
}
+ mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
+ mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
+
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
mt76x2_phy_set_gain_val(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index ddb6b2c48e01..ac0f13d46299 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -21,11 +21,10 @@
#include "mt76x2u.h"
static const struct usb_device_id mt76x2u_device_table[] = {
- { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
{ USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
{ USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
{ USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
- { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+ { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
@@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+ if (!is_mt76x2(dev)) {
+ err = -ENODEV;
+ goto err;
+ }
err = mt76x2u_register_device(dev);
if (err < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index 5e84b4535cb1..3b82345756ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
- mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 5a349fe3e576..2585df512335 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
- if (q->queued > q->ndesc - 8)
+ if (q->queued > q->ndesc - 8 && !q->stopped) {
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+ q->stopped = true;
+ }
+
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
@@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
if (last_skb) {
mt76_queue_ps_skb(dev, sta, last_skb, true);
dev->queue_ops->kick(dev, hwq);
+ } else {
+ ieee80211_sta_eosp(sta);
}
+
spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
@@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
struct mt76_queue *hwq = mtxq->hwq;
+ if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+ return;
+
spin_lock_bh(&hwq->lock);
if (list_empty(&mtxq->list))
list_add_tail(&mtxq->list, &hwq->swq);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index ae6ada370597..4c1abd492405 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_lock_bh(&q->lock);
}
mt76_txq_schedule(dev, q);
- wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+
+ wake = q->stopped && q->queued < q->ndesc - 8;
+ if (wake)
+ q->stopped = false;
+
if (!q->queued)
wake_up(&dev->tx_wait);
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index d8b7863f7926..6ae7f14dc9bf 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
asic_rev, mac_rev);
+ if ((asic_rev >> 16) != 0x7601) {
+ ret = -ENODEV;
+ goto err;
+ }
/* Note: vendor driver skips this check for MT7601U */
if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2839bb70badf..f0716f6ce41f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
struct nvme_ns *ns)
{
- enum nvme_ana_state old;
-
mutex_lock(&ns->head->lock);
- old = ns->ana_state;
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
- if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+ if (nvme_state_is_live(ns->ana_state))
nvme_mpath_set_live(ns);
mutex_unlock(&ns->head->lock);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e7e08889865e..68c49dd67210 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return ret;
}
-static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
union nvme_result res = {};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2d73b66e3686..b3e765a95af8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
ret = nvmet_p2pmem_ns_enable(ns);
if (ret)
- goto out_unlock;
+ goto out_dev_disable;
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
-
+out_dev_disable:
nvmet_ns_dev_disable(ns);
goto out_unlock;
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 3e43212d3c1c..bc6ebb51b0bf 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -75,11 +75,11 @@ err:
return ret;
}
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
- bv->bv_page = sg_page_iter_page(iter);
- bv->bv_offset = iter->sg->offset;
- bv->bv_len = PAGE_SIZE - iter->sg->offset;
+ bv->bv_page = sg_page(sg);
+ bv->bv_offset = sg->offset;
+ bv->bv_len = sg->length;
}
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
- struct sg_page_iter sg_pg_iter;
+ ssize_t nr_bvec = req->sg_cnt;
unsigned long bv_cnt = 0;
bool is_sync = false;
size_t len = 0, total_len = 0;
ssize_t ret = 0;
loff_t pos;
-
+ int i;
+ struct scatterlist *sg;
if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
}
memset(&req->f.iocb, 0, sizeof(struct kiocb));
- for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
- nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
len += req->f.bvec[bv_cnt].bv_len;
total_len += req->f.bvec[bv_cnt].bv_len;
bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+ ssize_t nr_bvec = req->sg_cnt;
if (!req->sg_cnt || !nr_bvec) {
nvmet_req_complete(req, 0);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 1be571c20062..6bad04cbb1d3 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -157,8 +157,12 @@
#define DBG_IRT(x...)
#endif
+#ifdef CONFIG_64BIT
+#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa))
+#else
#define COMPARE_IRTE_ADDR(irte, hpa) \
- ((irte)->dest_iosapic_addr == F_EXTEND(hpa))
+ ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
+#endif
#define IOSAPIC_REG_SELECT 0x00
#define IOSAPIC_REG_WINDOW 0x10
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 56dd83a45e55..5484a46dafda 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port)
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
- struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
- memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
@@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name)
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
- dev = parport_register_dev_model(port, name, &par_cb, devnum);
+ dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
parport_put_port(port);
if (!dev)
return NULL;
@@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port)
kfree(deviceid);
return detected;
}
-
-static int daisy_drv_probe(struct pardevice *par_dev)
-{
- struct device_driver *drv = par_dev->dev.driver;
-
- if (strcmp(drv->name, "daisy_drv"))
- return -ENODEV;
- if (strcmp(par_dev->name, daisy_dev_name))
- return -ENODEV;
-
- return 0;
-}
-
-static struct parport_driver daisy_driver = {
- .name = "daisy_drv",
- .probe = daisy_drv_probe,
- .devmodel = true,
-};
-
-int daisy_drv_init(void)
-{
- return parport_register_driver(&daisy_driver);
-}
-
-void daisy_drv_exit(void)
-{
- parport_unregister_driver(&daisy_driver);
-}
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e5e6a463a941..e035174ba205 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
- struct pardevice *dev = parport_open(devnum, daisy_dev_name);
+ struct pardevice *dev = parport_open (devnum, "Device ID probe");
if (!dev)
return -ENXIO;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 0171b8dbcdcd..5dc53d420ca8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = {
int parport_bus_init(void)
{
- int retval;
-
- retval = bus_register(&parport_bus_type);
- if (retval)
- return retval;
- daisy_drv_init();
-
- return 0;
+ return bus_register(&parport_bus_type);
}
void parport_bus_exit(void)
{
- daisy_drv_exit();
bus_unregister(&parport_bus_type);
}
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 5163097b43df..4bbd9ede38c8 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
int new_mode;
- if (phy->index != 0)
+ if (phy->index != 0) {
+ if (mode == PHY_MODE_USB_HOST)
+ return 0;
return -EINVAL;
+ }
switch (mode) {
case PHY_MODE_USB_HOST:
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 900c7073c46f..71308766e891 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -440,7 +440,7 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
ret = cros_ec_create_pdinfo(debug_info);
if (ret)
- goto remove_debugfs;
+ goto remove_log;
ec->debug_info = debug_info;
@@ -448,6 +448,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
return 0;
+remove_log:
+ cros_ec_cleanup_console_log(debug_info);
remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
@@ -467,7 +469,8 @@ static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev)
{
struct cros_ec_dev *ec = dev_get_drvdata(dev);
- cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
+ if (ec->debug_info->log_buffer.buf)
+ cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
return 0;
}
@@ -476,7 +479,8 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
{
struct cros_ec_dev *ec = dev_get_drvdata(dev);
- schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
+ if (ec->debug_info->log_buffer.buf)
+ schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
return 0;
}
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
index f6ff29a11f1a..14355668ddfa 100644
--- a/drivers/platform/chrome/wilco_ec/mailbox.c
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -223,11 +223,11 @@ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
msg->command, msg->type, msg->flags, msg->response_size,
msg->request_size);
+ mutex_lock(&ec->mailbox_lock);
/* Prepare request packet */
rq = ec->data_buffer;
wilco_ec_prepare(msg, rq);
- mutex_lock(&ec->mailbox_lock);
ret = wilco_ec_transfer(ec, msg, rq);
mutex_unlock(&ec->mailbox_lock);
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index 91751617b37a..c53a2185a039 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
arb->rstc.ops = &meson_audio_arb_rstc_ops;
arb->rstc.of_node = dev->of_node;
+ arb->rstc.owner = THIS_MODULE;
/*
* Enable general :
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a71734c41693..f933c06bff4f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -667,9 +667,9 @@ config RTC_DRV_S5M
will be called rtc-s5m.
config RTC_DRV_SD3078
- tristate "ZXW Crystal SD3078"
+ tristate "ZXW Shenzhen whwave SD3078"
help
- If you say yes here you get support for the ZXW Crystal
+ If you say yes here you get support for the ZXW Shenzhen whwave
SD3078 RTC chips.
This driver can also be built as a module. If so, the module
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index e5444296075e..4d6bf9304ceb 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev))
- enable_irq_wake(cros_ec_rtc->cros_ec->irq);
+ return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
return 0;
}
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev))
- disable_irq_wake(cros_ec_rtc->cros_ec->irq);
+ return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
return 0;
}
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index b4e054c64bad..69b54e5556c0 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
+ /*
+ * TODO: some models have alarms on a minute boundary but still support
+ * real hardware interrupts. Add this once the core supports it.
+ */
+ if (config->rtc_data_start != RTC_SEC)
+ rtc->rtc_dev->uie_unsupported = 1;
+
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index d417b203cbc5..1d3de2a3d1a4 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
- int value = 0xff; /* return 0xff for ignored values */
+ int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4159c63a5fd2..a835b31aad99 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,6 +24,7 @@
#include <asm/crw.h>
#include <asm/isc.h>
#include <asm/ebcdic.h>
+#include <asm/ap.h>
#include "css.h"
#include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
" failed (rc=%d).\n", ret);
}
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+ CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+ if (sei_area->rs != 5)
+ return;
+
+ ap_bus_cfg_chg();
+}
+
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
case 2: /* i/o resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
+ case 3: /* ap config changed */
+ chsc_process_sei_ap_cfg_chg(sei_area);
+ break;
case 7: /* channel-path-availability information */
chsc_process_sei_chp_avail(sei_area);
break;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..0b3b9de45c60 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
struct irb *irb;
+ bool is_final;
private = container_of(work, struct vfio_ccw_private, io_work);
irb = &private->irb;
+ is_final = !(scsw_actl(&irb->scsw) &
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
- cp_free(&private->cp);
+ if (is_final)
+ cp_free(&private->cp);
}
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
- if (private->mdev)
+ if (private->mdev && is_final)
private->state = VFIO_CCW_STATE_IDLE;
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e15816ff1265..1546389d71db 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
+ /* prepare ap queue device removal */
if (is_queue_dev(dev))
- ap_queue_remove(to_ap_queue(dev));
+ ap_queue_prepare_remove(to_ap_queue(dev));
+
+ /* driver's chance to clean up gracefully */
if (ap_drv->remove)
ap_drv->remove(ap_dev);
+ /* now do the ap queue device remove */
+ if (is_queue_dev(dev))
+ ap_queue_remove(to_ap_queue(dev));
+
/* Remove queue/card from list of active queues/cards */
spin_lock_bh(&ap_list_lock);
if (is_card_dev(dev))
@@ -861,6 +868,16 @@ void ap_bus_force_rescan(void)
EXPORT_SYMBOL(ap_bus_force_rescan);
/*
+* A config change has happened, force an ap bus rescan.
+*/
+void ap_bus_cfg_chg(void)
+{
+ AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+ ap_bus_force_rescan();
+}
+
+/*
* hex2bitmap() - parse hex mask string and set bitmap.
* Valid strings are "0x012345678" with at least one valid hex number.
* Rest of the bitmap to the right is padded with 0. No spaces allowed
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d0059eae5d94..15a98a673c5c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,6 +91,7 @@ enum ap_state {
AP_STATE_WORKING,
AP_STATE_QUEUE_FULL,
AP_STATE_SUSPEND_WAIT,
+ AP_STATE_REMOVE, /* about to be removed from driver */
AP_STATE_UNBOUND, /* momentary not bound to a driver */
AP_STATE_BORKED, /* broken */
NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba261210c6da..6a340f2c3556 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
+ [AP_STATE_REMOVE] = {
+ [AP_EVENT_POLL] = ap_sm_nop,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
[AP_STATE_UNBOUND] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
}
EXPORT_SYMBOL(ap_flush_queue);
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
{
- ap_flush_queue(aq);
+ spin_lock_bh(&aq->lock);
+ /* flush queue */
+ __ap_flush_queue(aq);
+ /* set REMOVE state to prevent new messages from being queued */
+ aq->state = AP_STATE_REMOVE;
del_timer_sync(&aq->timeout);
+ spin_unlock_bh(&aq->lock);
+}
- /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+ /*
+ * All messages have been flushed and the state is
+ * AP_STATE_REMOVE. Now reset with zero, which also
+ * clears the irq registration, and move the state
+ * to AP_STATE_UNBOUND to signal that this queue is
+ * currently not used by any driver.
+ */
spin_lock_bh(&aq->lock);
ap_zapq(aq->qid);
aq->state = AP_STATE_UNBOUND;
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_remove);
void ap_queue_reinit_state(struct ap_queue *aq)
{
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_reinit_state);
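The ap_queue hunk above splits queue teardown into a prepare phase (flush pending messages and block new ones behind AP_STATE_REMOVE) and a final remove phase (reset the hardware and mark the queue unbound). A minimal user-space sketch of that two-phase pattern, using an invented queue type and a pthread mutex rather than the kernel's spinlock and AP APIs:

```c
#include <pthread.h>
#include <stdio.h>

enum q_state { Q_WORKING, Q_REMOVE, Q_UNBOUND };

struct queue {
	pthread_mutex_t lock;
	enum q_state state;
	int pending;			/* messages still queued */
};

/* Phase 1: drop queued work and refuse new submissions from now on. */
static void queue_prepare_remove(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->pending = 0;			/* "flush" the queue */
	q->state = Q_REMOVE;		/* new messages are rejected */
	pthread_mutex_unlock(&q->lock);
}

/* Phase 2: reset the (imaginary) hardware and mark the queue unbound. */
static void queue_remove(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	/* a hardware reset would happen here */
	q->state = Q_UNBOUND;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, Q_WORKING, 3 };

	queue_prepare_remove(&q);	/* the driver's remove callback runs in between */
	queue_remove(&q);
	printf("state=%d pending=%d\n", q.state, q.pending);
	return 0;
}
```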
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..689c2af7026a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module **pmod,
unsigned int weight)
{
if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
atomic_add(weight, &zc->load);
atomic_add(weight, &zq->load);
zq->request_count++;
+ *pmod = zq->queue->ap_dev.drv->driver.owner;
return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module *mod,
unsigned int weight)
{
- struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
zq->request_count--;
atomic_sub(weight, &zc->load);
atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
unsigned int func_code;
unsigned short *domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
unsigned int func_code;
struct ap_message ap_msg;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
struct ap_message ap_msg;
unsigned int domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
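In the zcrypt_api.c changes above, zcrypt_pick_queue() records which module owner it took a reference on and zcrypt_drop_queue() releases that same recorded module, so the put always matches the earlier get even if the queue's driver binding changes in between. A hedged, self-contained sketch of this acquire-returns-token pattern, with invented types instead of the kernel's module refcount API:

```c
#include <stdio.h>

struct module { int refcount; };
struct queue  { struct module *owner; int requests; };

/* Take a reference and report which module it was taken on. */
static struct queue *pick_queue(struct queue *q, struct module **pmod)
{
	if (!q)
		return NULL;
	q->owner->refcount++;
	q->requests++;
	*pmod = q->owner;	/* remember the module the get was done on */
	return q;
}

/*
 * Drop the reference on the module recorded at pick time, not on
 * whatever q->owner happens to point at by now.
 */
static void drop_queue(struct queue *q, struct module *mod)
{
	q->requests--;
	mod->refcount--;
}

int main(void)
{
	struct module m = { 0 };
	struct queue q = { &m, 0 };
	struct module *mod;

	struct queue *pref = pick_queue(&q, &mod);
	/* ... issue the request; q->owner could be rebound meanwhile ... */
	drop_queue(pref, mod);
	printf("refcount=%d\n", m.refcount);
	return 0;
}
```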
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 197b0f5b63e7..44bd6f04c145 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
+ struct sk_buff *skb;
+
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
- __skb_queue_purge(&buf->skb_list);
+ while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
+ consume_skb(skb);
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8efb2e8ff8f4..c3067fd3bd9e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
} /* else fall through */
QETH_TXQ_STAT_INC(queue, tx_dropped);
- QETH_TXQ_STAT_INC(queue, tx_errors);
- dev_kfree_skb_any(skb);
+ kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
@@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
+ qeth_l2_vnicc_set_defaults(card);
+
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l2_create_device_attributes(&gdev->dev);
if (rc)
@@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
}
hash_init(card->mac_htable);
- card->info.hwtrap = 0;
- qeth_l2_vnicc_set_defaults(card);
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7e68d9d16859..53712cf26406 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
- QETH_TXQ_STAT_INC(queue, tx_errors);
- dev_kfree_skb_any(skb);
+ kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
@@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
+ hash_init(card->ip_htable);
+
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l3_create_device_attributes(&gdev->dev);
if (rc)
return rc;
}
- hash_init(card->ip_htable);
+
hash_init(card->ip_mc_htable);
- card->info.hwtrap = 0;
return 0;
}
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 744a64680d5b..e8fc28dba8df 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
add_timer(&erp_action->timer);
}
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag)
+{
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+ read_unlock(&adapter->port_list_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
int clear, char *dbftag)
{
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
int lun_status;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
if (zsdev->port != port)
continue;
/* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3fce47b0b21b..c6acca521ffe 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
char *dbftag);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index db00b5e3abbe..33eddb02ee30 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
list_for_each_entry(port, &adapter->port_list, list) {
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
- if (!port->d_id)
- zfcp_erp_port_reopen(port,
- ZFCP_STATUS_COMMON_ERP_FAILED,
- "fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
struct fc_els_rscn *head;
struct fc_els_rscn_page *page;
u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
no_entries = be16_to_cpu(head->rscn_plen) /
sizeof(struct fc_els_rscn_page);
+ if (no_entries > 1) {
+ /* handle failed ports */
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
+ if (port->d_id)
+ continue;
+ zfcp_erp_port_reopen(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fcrscn1");
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ }
+
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
page++;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f4f6a07c5222..221d0dfb8493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret = SUCCESS, fc_ret;
+ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+ zfcp_erp_wait(adapter);
+ }
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 1df5171594b8..11fb68d7e60d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
return capacity;
}
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+ return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
static inline int aac_adapter_check_health(struct aac_dev *dev)
{
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -1;
return (dev)->a_ops.adapter_check_health(dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e67e032936ef..78430a7b294c 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
return -ETIMEDOUT;
}
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -EFAULT;
if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
spin_unlock_irqrestore(&fibptr->event_lock, flags);
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -EFAULT;
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 3c3cf89f713f..14bac4966c87 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1801,6 +1801,12 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
}
hisi_sas_dereg_device(hisi_hba, device);
+ if (dev_is_sata(device)) {
+ rc = hisi_sas_softreset_ata_disk(device);
+ if (rc)
+ return TMF_RESP_FUNC_FAILED;
+ }
+
rc = hisi_sas_debug_I_T_nexus_reset(device);
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index dbaa4f131433..3ad997ac3510 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -139,6 +139,7 @@ static const struct {
{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+ { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = rsp->data.info.rsp_code;
- scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+ scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
"flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
- cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+ cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
if (crq->format == IBMVFC_PARTITION_MIGRATED) {
/* We need to re-setup the interpartition connection */
- dev_info(vhost->dev, "Re-enabling adapter\n");
+ dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
vhost->client_migrated = 1;
ibmvfc_purge_requests(vhost, DID_REQUEUE);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
- } else {
- dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+ } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+ dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+ } else {
+ dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
}
return;
case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error, status);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
break;
}
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
- ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
- ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+ ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
break;
}
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
- mad->iu.status, mad->iu.error,
+ be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
- rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
- rsp->fc_explain, status);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+ ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+ status);
break;
}
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
ibmvfc_free_event(evt);
return;
case IBMVFC_MAD_CRQ_ERROR:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b81a53c4a9a8..459cc288ba1d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
IBMVFC_CRQ_XPORT_EVENT = 0xFF,
};
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
IBMVFC_CRQ_INIT = 0x01,
IBMVFC_CRQ_INIT_COMPLETE = 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+ IBMVFC_PARTNER_FAILED = 0x01,
+ IBMVFC_PARTNER_DEREGISTER = 0x02,
IBMVFC_PARTITION_MIGRATED = 0x06,
};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 1135e74646e2..8cec5230fe31 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -96,6 +96,7 @@ static int client_reserve = 1;
static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static LIST_HEAD(ibmvscsi_head);
+static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
dev_set_drvdata(&vdev->dev, hostdata);
+ spin_lock(&ibmvscsi_driver_lock);
list_add_tail(&hostdata->host_list, &ibmvscsi_head);
+ spin_unlock(&ibmvscsi_driver_lock);
return 0;
add_srp_port_failed:
@@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
- list_del(&hostdata->host_list);
- unmap_persist_bufs(hostdata);
+ unsigned long flags;
+
+ srp_remove_host(hostdata->host);
+ scsi_remove_host(hostdata->host);
+
+ purge_requests(hostdata, DID_ERROR);
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
release_event_pool(&hostdata->pool, hostdata);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
max_events);
kthread_stop(hostdata->work_thread);
- srp_remove_host(hostdata->host);
- scsi_remove_host(hostdata->host);
+ unmap_persist_bufs(hostdata);
+
+ spin_lock(&ibmvscsi_driver_lock);
+ list_del(&hostdata->host_list);
+ spin_unlock(&ibmvscsi_driver_lock);
+
scsi_host_put(hostdata->host);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c98f264f1d83..a497b2c0cb79 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) {
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
- if (lpfc_cmd->waitq)
- wake_up(lpfc_cmd->waitq);
+ lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (lpfc_cmd->waitq) {
+ wake_up(lpfc_cmd->waitq);
lpfc_cmd->waitq = NULL;
}
spin_unlock(&lpfc_cmd->buf_lock);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e57774472e75..1d8c584ec1e9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
if (smid < ioc->hi_priority_smid) {
struct scsiio_tracker *st;
+ void *request;
st = _get_st_from_smid(ioc, smid);
if (!st) {
_base_recovery_check(ioc);
return;
}
+
+ /* Clear MPI request frame */
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(request, 0, ioc->request_sz);
+
mpt3sas_base_clear_st(ioc, st);
_base_recovery_check(ioc);
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8bb5b8f9f4d2..1ccfbc7eebe0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
struct scsiio_tracker *st;
+ Mpi25SCSIIORequest_t *mpi_request;
if (smid > 0 &&
smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
u32 unique_tag = smid - 1;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ /*
+ * If a SCSI IO request is outstanding at the driver level, its
+ * DevHandle field must be non-zero. If DevHandle is zero, this
+ * smid is free at the driver level, so return NULL.
+ */
+ if (!mpi_request->DevHandle)
+ return scmd;
+
scmd = scsi_host_find_tag(ioc->shost, unique_tag);
if (scmd) {
st = scsi_cmd_priv(scmd);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e74a62448ba4..e5db9a9954dc 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
- struct qedi_nvm_iscsi_image nvm_image;
-
qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(nvm_image),
+ sizeof(struct qedi_nvm_iscsi_image),
&qedi->nvm_buf_dma, GFP_KERNEL);
if (!qedi->iscsi_image) {
QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
static int qedi_get_boot_info(struct qedi_ctx *qedi)
{
int ret = 1;
- struct qedi_nvm_iscsi_image nvm_image;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Get NVM iSCSI CFG image\n");
ret = qedi_ops->common->nvm_get_image(qedi->cdev,
QED_NVM_IMAGE_ISCSI_CFG,
(char *)qedi->iscsi_image,
- sizeof(nvm_image));
+ sizeof(struct qedi_nvm_iscsi_image));
if (ret)
QEDI_ERR(&qedi->dbg_ctx,
"Could not get NVM image. ret = %d\n", ret);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 420045155ba0..0c700b140ce7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4991,6 +4991,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if ((domain & 0xf0) == 0xf0)
continue;
+ /* Bypass if not same domain and area of adapter. */
+ if (area && domain && ((area != vha->d_id.b.area) ||
+ (domain != vha->d_id.b.domain)) &&
+ (ha->current_topology == ISP_CFG_NL))
+ continue;
+
/* Bypass invalid local loop ID. */
if (loop_id > LAST_LOCAL_LOOP_ID)
continue;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 677f82fdf56f..91f576d743fe 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1517,7 +1517,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
goto eh_reset_failed;
}
err = 2;
- if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
+ if (do_reset(fcport, cmd->device->lun, 1)
!= QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800c,
"do_reset failed for cmd=%p.\n", cmd);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 16a18d5d856f..6e4f4931ae17 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c4cbfd07b916..a08ff3bd6310 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -238,6 +238,7 @@ static struct {
{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5a58cbf3a75d..c14006ac98f9 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"NETAPP", "INF-01-00", "rdac", },
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
+ {"LENOVO", "DE_Series", "rdac", },
{NULL, NULL, NULL },
};
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 20189675677a..601b9f1de267 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -585,10 +585,17 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (!blk_rq_is_scsi(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
cmd->flags &= ~SCMD_INITIALIZED;
- destroy_rcu_head(&cmd->rcu);
}
/*
+ * Calling rcu_barrier() is not necessary here because the
+ * SCSI error handler guarantees that the function called by
+ * call_rcu() has been called before scsi_end_request() is
+ * called.
+ */
+ destroy_rcu_head(&cmd->rcu);
+
+ /*
* In the MQ case the command gets freed by __blk_mq_end_request,
* so we have to do all cleanup that depends on it earlier.
*
@@ -2541,8 +2548,10 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- sdev->quiesced_by = NULL;
- blk_clear_pm_only(sdev->request_queue);
+ if (sdev->quiesced_by) {
+ sdev->quiesced_by = NULL;
+ blk_clear_pm_only(sdev->request_queue);
+ }
if (sdev->sdev_state == SDEV_QUIESCE)
scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6a9040faed00..3b119ca0cc0c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state);
+ /*
+ * If the device state changes to SDEV_RUNNING, we need to run
+ * the queue to avoid I/O hang.
+ */
+ if (ret == 0 && state == SDEV_RUNNING)
+ blk_mq_run_hw_queues(sdev->request_queue, true);
mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0508831d6fb9..0a82e93566dc 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
/* flush running scans then delete devices */
flush_work(&session->scan_work);
+ /* flush running unbind operations */
+ flush_work(&session->unbind_work);
__iscsi_unbind_session(&session->unbind_work);
/* hw iscsi may not have removed all connections from session */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 251db30d0882..2b2bc4b49d78 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- /*
- * XXX and what if there are packets in flight and this close()
- * XXX is followed by a "rmmod sd_mod"?
- */
-
scsi_disk_put(sdkp);
}
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
unsigned int opt_xfer_bytes =
logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ if (sdkp->opt_xfer_blocks == 0)
+ return false;
+
if (sdkp->opt_xfer_blocks > dev_max) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
-
+ struct request_queue *q = disk->queue;
+
ida_free(&sd_index_ida, sdkp->index);
+ /*
+ * Wait until all requests that are in progress have completed.
+ * This is necessary to avoid that e.g. scsi_end_request() crashes
+ * due to clearing the disk->private_data pointer. Wait from inside
+ * scsi_disk_release() instead of from sd_release() to avoid that
+ * freezing and unfreezing the request queue affects user space I/O
+ * in case multiple processes open a /dev/sd... node concurrently.
+ */
+ blk_mq_freeze_queue(q);
+ blk_mq_unfreeze_queue(q);
+
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 84380bae20f1..8472de1007ff 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -385,7 +385,7 @@ enum storvsc_request_type {
* This is the end of Protocol specific defines.
*/
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
static u32 max_outstanding_req_per_channel;
static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
struct device *dev = &device->device;
struct storvsc_device *stor_device;
- int num_cpus = num_online_cpus();
int num_sc;
struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
- num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+ /*
+ * If the number of CPUs is artificially restricted, such as
+ * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+ * sub-channels >= the number of CPUs. These sub-channels
+ * should not be created. The primary channel is already created
+ * and assigned to one CPU, so check against # CPUs - 1.
+ */
+ num_sc = min((int)(num_online_cpus() - 1), max_chns);
+ if (!num_sc)
+ return;
+
stor_device = get_out_stor_device(device);
if (!stor_device)
return;
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 9351349cf0a9..1e0041ec8132 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -150,7 +150,12 @@ struct bcm2835_power {
static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
{
- u64 start = ktime_get_ns();
+ u64 start;
+
+ if (!reg)
+ return 0;
+
+ start = ktime_get_ns();
/* Enable the module's async AXI bridges. */
ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
{
- u64 start = ktime_get_ns();
+ u64 start;
+
+ if (!reg)
+ return 0;
+
+ start = ktime_get_ns();
/* Enable the module's async AXI bridges. */
ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
}
}
-static void
+static int
bcm2835_init_power_domain(struct bcm2835_power *power,
int pd_xlate_index, const char *name)
{
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
dom->clk = devm_clk_get(dev->parent, name);
+ if (IS_ERR(dom->clk)) {
+ int ret = PTR_ERR(dom->clk);
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /* Some domains don't have a clk, so make sure that we
+ * don't deref an error pointer later.
+ */
+ dom->clk = NULL;
+ }
dom->base.name = name;
dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
pm_genpd_init(&dom->base, NULL, true);
power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+
+ return 0;
}
/** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
};
- int ret, i;
+ int ret = 0, i;
u32 id;
power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
- for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
- bcm2835_init_power_domain(power, i, power_domain_names[i]);
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+ ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
+ if (ret)
+ goto fail;
+ }
for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
ret = devm_reset_controller_register(dev, &power->reset);
if (ret)
- return ret;
+ goto fail;
of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
dev_info(dev, "Broadcom BCM2835 power domains driver");
return 0;
+
+fail:
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+ struct generic_pm_domain *dom = &power->domains[i].base;
+
+ if (dom->name)
+ pm_genpd_remove(dom);
+ }
+ return ret;
}
static int bcm2835_power_remove(struct platform_device *pdev)
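The bcm2835-power probe path above now propagates failures from domain initialization and, on error, unwinds only the domains that were actually set up, using the populated name field as the marker. A small illustrative sketch of that partial-init unwind, with invented types and a simulated failure point standing in for the real devm_clk_get()/pm_genpd calls:

```c
#include <stdio.h>

#define NDOMAINS 3

struct domain { const char *name; };

/* Pretend initialization that fails at index fail_at (e.g. a deferred clock). */
static int init_domain(struct domain *d, const char *name, int idx, int fail_at)
{
	if (idx == fail_at)
		return -1;
	d->name = name;		/* a set name marks the domain as initialized */
	return 0;
}

int main(void)
{
	const char *names[NDOMAINS] = { "grafx", "image", "h264" };
	struct domain domains[NDOMAINS] = { { 0 } };
	int i, ret = 0;

	for (i = 0; i < NDOMAINS; i++) {
		ret = init_domain(&domains[i], names[i], i, 1);
		if (ret)
			goto fail;
	}
	return 0;

fail:
	/* Unwind only what was actually set up. */
	for (i = 0; i < NDOMAINS; i++)
		if (domains[i].name)
			printf("removing domain %s\n", domains[i].name);
	return ret;
}
```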
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index c0901b96cfe4..62951e836cbc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
source "drivers/staging/mt7621-mmc/Kconfig"
-source "drivers/staging/mt7621-eth/Kconfig"
-
source "drivers/staging/mt7621-dts/Kconfig"
source "drivers/staging/gasket/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 57c6bce13ff4..d1b17ddcd354 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 687537203d9c..d9725888af6f 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -3,6 +3,7 @@
#
config XIL_AXIS_FIFO
tristate "Xilinx AXI-Stream FIFO IP core driver"
+ depends on OF
default n
help
This adds support for the Xilinx AXI-Stream
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index a7d569cfca5d..0dff1ac057cd 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
unsigned int mask);
unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
unsigned int comedi_nscans_left(struct comedi_subdevice *s,
unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index eefa62f42c0f..5a32b8fc000e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
EXPORT_SYMBOL_GPL(comedi_dio_update_state);
/**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
* @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
*
* Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
*
* For digital input, output or input/output subdevices, samples for
* multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
*
* Returns the overall scan length in bytes.
*/
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- struct comedi_cmd *cmd = &s->async->cmd;
unsigned int num_samples;
unsigned int bits_per_sample;
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
}
return comedi_samples_to_bytes(s, num_samples);
}
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag. For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ return comedi_bytes_per_scan_cmd(s, cmd);
+}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
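The comedi change above extracts a command-parameterized helper and keeps the original comedi_bytes_per_scan() as a thin wrapper over it, so cmdtest code can size a command that is not yet the subdevice's current one. A minimal sketch of this wrapper refactor, with invented struct names standing in for the comedi types:

```c
#include <stdio.h>

struct cmd { unsigned int chanlist_len; };
struct subdevice { unsigned int bytes_per_sample; struct cmd current_cmd; };

/* New helper: sizes any command, not just the currently loaded one. */
static unsigned int bytes_per_scan_cmd(const struct subdevice *s,
				       const struct cmd *cmd)
{
	return cmd->chanlist_len * s->bytes_per_sample;
}

/* The old entry point becomes a thin wrapper around the helper. */
static unsigned int bytes_per_scan(const struct subdevice *s)
{
	return bytes_per_scan_cmd(s, &s->current_cmd);
}

int main(void)
{
	struct subdevice s = { 2, { 8 } };
	struct cmd trial = { 16 };	/* a command still being validated */

	printf("current scan: %u bytes, trial scan: %u bytes\n",
	       bytes_per_scan(&s), bytes_per_scan_cmd(&s, &trial));
	return 0;
}
```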
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5edf59ac6706..b04dad8c7092 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
struct ni_private *devpriv = dev->private;
+ unsigned int bytes_per_scan;
int err = 0;
/* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
- s->async->prealloc_bufsz /
- comedi_bytes_per_scan(s));
+ bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+ if (bytes_per_scan) {
+ err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+ s->async->prealloc_bufsz /
+ bytes_per_scan);
+ }
if (err)
return 3;
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 829f7b12e0dc..9bbc68729c11 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
[EROFS_FT_SYMLINK] = DT_LNK,
};
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+ unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+ /* the on-disk name is not '\0'-terminated, so NUL-terminate a copy */
+ unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+ memcpy(dbg_namebuf, de_name, de_namelen);
+ dbg_namebuf[de_namelen] = '\0';
+
+ debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
+ de_namelen, d_type);
+#endif
+}
+
static int erofs_fill_dentries(struct dir_context *ctx,
void *dentry_blk, unsigned int *ofs,
unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
de = dentry_blk + *ofs;
while (de < end) {
const char *de_name;
- int de_namelen;
+ unsigned int de_namelen;
unsigned char d_type;
-#ifdef CONFIG_EROFS_FS_DEBUG
- unsigned int dbg_namelen;
- unsigned char dbg_namebuf[EROFS_NAME_LEN];
-#endif
- if (unlikely(de->file_type < EROFS_FT_MAX))
+ if (de->file_type < EROFS_FT_MAX)
d_type = erofs_filetype_table[de->file_type];
else
d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
nameoff = le16_to_cpu(de->nameoff);
de_name = (char *)dentry_blk + nameoff;
- de_namelen = unlikely(de + 1 >= end) ?
- /* last directory entry */
- strnlen(de_name, maxsize - nameoff) :
- le16_to_cpu(de[1].nameoff) - nameoff;
+ /* the last dirent in the block? */
+ if (de + 1 >= end)
+ de_namelen = strnlen(de_name, maxsize - nameoff);
+ else
+ de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
/* a corrupted entry is found */
- if (unlikely(de_namelen < 0)) {
+ if (unlikely(nameoff + de_namelen > maxsize ||
+ de_namelen > EROFS_NAME_LEN)) {
DBG_BUGON(1);
return -EIO;
}
-#ifdef CONFIG_EROFS_FS_DEBUG
- dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
- memcpy(dbg_namebuf, de_name, dbg_namelen);
- dbg_namebuf[dbg_namelen] = '\0';
-
- debugln("%s, found de_name %s de_len %d d_type %d", __func__,
- dbg_namebuf, de_namelen, d_type);
-#endif
-
+ debug_one_dentry(d_type, de_name, de_namelen);
if (!dir_emit(ctx, de_name, de_namelen,
le64_to_cpu(de->nid), d_type))
/* stopped by some reason */
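
The new check replaces the old signed-underflow test with explicit bounds on the name length. Below is a standalone sketch of that validation; dirent_name_ok() and NAME_LEN_MAX are illustrative stand-ins, not the erofs on-disk definitions:

#include <stdbool.h>

#define NAME_LEN_MAX 255	/* stand-in for EROFS_NAME_LEN */

static bool dirent_name_ok(unsigned int nameoff, unsigned int de_namelen,
			   unsigned int maxsize)
{
	/* the name must end inside the block being parsed */
	if (nameoff + de_namelen > maxsize)
		return false;
	/* and must not exceed the format's name-length limit */
	return de_namelen <= NAME_LEN_MAX;
}
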
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8715bc50e09c..31eef8395774 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -972,6 +972,7 @@ repeat:
overlapped = false;
compressed_pages = grp->compressed_pages;
+ err = 0;
for (i = 0; i < clusterpages; ++i) {
unsigned int pagenr;
@@ -981,26 +982,39 @@ repeat:
DBG_BUGON(!page);
DBG_BUGON(!page->mapping);
- if (z_erofs_is_stagingpage(page))
- continue;
+ if (!z_erofs_is_stagingpage(page)) {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (page->mapping == MNGD_MAPPING(sbi)) {
- DBG_BUGON(!PageUptodate(page));
- continue;
- }
+ if (page->mapping == MNGD_MAPPING(sbi)) {
+ if (unlikely(!PageUptodate(page)))
+ err = -EIO;
+ continue;
+ }
#endif
- /* only non-head page could be reused as a compressed page */
- pagenr = z_erofs_onlinepage_index(page);
+ /*
+ * only a non-head page can be selected
+ * for in-place decompression
+ */
+ pagenr = z_erofs_onlinepage_index(page);
- DBG_BUGON(pagenr >= nr_pages);
- DBG_BUGON(pages[pagenr]);
- ++sparsemem_pages;
- pages[pagenr] = page;
+ DBG_BUGON(pagenr >= nr_pages);
+ DBG_BUGON(pages[pagenr]);
+ ++sparsemem_pages;
+ pages[pagenr] = page;
- overlapped = true;
+ overlapped = true;
+ }
+
+ /* PG_error needs checking for both in-place and staging pages */
+ if (unlikely(PageError(page))) {
+ DBG_BUGON(PageUptodate(page));
+ err = -EIO;
+ }
}
+ if (unlikely(err))
+ goto out;
+
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ repeat:
skip_allocpage:
vout = erofs_vmap(pages, nr_pages);
+ if (!vout) {
+ err = -ENOMEM;
+ goto out;
+ }
err = z_erofs_vle_unzip_vmap(compressed_pages,
clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ repeat:
if (page->mapping == mc) {
WRITE_ONCE(grp->compressed_pages[nr], page);
+ ClearPageError(page);
if (!PagePrivate(page)) {
/*
* impossible to be !PagePrivate(page) for
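
The hunk above stops asserting on !PageUptodate and instead records -EIO and keeps scanning, failing the whole cluster only after every page has been inspected. A minimal sketch of that accumulate-then-bail pattern, with a hypothetical page_failed() callback standing in for the real page checks:

#include <errno.h>
#include <stdbool.h>

static int check_compressed_pages(bool (*page_failed)(unsigned int idx),
				  unsigned int nr)
{
	int err = 0;
	unsigned int i;

	/* record the first failure but keep walking the whole set */
	for (i = 0; i < nr; i++)
		if (page_failed(i))
			err = -EIO;

	return err;	/* the caller then does "if (err) goto out;" */
}
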
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 48b263a2731a..0daac9b984a8 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
- if (clusterpages == 1)
+ if (clusterpages == 1) {
vin = kmap_atomic(compressed_pages[0]);
- else
+ } else {
vin = erofs_vmap(compressed_pages, clusterpages);
+ if (!vin)
+ return -ENOMEM;
+ }
preempt_disable();
vout = erofs_pcpubuf[smp_processor_id()].data;
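
The change above makes the multi-page path check erofs_vmap() for failure instead of assuming it succeeds. A small sketch of the same select-then-verify pattern, assuming hypothetical map_one()/map_many() callbacks rather than kmap_atomic()/erofs_vmap():

#include <errno.h>
#include <stddef.h>

static void *map_input(void **pages, unsigned int nr, int *err,
		       void *(*map_one)(void *page),
		       void *(*map_many)(void **pages, unsigned int nr))
{
	void *vin;

	*err = 0;
	if (nr == 1) {
		/* single page: direct mapping, assumed not to fail here */
		vin = map_one(pages[0]);
	} else {
		/* multiple pages: a virtually contiguous mapping can fail */
		vin = map_many(pages, nr);
		if (!vin)
			*err = -ENOMEM;
	}
	return vin;
}
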
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index b73385540216..250c15ace2a7 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -117,22 +117,6 @@
status = "okay";
};
-&ethernet {
- //mtd-mac-address = <&factory 0xe000>;
- gmac1: mac@0 {
- compatible = "mediatek,eth-mac";
- reg = <0>;
- phy-handle = <&phy1>;
- };
-
- mdio-bus {
- phy1: ethernet-phy@1 {
- reg = <1>;
- phy-mode = "rgmii";
- };
- };
-};
-
&pinctrl {
state_default: pinctrl0 {
gpio {
@@ -141,3 +125,16 @@
};
};
};
+
+&switch0 {
+ ports {
+ port@0 {
+ label = "ethblack";
+ status = "ok";
+ };
+ port@4 {
+ label = "ethblue";
+ status = "ok";
+ };
+ };
+};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6aff3680ce4b..17020e24abd2 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -372,16 +372,83 @@
mediatek,ethsys = <&ethsys>;
- mediatek,switch = <&gsw>;
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
+ status = "off";
+ phy-mode = "rgmii";
+ phy-handle = <&phy5>;
+ };
mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
- phy1f: ethernet-phy@1f {
- reg = <0x1f>;
+ phy5: ethernet-phy@5 {
+ reg = <5>;
phy-mode = "rgmii";
};
+
+ switch0: switch0@0 {
+ compatible = "mediatek,mt7621";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ mediatek,mcm;
+ resets = <&rstctrl 2>;
+ reset-names = "mcm";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ port@0 {
+ status = "off";
+ reg = <0>;
+ label = "lan0";
+ };
+ port@1 {
+ status = "off";
+ reg = <1>;
+ label = "lan1";
+ };
+ port@2 {
+ status = "off";
+ reg = <2>;
+ label = "lan2";
+ };
+ port@3 {
+ status = "off";
+ reg = <3>;
+ label = "lan3";
+ };
+ port@4 {
+ status = "off";
+ reg = <4>;
+ label = "lan4";
+ };
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "trgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
};
};
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644
index 596b38552697..000000000000
--- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs.
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
- "mediatek,mt7623-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-
-
-Additional required properties for ARM based SoCs:
-- mediatek,reset-pin: phandle describing the reset GPIO
-- clocks: the clocks used by the switch
-- clock-names: the names of the clocks listed in the clocks property
- these should be "trgpll", "esw", "gp2", "gp1"
-- mt7530-supply: the phandle of the regulator used to power the switch
-- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
- setup the drive current
-
-
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
-
-Example:
-
-gsw: switch@1b100000 {
- compatible = "mediatek,mt7623-gsw";
- reg = <0 0x1b110000 0 0x300000>;
-
- interrupt-parent = <&pio>;
- interrupts = <168 IRQ_TYPE_EDGE_RISING>;
-
- clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
- <&ethsys CLK_ETHSYS_ESW>,
- <&ethsys CLK_ETHSYS_GP2>,
- <&ethsys CLK_ETHSYS_GP1>;
- clock-names = "trgpll", "esw", "gp2", "gp1";
-
- mt7530-supply = <&mt6323_vpa_reg>;
-
- mediatek,pctl-regmap = <&syscfg_pctl_a>;
- mediatek,reset-pin = <&pio 15 0>;
-
- status = "okay";
-};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644
index 44ea86c7a96c..000000000000
--- a/drivers/staging/mt7621-eth/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-config NET_VENDOR_MEDIATEK_STAGING
- bool "MediaTek ethernet driver - staging version"
- depends on RALINK
- ---help---
- If you have an MT7621 Mediatek SoC with ethernet, say Y.
-
-if NET_VENDOR_MEDIATEK_STAGING
-choice
- prompt "MAC type"
-
-config NET_MEDIATEK_MT7621
- bool "MT7621"
- depends on MIPS && SOC_MT7621
-
-endchoice
-
-config NET_MEDIATEK_SOC_STAGING
- tristate "MediaTek SoC Gigabit Ethernet support"
- depends on NET_VENDOR_MEDIATEK_STAGING
- select PHYLIB
- ---help---
- This driver supports the gigabit ethernet MACs in the
- MediaTek SoC family.
-
-config NET_MEDIATEK_MDIO
- def_bool NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
- select PHYLIB
-
-config NET_MEDIATEK_MDIO_MT7620
- def_bool NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
- select NET_MEDIATEK_MDIO
-
-config NET_MEDIATEK_GSW_MT7621
- def_tristate NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
-
-endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644
index 018bcc3596b3..000000000000
--- a/drivers/staging/mt7621-eth/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the Ralink SoCs built-in ethernet macs
-#
-
-mtk-eth-soc-y += mtk_eth_soc.o ethtool.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644
index f9e47d4b4cd4..000000000000
--- a/drivers/staging/mt7621-eth/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-
-- verify devicetree documentation is consistent with code
-- fix ethtool - currently doesn't return valid data.
-- general code review and clean up
-- add support for second MAC on mt7621
-- convert gsw code to use switchdev interfaces
-- md7620_mmi_write etc should probably be wrapped
- in a regmap abstraction.
-- Get soc_mt7621 to work with QDMA TX if possible.
-- Ensure phys are correctly configured when a cable
- is plugged in.
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644
index 8c4228e2c987..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include "mtk_eth_soc.h"
-#include "ethtool.h"
-
-struct mtk_stat {
- char name[ETH_GSTRING_LEN];
- unsigned int idx;
-};
-
-#define MTK_HW_STAT(stat) { \
- .name = #stat, \
- .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
-}
-
-static const struct mtk_stat mtk_ethtool_hw_stats[] = {
- MTK_HW_STAT(tx_bytes),
- MTK_HW_STAT(tx_packets),
- MTK_HW_STAT(tx_skip),
- MTK_HW_STAT(tx_collisions),
- MTK_HW_STAT(rx_bytes),
- MTK_HW_STAT(rx_packets),
- MTK_HW_STAT(rx_overflow),
- MTK_HW_STAT(rx_fcs_errors),
- MTK_HW_STAT(rx_short_errors),
- MTK_HW_STAT(rx_long_errors),
- MTK_HW_STAT(rx_checksum_errors),
- MTK_HW_STAT(rx_flow_control_packets),
-};
-
-#define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats)
-
-static int mtk_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
- err = phy_read_status(mac->phy_dev);
- if (err)
- return -ENODEV;
- }
-
- phy_ethtool_ksettings_get(mac->phy_dev, cmd);
- return 0;
-}
-
-static int mtk_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
- if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
- mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
- mac->phy_flags = MTK_PHY_FLAG_PORT;
- } else if (mac->hw->mii_bus) {
- mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
- cmd->base.phy_address);
- if (!mac->phy_dev)
- return -ENODEV;
- mac->phy_flags = MTK_PHY_FLAG_ATTACH;
- } else {
- return -ENODEV;
- }
- }
-
- return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
-}
-
-static void mtk_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_soc_data *soc = mac->hw->soc;
-
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
-
- if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
- info->n_stats = MTK_HW_STATS_LEN;
-}
-
-static u32 mtk_get_msglevel(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- return mac->hw->msg_enable;
-}
-
-static void mtk_set_msglevel(struct net_device *dev, u32 value)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- mac->hw->msg_enable = value;
-}
-
-static int mtk_nway_reset(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -EOPNOTSUPP;
-
- return genphy_restart_aneg(mac->phy_dev);
-}
-
-static u32 mtk_get_link(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
-
- if (!mac->phy_dev)
- goto out_get_link;
-
- if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
- err = genphy_update_link(mac->phy_dev);
- if (err)
- goto out_get_link;
- }
-
- return mac->phy_dev->link;
-
-out_get_link:
- return ethtool_op_get_link(dev);
-}
-
-static int mtk_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if ((ring->tx_pending < 2) ||
- (ring->rx_pending < 2) ||
- (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
- (ring->tx_pending > mac->hw->soc->dma_ring_size))
- return -EINVAL;
-
- dev->netdev_ops->ndo_stop(dev);
-
- mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
- mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
-
- return dev->netdev_ops->ndo_open(dev);
-}
-
-static void mtk_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- ring->rx_max_pending = mac->hw->soc->dma_ring_size;
- ring->tx_max_pending = mac->hw->soc->dma_ring_size;
- ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
- ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
-}
-
-static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < MTK_HW_STATS_LEN; i++) {
- memcpy(data, mtk_ethtool_hw_stats[i].name,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int mtk_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return MTK_HW_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void mtk_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_hw_stats *hwstats = mac->hw_stats;
- unsigned int start;
- int i;
-
- if (netif_running(dev) && netif_device_present(dev)) {
- if (spin_trylock(&hwstats->stats_lock)) {
- mtk_stats_update_mac(mac);
- spin_unlock(&hwstats->stats_lock);
- }
- }
-
- do {
- start = u64_stats_fetch_begin_irq(&hwstats->syncp);
- for (i = 0; i < MTK_HW_STATS_LEN; i++)
- data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
-
- } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
-}
-
-static struct ethtool_ops mtk_ethtool_ops = {
- .get_link_ksettings = mtk_get_link_ksettings,
- .set_link_ksettings = mtk_set_link_ksettings,
- .get_drvinfo = mtk_get_drvinfo,
- .get_msglevel = mtk_get_msglevel,
- .set_msglevel = mtk_set_msglevel,
- .nway_reset = mtk_nway_reset,
- .get_link = mtk_get_link,
- .set_ringparam = mtk_set_ringparam,
- .get_ringparam = mtk_get_ringparam,
-};
-
-void mtk_set_ethtool_ops(struct net_device *netdev)
-{
- struct mtk_mac *mac = netdev_priv(netdev);
- struct mtk_soc_data *soc = mac->hw->soc;
-
- if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
- mtk_ethtool_ops.get_strings = mtk_get_strings;
- mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
- mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
- }
-
- netdev->ethtool_ops = &mtk_ethtool_ops;
-}
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644
index 0071469aea6c..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETHTOOL_H
-#define MTK_ETHTOOL_H
-
-#include <linux/ethtool.h>
-
-void mtk_set_ethtool_ops(struct net_device *netdev);
-
-#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644
index 70f7e5481952..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7620.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_GSW_MT7620_H__
-#define _RALINK_GSW_MT7620_H__
-
-#define GSW_REG_PHY_TIMEOUT (5 * HZ)
-
-#define MT7620_GSW_REG_PIAC 0x0004
-
-#define GSW_NUM_VLANS 16
-#define GSW_NUM_VIDS 4096
-#define GSW_NUM_PORTS 7
-#define GSW_PORT6 6
-
-#define GSW_MDIO_ACCESS BIT(31)
-#define GSW_MDIO_READ BIT(19)
-#define GSW_MDIO_WRITE BIT(18)
-#define GSW_MDIO_START BIT(16)
-#define GSW_MDIO_ADDR_SHIFT 20
-#define GSW_MDIO_REG_SHIFT 25
-
-#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100))
-#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
-#define GSW_REG_SMACCR0 0x3fE4
-#define GSW_REG_SMACCR1 0x3fE8
-#define GSW_REG_CKGCR 0x3ff0
-
-#define GSW_REG_IMR 0x7008
-#define GSW_REG_ISR 0x700c
-#define GSW_REG_GPC1 0x7014
-
-#define SYSC_REG_CHIP_REV_ID 0x0c
-#define SYSC_REG_CFG 0x10
-#define SYSC_REG_CFG1 0x14
-#define RST_CTRL_MCM BIT(2)
-#define SYSC_PAD_RGMII2_MDIO 0x58
-#define SYSC_GPIO_MODE 0x60
-
-#define PORT_IRQ_ST_CHG 0x7f
-
-#define MT7621_ESW_PHY_POLLING 0x0000
-#define MT7620_ESW_PHY_POLLING 0x7000
-
-#define PMCR_IPG BIT(18)
-#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
-#define PMCR_BACKOFF BIT(9)
-#define PMCR_BACKPRES BIT(8)
-#define PMCR_RX_FC BIT(5)
-#define PMCR_TX_FC BIT(4)
-#define PMCR_SPEED(_x) (_x << 2)
-#define PMCR_DUPLEX BIT(1)
-#define PMCR_LINK BIT(0)
-
-#define PHY_AN_EN BIT(31)
-#define PHY_PRE_EN BIT(30)
-#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24)
-
-/* ethernet subsystem config register */
-#define ETHSYS_SYSCFG0 0x14
-/* ethernet subsystem clock register */
-#define ETHSYS_CLKCFG0 0x2c
-#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
-
-/* p5 RGMII wrapper TX clock control register */
-#define MT7530_P5RGMIITXCR 0x7b04
-/* p5 RGMII wrapper RX clock control register */
-#define MT7530_P5RGMIIRXCR 0x7b00
-/* TRGMII TDX ODT registers */
-#define MT7530_TRGMII_TD0_ODT 0x7a54
-#define MT7530_TRGMII_TD1_ODT 0x7a5c
-#define MT7530_TRGMII_TD2_ODT 0x7a64
-#define MT7530_TRGMII_TD3_ODT 0x7a6c
-#define MT7530_TRGMII_TD4_ODT 0x7a74
-#define MT7530_TRGMII_TD5_ODT 0x7a7c
-/* TRGMII TCK ctrl register */
-#define MT7530_TRGMII_TCK_CTRL 0x7a78
-/* TRGMII Tx ctrl register */
-#define MT7530_TRGMII_TXCTRL 0x7a40
-/* port 6 extended control register */
-#define MT7530_P6ECR 0x7830
-/* IO driver control register */
-#define MT7530_IO_DRV_CR 0x7810
-/* top signal control register */
-#define MT7530_TOP_SIG_CTRL 0x7808
-/* modified hwtrap register */
-#define MT7530_MHWTRAP 0x7804
-/* hwtrap status register */
-#define MT7530_HWTRAP 0x7800
-/* status interrupt register */
-#define MT7530_SYS_INT_STS 0x700c
-/* system nterrupt register */
-#define MT7530_SYS_INT_EN 0x7008
-/* system control register */
-#define MT7530_SYS_CTRL 0x7000
-/* port MAC status register */
-#define MT7530_PMSR_P(x) (0x3008 + (x * 0x100))
-/* port MAC control register */
-#define MT7530_PMCR_P(x) (0x3000 + (x * 0x100))
-
-#define MT7621_XTAL_SHIFT 6
-#define MT7621_XTAL_MASK 0x7
-#define MT7621_XTAL_25 6
-#define MT7621_XTAL_40 3
-#define MT7621_MDIO_DRV_MASK (3 << 4)
-#define MT7621_GE1_MODE_MASK (3 << 12)
-
-#define TRGMII_TXCTRL_TXC_INV BIT(30)
-#define P6ECR_INTF_MODE_RGMII BIT(1)
-#define P5RGMIIRXCR_C_ALIGN BIT(8)
-#define P5RGMIIRXCR_DELAY_2 BIT(1)
-#define P5RGMIITXCR_DELAY_2 (BIT(8) | BIT(2))
-
-/* TOP_SIG_CTRL bits */
-#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
-
-/* MHWTRAP bits */
-#define MHWTRAP_MANUAL BIT(16)
-#define MHWTRAP_P5_MAC_SEL BIT(13)
-#define MHWTRAP_P6_DIS BIT(8)
-#define MHWTRAP_P5_RGMII_MODE BIT(7)
-#define MHWTRAP_P5_DIS BIT(6)
-#define MHWTRAP_PHY_ACCESS BIT(5)
-
-/* HWTRAP bits */
-#define HWTRAP_XTAL_SHIFT 9
-#define HWTRAP_XTAL_MASK 0x3
-
-/* SYS_CTRL bits */
-#define SYS_CTRL_SW_RST BIT(1)
-#define SYS_CTRL_REG_RST BIT(0)
-
-/* PMCR bits */
-#define PMCR_IFG_XMIT_96 BIT(18)
-#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE_MODE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
-#define PMCR_BACK_PRES_EN BIT(9)
-#define PMCR_BACKOFF_EN BIT(8)
-#define PMCR_TX_FC_EN BIT(5)
-#define PMCR_RX_FC_EN BIT(4)
-#define PMCR_FORCE_SPEED_1000 BIT(3)
-#define PMCR_FORCE_FDX BIT(1)
-#define PMCR_FORCE_LNK BIT(0)
-#define PMCR_FIXED_LINK (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
- PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
- PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
- PMCR_FORCE_LNK)
-
-#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN)
-
-/* TRGMII control registers */
-#define GSW_INTF_MODE 0x390
-#define GSW_TRGMII_TD0_ODT 0x354
-#define GSW_TRGMII_TD1_ODT 0x35c
-#define GSW_TRGMII_TD2_ODT 0x364
-#define GSW_TRGMII_TD3_ODT 0x36c
-#define GSW_TRGMII_TXCTL_ODT 0x374
-#define GSW_TRGMII_TCK_ODT 0x37c
-#define GSW_TRGMII_RCK_CTRL 0x300
-
-#define INTF_MODE_TRGMII BIT(1)
-#define TRGMII_RCK_CTRL_RX_RST BIT(31)
-
-/* Mac control registers */
-#define MTK_MAC_P2_MCR 0x200
-#define MTK_MAC_P1_MCR 0x100
-
-#define MAC_MCR_MAX_RX_2K BIT(29)
-#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
-#define MAC_MCR_FORCE_MODE BIT(15)
-#define MAC_MCR_TX_EN BIT(14)
-#define MAC_MCR_RX_EN BIT(13)
-#define MAC_MCR_BACKOFF_EN BIT(9)
-#define MAC_MCR_BACKPR_EN BIT(8)
-#define MAC_MCR_FORCE_RX_FC BIT(5)
-#define MAC_MCR_FORCE_TX_FC BIT(4)
-#define MAC_MCR_SPEED_1000 BIT(3)
-#define MAC_MCR_FORCE_DPX BIT(1)
-#define MAC_MCR_FORCE_LINK BIT(0)
-#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
- MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
- MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
- MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
- MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
- MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
-#define MAC_MCR_FIXED_LINK_FC (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
- MAC_MCR_FIXED_LINK)
-
-/* possible XTAL speed */
-#define MT7623_XTAL_40 0
-#define MT7623_XTAL_20 1
-#define MT7623_XTAL_25 3
-
-/* GPIO port control registers */
-#define GPIO_OD33_CTRL8 0x4c0
-#define GPIO_BIAS_CTRL 0xed0
-#define GPIO_DRV_SEL10 0xf00
-
-/* on MT7620 the functio of port 4 can be software configured */
-enum {
- PORT4_EPHY = 0,
- PORT4_EXT,
-};
-
-/* struct mt7620_gsw - the structure that holds the SoC specific data
- * @dev: The Device struct
- * @base: The base address
- * @piac_offset: The PIAC base may change depending on SoC
- * @irq: The IRQ we are using
- * @port4: The port4 mode on MT7620
- * @autopoll: Is MDIO autopolling enabled
- * @ethsys: The ethsys register map
- * @pctl: The pin control register map
- * @clk_gsw: The switch clock
- * @clk_gp1: The gmac1 clock
- * @clk_gp2: The gmac2 clock
- * @clk_trgpll: The trgmii pll clock
- */
-struct mt7620_gsw {
- struct device *dev;
- void __iomem *base;
- u32 piac_offset;
- int irq;
- int port4;
- unsigned long int autopoll;
-
- struct regmap *ethsys;
- struct regmap *pctl;
-
- struct clk *clk_gsw;
- struct clk *clk_gp1;
- struct clk *clk_gp2;
- struct clk *clk_trgpll;
-};
-
-/* switch register I/O wrappers */
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
-
-/* the callback used by the driver core to bringup the switch */
-int mtk_gsw_init(struct mtk_eth *eth);
-
-/* MDIO access wrappers */
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
-int mt7620_has_carrier(struct mtk_eth *eth);
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
- int speed, int duplex);
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
- u32 phy_register, u32 write_data);
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
-void mt7620_handle_carrier(struct mtk_eth *eth);
-
-#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644
index 53767b17bad9..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7621.c
+++ /dev/null
@@ -1,297 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-
-#include <ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
-{
- iowrite32(val, gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_w32);
-
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
-{
- return ioread32(gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_r32);
-
-static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
-{
- struct mtk_eth *eth = (struct mtk_eth *)_eth;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
- u32 reg, i;
-
- reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
-
- for (i = 0; i < 5; i++) {
- unsigned int link;
-
- if ((reg & BIT(i)) == 0)
- continue;
-
- link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
-
- if (link == eth->link[i])
- continue;
-
- eth->link[i] = link;
- if (link)
- netdev_info(*eth->netdev,
- "port %d link up\n", i);
- else
- netdev_info(*eth->netdev,
- "port %d link down\n", i);
- }
-
- mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
-
- return IRQ_HANDLED;
-}
-
-static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
- struct device_node *np)
-{
- u32 i;
- u32 val;
-
- /* hardware reset the switch */
- mtk_reset(eth, RST_CTRL_MCM);
- mdelay(10);
-
- /* reduce RGMII2 PAD driving strength */
- rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
-
- /* gpio mux - RGMII1=Normal mode */
- rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
-
- /* set GMAC1 RGMII mode */
- rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
-
- /* enable MDIO to control MT7530 */
- rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
-
- /* turn off all PHYs */
- for (i = 0; i <= 4; i++) {
- val = _mt7620_mii_read(gsw, i, 0x0);
- val |= BIT(11);
- _mt7620_mii_write(gsw, i, 0x0, val);
- }
-
- /* reset the switch */
- mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
- SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
- usleep_range(10, 20);
-
- if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
- /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
- mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
- mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
- } else {
- /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
- mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
- mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
- }
-
- /* GE2, Link down */
- mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
-
- /* Enable Port 6, P5 as GMAC5, P5 disable */
- val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
- /* Enable Port 6 */
- val &= ~MHWTRAP_P6_DIS;
- /* Disable Port 5 */
- val |= MHWTRAP_P5_DIS;
- /* manual override of HW-Trap */
- val |= MHWTRAP_MANUAL;
- mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
-
- val = rt_sysc_r32(SYSC_REG_CFG);
- val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
- if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
- /* 40Mhz */
-
- /* disable MT7530 core clock */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x410);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x0);
-
- /* disable MT7530 PLL */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40d);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x2020);
-
- /* for MT7530 core clock = 500Mhz */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40e);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x119);
-
- /* enable MT7530 PLL */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40d);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x2820);
-
- usleep_range(20, 40);
-
- /* enable MT7530 core clock */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x410);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- }
-
- /* RGMII */
- _mt7620_mii_write(gsw, 0, 14, 0x1);
-
- /* set MT7530 central align */
- mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
- mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
- MT7530_TRGMII_TXCTRL);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
-
- /* delay setting for 10/1000M */
- mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
- P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
- mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
-
- /* lower Tx Driving*/
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
-
- /* turn on all PHYs */
- for (i = 0; i <= 4; i++) {
- val = _mt7620_mii_read(gsw, i, 0);
- val &= ~BIT(11);
- _mt7620_mii_write(gsw, i, 0, val);
- }
-
-#define MT7530_NUM_PORTS 8
-#define REG_ESW_PORT_PCR(x) (0x2004 | ((x) << 8))
-#define REG_ESW_PORT_PVC(x) (0x2010 | ((x) << 8))
-#define REG_ESW_PORT_PPBV1(x) (0x2014 | ((x) << 8))
-#define MT7530_CPU_PORT 6
-
- /* This is copied from mt7530_apply_config in libreCMC driver */
- {
- int i;
-
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
-
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
- 0x00ff0000);
-
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
- }
-
- /* enable irq */
- mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
- mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
-}
-
-static const struct of_device_id mediatek_gsw_match[] = {
- { .compatible = "mediatek,mt7621-gsw" },
- {},
-};
-MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
-
-int mtk_gsw_init(struct mtk_eth *eth)
-{
- struct device_node *np = eth->switch_np;
- struct platform_device *pdev = of_find_device_by_node(np);
- struct mt7620_gsw *gsw;
-
- if (!pdev)
- return -ENODEV;
-
- if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
- return -EINVAL;
-
- gsw = platform_get_drvdata(pdev);
- eth->sw_priv = gsw;
-
- if (!gsw->irq)
- return -EINVAL;
-
- request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
- "gsw", eth);
- disable_irq(gsw->irq);
-
- mt7621_hw_init(eth, gsw, np);
-
- enable_irq(gsw->irq);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mtk_gsw_init);
-
-static int mt7621_gsw_probe(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct mt7620_gsw *gsw;
-
- gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
- if (!gsw)
- return -ENOMEM;
-
- gsw->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(gsw->base))
- return PTR_ERR(gsw->base);
-
- gsw->dev = &pdev->dev;
- gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-
- platform_set_drvdata(pdev, gsw);
-
- return 0;
-}
-
-static int mt7621_gsw_remove(struct platform_device *pdev)
-{
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static struct platform_driver gsw_driver = {
- .probe = mt7621_gsw_probe,
- .remove = mt7621_gsw_remove,
- .driver = {
- .name = "mt7621-gsw",
- .of_match_table = mediatek_gsw_match,
- },
-};
-
-module_platform_driver(gsw_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644
index 5fea6a447eed..000000000000
--- a/drivers/staging/mt7621-eth/mdio.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/phy.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-
-static int mtk_mdio_reset(struct mii_bus *bus)
-{
- /* TODO */
- return 0;
-}
-
-static void mtk_phy_link_adjust(struct net_device *dev)
-{
- struct mtk_eth *eth = netdev_priv(dev);
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&eth->phy->lock, flags);
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_node[i]) {
- struct phy_device *phydev = eth->phy->phy[i];
- int status_change = 0;
-
- if (phydev->link)
- if (eth->phy->duplex[i] != phydev->duplex ||
- eth->phy->speed[i] != phydev->speed)
- status_change = 1;
-
- if (phydev->link != eth->link[i])
- status_change = 1;
-
- switch (phydev->speed) {
- case SPEED_1000:
- case SPEED_100:
- case SPEED_10:
- eth->link[i] = phydev->link;
- eth->phy->duplex[i] = phydev->duplex;
- eth->phy->speed[i] = phydev->speed;
-
- if (status_change &&
- eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- break;
- }
- }
- }
- spin_unlock_irqrestore(&eth->phy->lock, flags);
-}
-
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *phy_node)
-{
- const __be32 *_port = NULL;
- struct phy_device *phydev;
- int phy_mode, port;
-
- _port = of_get_property(phy_node, "reg", NULL);
-
- if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
- pr_err("%pOFn: invalid port id\n", phy_node);
- return -EINVAL;
- }
- port = be32_to_cpu(*_port);
- phy_mode = of_get_phy_mode(phy_node);
- if (phy_mode < 0) {
- dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
- eth->phy->phy_node[port] = NULL;
- return -EINVAL;
- }
-
- phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
- mtk_phy_link_adjust, 0, phy_mode);
- if (!phydev) {
- dev_err(eth->dev, "could not connect to PHY\n");
- eth->phy->phy_node[port] = NULL;
- return -ENODEV;
- }
-
- phydev->supported &= PHY_1000BT_FEATURES;
- phydev->advertising = phydev->supported;
-
- dev_info(eth->dev,
- "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
- port, phydev_name(phydev), phydev->phy_id,
- phydev->drv->name);
-
- eth->phy->phy[port] = phydev;
- eth->link[port] = 0;
-
- return 0;
-}
-
-static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
- struct phy_device *phy)
-{
- phy_attach(eth->netdev[mac->id], phydev_name(phy),
- PHY_INTERFACE_MODE_MII);
-
- phy->autoneg = AUTONEG_ENABLE;
- phy->speed = 0;
- phy->duplex = 0;
- phy_set_max_speed(phy, SPEED_100);
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
-
- phy_start_aneg(phy);
-}
-
-static int mtk_phy_connect(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- int i;
-
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_node[i]) {
- if (!mac->phy_dev) {
- mac->phy_dev = eth->phy->phy[i];
- mac->phy_flags = MTK_PHY_FLAG_PORT;
- }
- } else if (eth->mii_bus) {
- struct phy_device *phy;
-
- phy = mdiobus_get_phy(eth->mii_bus, i);
- if (phy) {
- phy_init(eth, mac, phy);
- if (!mac->phy_dev) {
- mac->phy_dev = phy;
- mac->phy_flags = MTK_PHY_FLAG_ATTACH;
- }
- }
- }
- }
-
- return 0;
-}
-
-static void mtk_phy_disconnect(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++)
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 0;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_disconnect(eth->phy->phy[i]);
- } else if (eth->mii_bus) {
- struct phy_device *phy =
- mdiobus_get_phy(eth->mii_bus, i);
-
- if (phy)
- phy_detach(phy);
- }
-}
-
-static void mtk_phy_start(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 1;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_start(eth->phy->phy[i]);
- }
- }
-}
-
-static void mtk_phy_stop(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++)
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 0;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_stop(eth->phy->phy[i]);
- }
-}
-
-static struct mtk_phy phy_ralink = {
- .connect = mtk_phy_connect,
- .disconnect = mtk_phy_disconnect,
- .start = mtk_phy_start,
- .stop = mtk_phy_stop,
-};
-
-int mtk_mdio_init(struct mtk_eth *eth)
-{
- struct device_node *mii_np;
- int err;
-
- if (!eth->soc->mdio_read || !eth->soc->mdio_write)
- return 0;
-
- spin_lock_init(&phy_ralink.lock);
- eth->phy = &phy_ralink;
-
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
- if (!mii_np) {
- dev_err(eth->dev, "no %s child node found", "mdio-bus");
- return -ENODEV;
- }
-
- if (!of_device_is_available(mii_np)) {
- err = 0;
- goto err_put_node;
- }
-
- eth->mii_bus = mdiobus_alloc();
- if (!eth->mii_bus) {
- err = -ENOMEM;
- goto err_put_node;
- }
-
- eth->mii_bus->name = "mdio";
- eth->mii_bus->read = eth->soc->mdio_read;
- eth->mii_bus->write = eth->soc->mdio_write;
- eth->mii_bus->reset = mtk_mdio_reset;
- eth->mii_bus->priv = eth;
- eth->mii_bus->parent = eth->dev;
-
- snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
- err = of_mdiobus_register(eth->mii_bus, mii_np);
- if (err)
- goto err_free_bus;
-
- return 0;
-
-err_free_bus:
- kfree(eth->mii_bus);
-err_put_node:
- of_node_put(mii_np);
- eth->mii_bus = NULL;
- return err;
-}
-
-void mtk_mdio_cleanup(struct mtk_eth *eth)
-{
- if (!eth->mii_bus)
- return;
-
- mdiobus_unregister(eth->mii_bus);
- of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
-}
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644
index b14e23842a01..000000000000
--- a/drivers/staging/mt7621-eth/mdio.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_MDIO_H__
-#define _RALINK_MDIO_H__
-
-#ifdef CONFIG_NET_MEDIATEK_MDIO
-int mtk_mdio_init(struct mtk_eth *eth);
-void mtk_mdio_cleanup(struct mtk_eth *eth);
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *phy_node);
-#else
-static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
-static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
-#endif
-#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644
index ced605c2914e..000000000000
--- a/drivers/staging/mt7621-eth/mdio_mt7620.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
-{
- unsigned long t_start = jiffies;
-
- while (1) {
- if (!(mtk_switch_r32(gsw,
- gsw->piac_offset + MT7620_GSW_REG_PIAC) &
- GSW_MDIO_ACCESS))
- return 0;
- if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
- break;
- }
-
- dev_err(gsw->dev, "mdio: MDIO timeout\n");
- return -1;
-}
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
- u32 phy_register, u32 write_data)
-{
- if (mt7620_mii_busy_wait(gsw))
- return -1;
-
- write_data &= 0xffff;
-
- mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
- (phy_register << GSW_MDIO_REG_SHIFT) |
- (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
- MT7620_GSW_REG_PIAC);
-
- if (mt7620_mii_busy_wait(gsw))
- return -1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_write);
-
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
-{
- u32 d;
-
- if (mt7620_mii_busy_wait(gsw))
- return 0xffff;
-
- mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
- (phy_reg << GSW_MDIO_REG_SHIFT) |
- (phy_addr << GSW_MDIO_ADDR_SHIFT),
- MT7620_GSW_REG_PIAC);
-
- if (mt7620_mii_busy_wait(gsw))
- return 0xffff;
-
- d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
-
- return d;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_read);
-
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
-{
- struct mtk_eth *eth = bus->priv;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
- return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
-}
-
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
-{
- struct mtk_eth *eth = bus->priv;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
- return _mt7620_mii_read(gsw, phy_addr, phy_reg);
-}
-
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
-{
- _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
- _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
- _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
-
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
-{
- u16 high, low;
-
- _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
- low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
- high = _mt7620_mii_read(gsw, 0x1f, 0x10);
-
- return (high << 16) | (low & 0xffff);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
-
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
-{
- u32 val = mt7530_mdio_r32(gsw, reg);
-
- val &= ~mask;
- val |= set;
- mt7530_mdio_w32(gsw, reg, val);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
-
-static unsigned char *mtk_speed_str(int speed)
-{
- switch (speed) {
- case 2:
- case SPEED_1000:
- return "1000";
- case 1:
- case SPEED_100:
- return "100";
- case 0:
- case SPEED_10:
- return "10";
- }
-
- return "? ";
-}
-
-int mt7620_has_carrier(struct mtk_eth *eth)
-{
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
- int i;
-
- for (i = 0; i < GSW_PORT6; i++)
- if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
- return 1;
- return 0;
-}
-
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
- int speed, int duplex)
-{
- struct mt7620_gsw *gsw = eth->sw_priv;
-
- if (link)
- dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
- port, mtk_speed_str(speed),
- (duplex) ? "Full" : "Half");
- else
- dev_info(gsw->dev, "port %d link down\n", port);
-}
-
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
-{
- mt7620_print_link_state(eth, port, eth->link[port],
- eth->phy->speed[port],
- (eth->phy->duplex[port] == DUPLEX_FULL));
-}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644
index 6027b19f7bc2..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ /dev/null
@@ -1,2176 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/clk.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/if_vlan.h>
-#include <linux/reset.h>
-#include <linux/tcp.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-#include "ethtool.h"
-
-#define MAX_RX_LENGTH 1536
-#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
-#define DMA_DUMMY_DESC 0xffffffff
-#define MTK_DEFAULT_MSG_ENABLE \
- (NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR)
-
-#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
-#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
-#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
-
-#define SYSC_REG_RSTCTRL 0x34
-
-static int mtk_msg_level = -1;
-module_param_named(msg_level, mtk_msg_level, int, 0);
-MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
-
-static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
- [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
- [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
- [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
- [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
- [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
- [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
- [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
- [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
- [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
- [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
- [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
- [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
- [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
- [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
- [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
- [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
-};
-
-static const u16 *mtk_reg_table = mtk_reg_table_default;
-
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
-{
- __raw_writel(val, eth->base + reg);
-}
-
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
-{
- return __raw_readl(eth->base + reg);
-}
-
-static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
-{
- mtk_w32(eth, val, mtk_reg_table[reg]);
-}
-
-static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
-{
- return mtk_r32(eth, mtk_reg_table[reg]);
-}
-
-/* these bits are also exposed via the reset-controller API. however the switch
- * and FE need to be brought out of reset in the exakt same moemtn and the
- * reset-controller api does not provide this feature yet. Do the reset manually
- * until we fixed the reset-controller api to be able to do this
- */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
-{
- u32 val;
-
- regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
- val |= reset_bits;
- regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
- usleep_range(10, 20);
- val &= ~reset_bits;
- regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
- usleep_range(10, 20);
-}
-EXPORT_SYMBOL(mtk_reset);
-
-static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
-{
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
-}
-
-static inline u32 mtk_irq_pending(struct mtk_eth *eth)
-{
- u32 status = 0;
-
- if (eth->soc->dma_type & MTK_PDMA)
- status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
- if (eth->soc->dma_type & MTK_QDMA)
- status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
-
- return status;
-}
-
-static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
-{
- u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
- if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
- status_reg = MTK_REG_MTK_INT_STATUS2;
-
- mtk_reg_w32(eth, mask, status_reg);
-}
-
-static u32 mtk_irq_pending_status(struct mtk_eth *eth)
-{
- u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
- if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
- status_reg = MTK_REG_MTK_INT_STATUS2;
-
- return mtk_reg_r32(eth, status_reg);
-}
-
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
-{
- u32 val;
-
- if (eth->soc->dma_type & MTK_PDMA) {
- val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
- /* flush write */
- mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
- /* flush write */
- mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- }
-}
-
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
-{
- u32 val;
-
- if (eth->soc->dma_type & MTK_PDMA) {
- val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
- /* flush write */
- mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
- /* flush write */
- mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- }
-}
-
-static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
-{
- u32 enabled = 0;
-
- if (eth->soc->dma_type & MTK_PDMA)
- enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- if (eth->soc->dma_type & MTK_QDMA)
- enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-
- return enabled;
-}
-
-static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
- unsigned char *macaddr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mac->hw->page_lock, flags);
- mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
- mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
- (macaddr[4] << 8) | macaddr[5],
- MTK_GDMA1_MAC_ADRL);
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static int mtk_set_mac_address(struct net_device *dev, void *p)
-{
- int ret = eth_mac_addr(dev, p);
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- if (ret)
- return ret;
-
- if (eth->soc->set_mac)
- eth->soc->set_mac(mac, dev->dev_addr);
- else
- mtk_hw_set_macaddr(mac, p);
-
- return 0;
-}
-
-static inline int mtk_max_frag_size(int mtu)
-{
- /* make sure buf_size will be at least MAX_RX_LENGTH */
- if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
- mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
-
- return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
-static inline int mtk_max_buf_size(int frag_size)
-{
- int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- WARN_ON(buf_size < MAX_RX_LENGTH);
-
- return buf_size;
-}
-
-static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
- struct mtk_rx_dma *dma_rxd)
-{
- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
- rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
- rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
- rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-}
-
-static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
- struct mtk_tx_dma *dma_txd)
-{
- WRITE_ONCE(dma_txd->txd1, txd->txd1);
- WRITE_ONCE(dma_txd->txd3, txd->txd3);
- WRITE_ONCE(dma_txd->txd4, txd->txd4);
- /* clean dma done flag last */
- WRITE_ONCE(dma_txd->txd2, txd->txd2);
-}
-
-static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
- int i;
-
- if (ring->rx_data && ring->rx_dma) {
- for (i = 0; i < ring->rx_ring_size; i++) {
- if (!ring->rx_data[i])
- continue;
- if (!ring->rx_dma[i].rxd1)
- continue;
- dma_unmap_single(eth->dev,
- ring->rx_dma[i].rxd1,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- skb_free_frag(ring->rx_data[i]);
- }
- kfree(ring->rx_data);
- ring->rx_data = NULL;
- }
-
- if (ring->rx_dma) {
- dma_free_coherent(eth->dev,
- ring->rx_ring_size * sizeof(*ring->rx_dma),
- ring->rx_dma,
- ring->rx_phys);
- ring->rx_dma = NULL;
- }
-}
-
-static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
- int i, pad = 0;
-
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
- ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
- ring->rx_ring_size = eth->soc->dma_ring_size;
- ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
- GFP_KERNEL);
- if (!ring->rx_data)
- goto no_rx_mem;
-
- for (i = 0; i < ring->rx_ring_size; i++) {
- ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
- if (!ring->rx_data[i])
- goto no_rx_mem;
- }
-
- ring->rx_dma =
- dma_alloc_coherent(eth->dev,
- ring->rx_ring_size * sizeof(*ring->rx_dma),
- &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
- if (!ring->rx_dma)
- goto no_rx_mem;
-
- if (!eth->soc->rx_2b_offset)
- pad = NET_IP_ALIGN;
-
- for (i = 0; i < ring->rx_ring_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->rx_data[i] + NET_SKB_PAD + pad,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- goto no_rx_mem;
- ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
-
- if (eth->soc->rx_sg_dma)
- ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
- else
- ring->rx_dma[i].rxd2 = RX_DMA_LSO;
- }
- ring->rx_calc_idx = ring->rx_ring_size - 1;
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- return 0;
-
-no_rx_mem:
- return -ENOMEM;
-}
-
-static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
-{
- if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- }
- if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
- dma_unmap_page(dev,
- dma_unmap_addr(tx_buf, dma_addr1),
- dma_unmap_len(tx_buf, dma_len1),
- DMA_TO_DEVICE);
-
- tx_buf->flags = 0;
- if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
- dev_kfree_skb_any(tx_buf->skb);
- tx_buf->skb = NULL;
-}
-
-static void mtk_pdma_tx_clean(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i;
-
- if (ring->tx_buf) {
- for (i = 0; i < ring->tx_ring_size; i++)
- mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
- kfree(ring->tx_buf);
- ring->tx_buf = NULL;
- }
-
- if (ring->tx_dma) {
- dma_free_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- ring->tx_dma,
- ring->tx_phys);
- ring->tx_dma = NULL;
- }
-}
-
-static void mtk_qdma_tx_clean(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i;
-
- if (ring->tx_buf) {
- for (i = 0; i < ring->tx_ring_size; i++)
- mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
- kfree(ring->tx_buf);
- ring->tx_buf = NULL;
- }
-
- if (ring->tx_dma) {
- dma_free_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- ring->tx_dma,
- ring->tx_phys);
- ring->tx_dma = NULL;
- }
-}
-
-void mtk_stats_update_mac(struct mtk_mac *mac)
-{
- struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
- u64 stats;
-
- base += hw_stats->reg_offset;
-
- u64_stats_update_begin(&hw_stats->syncp);
-
- if (mac->hw->soc->new_stats) {
- hw_stats->rx_bytes += mtk_r32(mac->hw, base);
- stats = mtk_r32(mac->hw, base + 0x04);
- if (stats)
- hw_stats->rx_bytes += (stats << 32);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x24);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
- stats = mtk_r32(mac->hw, base + 0x34);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
- } else {
- hw_stats->tx_bytes += mtk_r32(mac->hw, base);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
- hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x3c);
- }
-
- u64_stats_update_end(&hw_stats->syncp);
-}
-
-static void mtk_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *storage)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
- unsigned int start;
-
- if (!base) {
- netdev_stats_to_stats64(storage, &dev->stats);
- return;
- }
-
- if (netif_running(dev) && netif_device_present(dev)) {
- if (spin_trylock(&hw_stats->stats_lock)) {
- mtk_stats_update_mac(mac);
- spin_unlock(&hw_stats->stats_lock);
- }
- }
-
- do {
- start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
- storage->rx_packets = hw_stats->rx_packets;
- storage->tx_packets = hw_stats->tx_packets;
- storage->rx_bytes = hw_stats->rx_bytes;
- storage->tx_bytes = hw_stats->tx_bytes;
- storage->collisions = hw_stats->tx_collisions;
- storage->rx_length_errors = hw_stats->rx_short_errors +
- hw_stats->rx_long_errors;
- storage->rx_over_errors = hw_stats->rx_overflow;
- storage->rx_crc_errors = hw_stats->rx_fcs_errors;
- storage->rx_errors = hw_stats->rx_checksum_errors;
- storage->tx_aborted_errors = hw_stats->tx_skip;
- } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
-
- storage->tx_errors = dev->stats.tx_errors;
- storage->rx_dropped = dev->stats.rx_dropped;
- storage->tx_dropped = dev->stats.tx_dropped;
-}
-
-static int mtk_vlan_rx_add_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 idx = (vid & 0xf);
- u32 vlan_cfg;
-
- if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
- return 0;
-
- if (test_bit(idx, &eth->vlan_map)) {
- netdev_warn(dev, "disable tx vlan offload\n");
- dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
- netdev_update_features(dev);
- } else {
- vlan_cfg = mtk_r32(eth,
- mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- ((idx >> 1) << 2));
- if (idx & 0x1) {
- vlan_cfg &= 0xffff;
- vlan_cfg |= (vid << 16);
- } else {
- vlan_cfg &= 0xffff0000;
- vlan_cfg |= vid;
- }
- mtk_w32(eth,
- vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- ((idx >> 1) << 2));
- set_bit(idx, &eth->vlan_map);
- }
-
- return 0;
-}
-
-static int mtk_vlan_rx_kill_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 idx = (vid & 0xf);
-
- if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
- return 0;
-
- clear_bit(idx, &eth->vlan_map);
-
- return 0;
-}
-
-static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
-{
- barrier();
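- /* free = ring size minus in-flight; the mask assumes tx_ring_size is a power of two */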
- return (u32)(ring->tx_ring_size -
- ((ring->tx_next_idx - ring->tx_free_idx) &
- (ring->tx_ring_size - 1)));
-}
-
-static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
-{
- unsigned int len;
- int ret;
-
- if (unlikely(skb->len >= VLAN_ETH_ZLEN))
- return 0;
-
- if (eth->soc->padding_64b && !eth->soc->padding_bug)
- return 0;
-
- if (skb_vlan_tag_present(skb))
- len = ETH_ZLEN;
- else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- len = VLAN_ETH_ZLEN;
- else if (!eth->soc->padding_64b)
- len = ETH_ZLEN;
- else
- return 0;
-
- if (skb->len >= len)
- return 0;
-
- ret = skb_pad(skb, len - skb->len);
- if (ret < 0)
- return ret;
- skb->len = len;
- skb_set_tail_pointer(skb, len);
-
- return ret;
-}
-
-static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
- int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct skb_frag_struct *frag;
- struct mtk_tx_dma txd, *ptxd;
- struct mtk_tx_buf *tx_buf;
- int i, j, k, frag_size, frag_map_size, offset;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
- u32 def_txd4;
-
- if (mtk_skb_padto(skb, eth)) {
- netif_warn(eth, tx_err, dev, "tx padding failed!\n");
- return -1;
- }
-
- tx_buf = &ring->tx_buf[ring->tx_next_idx];
- memset(tx_buf, 0, sizeof(*tx_buf));
- memset(&txd, 0, sizeof(txd));
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- /* init tx descriptor */
- def_txd4 = eth->soc->txd4;
- txd.txd4 = def_txd4;
-
- if (eth->soc->mac_count > 1)
- txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
- if (gso)
- txd.txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd.txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb)) {
- u16 tag = skb_vlan_tag_get(skb);
-
- txd.txd4 |= TX_DMA_INS_VLAN |
- ((tag >> VLAN_PRIO_SHIFT) << 4) |
- (tag & 0xF);
- }
-
- mapped_addr = dma_map_single(&dev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- return -1;
-
- txd.txd1 = mapped_addr;
- txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
-
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
- /* TX SG offload */
- j = ring->tx_next_idx;
- k = 0;
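- /* each pdma descriptor carries two buffers; k tracks which slot the next fragment uses */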
- for (i = 0; i < nr_frags; i++) {
- offset = 0;
- frag = &skb_shinfo(skb)->frags[i];
- frag_size = skb_frag_size(frag);
-
- while (frag_size > 0) {
- frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- goto err_dma;
-
- if (k & 0x1) {
- j = NEXT_TX_DESP_IDX(j);
- txd.txd1 = mapped_addr;
- txd.txd2 = TX_DMA_PLEN0(frag_map_size);
- txd.txd4 = def_txd4;
-
- tx_buf = &ring->tx_buf[j];
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
- dma_unmap_addr_set(tx_buf, dma_addr0,
- mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0,
- frag_map_size);
- } else {
- txd.txd3 = mapped_addr;
- txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
-
- tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
- tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
- dma_unmap_addr_set(tx_buf, dma_addr1,
- mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len1,
- frag_map_size);
-
- if (!((i == (nr_frags - 1)) &&
- (frag_map_size == frag_size))) {
- mtk_set_txd_pdma(&txd,
- &ring->tx_dma[j]);
- memset(&txd, 0, sizeof(txd));
- }
- }
- frag_size -= frag_map_size;
- offset += frag_map_size;
- k++;
- }
- }
-
- /* set last segment */
- if (k & 0x1)
- txd.txd2 |= TX_DMA_LS1;
- else
- txd.txd2 |= TX_DMA_LS0;
- mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
-
- /* store skb to cleanup */
- tx_buf->skb = skb;
-
- netdev_sent_queue(dev, skb->len);
- skb_tx_timestamp(skb);
-
- ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
- mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
-
- return 0;
-
-err_dma:
- j = ring->tx_next_idx;
- for (i = 0; i < tx_num; i++) {
- ptxd = &ring->tx_dma[j];
- tx_buf = &ring->tx_buf[j];
-
- /* unmap dma */
- mtk_txd_unmap(&dev->dev, tx_buf);
-
- ptxd->txd2 = TX_DMA_DESP2_DEF;
- j = NEXT_TX_DESP_IDX(j);
- }
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
- return -1;
-}
-
-/* the qdma core needs scratch memory to be set up */
-static int mtk_init_fq_dma(struct mtk_eth *eth)
-{
- dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
- int cnt = eth->soc->dma_ring_size;
- int i;
-
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
- GFP_ATOMIC | __GFP_ZERO);
- if (unlikely(!eth->scratch_ring))
- return -ENOMEM;
-
- eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
- GFP_KERNEL);
- if (unlikely(!eth->scratch_head))
- return -ENOMEM;
- dma_addr = dma_map_single(eth->dev,
- eth->scratch_head, cnt * QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- return -ENOMEM;
-
- memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
-
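- /* chain the scratch descriptors: txd1 points at the backing page, txd2 at the next descriptor */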
- for (i = 0; i < cnt; i++) {
- eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
- if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
- ((i + 1) * sizeof(struct mtk_tx_dma)));
- eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
- }
-
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
- mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
-
- return 0;
-}
-
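-/* translate a descriptor's bus address back to its virtual address within the tx ring */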
-static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
-{
- void *ret = ring->tx_dma;
-
- return ret + (desc - ring->tx_phys);
-}
-
-static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
-{
- return mtk_qdma_phys_to_virt(ring, txd->txd2);
-}
-
-static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
-{
- int idx = txd - ring->tx_dma;
-
- return &ring->tx_buf[idx];
-}
-
-static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
- int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_dma *itxd, *txd;
- struct mtk_tx_buf *tx_buf;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
- int i, n_desc = 1;
- u32 txd4 = eth->soc->txd4;
-
- itxd = ring->tx_next_free;
- if (itxd == ring->tx_last_free)
- return -ENOMEM;
-
- if (eth->soc->mac_count > 1)
- txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- if (gso)
- txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb))
- txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
-
- mapped_addr = dma_map_single(&dev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- return -ENOMEM;
-
- WRITE_ONCE(itxd->txd1, mapped_addr);
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
- /* TX SG offload */
- txd = itxd;
- nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- unsigned int offset = 0;
- int frag_size = skb_frag_size(frag);
-
- while (frag_size) {
- bool last_frag = false;
- unsigned int frag_map_size;
-
- txd = mtk_tx_next_qdma(ring, txd);
- if (txd == ring->tx_last_free)
- goto err_dma;
-
- n_desc++;
- frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- goto err_dma;
-
- if (i == nr_frags - 1 &&
- (frag_size - frag_map_size) == 0)
- last_frag = true;
-
- WRITE_ONCE(txd->txd1, mapped_addr);
- WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
- TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0) |
- mac->id);
- WRITE_ONCE(txd->txd4, 0);
-
- tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
- frag_size -= frag_map_size;
- offset += frag_map_size;
- }
- }
-
- /* store skb to cleanup */
- tx_buf->skb = skb;
-
- WRITE_ONCE(itxd->txd4, txd4);
- WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
- (!nr_frags * TX_DMA_LS0)));
-
- netdev_sent_queue(dev, skb->len);
- skb_tx_timestamp(skb);
-
- ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
- atomic_sub(n_desc, &ring->tx_free_count);
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
-
- return 0;
-
-err_dma:
- do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
-
- /* unmap dma */
- mtk_txd_unmap(&dev->dev, tx_buf);
-
- itxd->txd3 = TX_DMA_DESP2_DEF;
- itxd = mtk_tx_next_qdma(ring, itxd);
- } while (itxd != txd);
-
- return -ENOMEM;
-}
-
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
-{
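- /* worst-case descriptor estimate; two buffers fit in one pdma descriptor, hence the divide by two */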
- int i, nfrags;
- struct skb_frag_struct *frag;
-
- nfrags = 1;
- if (skb_is_gso(skb)) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
- }
- } else {
- nfrags += skb_shinfo(skb)->nr_frags;
- }
-
- return DIV_ROUND_UP(nfrags, 2);
-}
-
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct net_device_stats *stats = &dev->stats;
- int tx_num;
- int len = skb->len;
- bool gso = false;
-
- tx_num = mtk_cal_txd_req(skb);
- if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
- netif_stop_queue(dev);
- netif_err(eth, tx_queued, dev,
- "Tx Ring full when queue awake!\n");
- return NETDEV_TX_BUSY;
- }
-
- /* TSO: fill MSS info in tcp checksum field */
- if (skb_is_gso(skb)) {
- if (skb_cow_head(skb, 0)) {
- netif_warn(eth, tx_err, dev,
- "GSO expand head fail.\n");
- goto drop;
- }
-
- if (skb_shinfo(skb)->gso_type &
- (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- gso = true;
- tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
- }
- }
-
- if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
- goto drop;
-
- stats->tx_packets++;
- stats->tx_bytes += len;
-
- if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
- netif_stop_queue(dev);
- smp_mb();
- if (unlikely(atomic_read(&ring->tx_free_count) >
- ring->tx_thresh))
- netif_wake_queue(dev);
- }
-
- return NETDEV_TX_OK;
-
-drop:
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static int mtk_poll_rx(struct napi_struct *napi, int budget,
- struct mtk_eth *eth, u32 rx_intr)
-{
- struct mtk_soc_data *soc = eth->soc;
- struct mtk_rx_ring *ring = &eth->rx_ring[0];
- int idx = ring->rx_calc_idx;
- u32 checksum_bit;
- struct sk_buff *skb;
- u8 *data, *new_data;
- struct mtk_rx_dma *rxd, trxd;
- int done = 0, pad;
-
- if (eth->soc->hw_features & NETIF_F_RXCSUM)
- checksum_bit = soc->checksum_bit;
- else
- checksum_bit = 0;
-
- if (eth->soc->rx_2b_offset)
- pad = 0;
- else
- pad = NET_IP_ALIGN;
-
- while (done < budget) {
- struct net_device *netdev;
- unsigned int pktlen;
- dma_addr_t dma_addr;
- int mac = 0;
-
- idx = NEXT_RX_DESP_IDX(idx);
- rxd = &ring->rx_dma[idx];
- data = ring->rx_data[idx];
-
- mtk_get_rxd(&trxd, rxd);
- if (!(trxd.rxd2 & RX_DMA_DONE))
- break;
-
- /* find out which mac the packet comes from. values start at 1 */
- if (eth->soc->mac_count > 1) {
- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK;
- mac--;
- if (mac < 0 || mac >= eth->soc->mac_count)
- goto release_desc;
- }
-
- netdev = eth->netdev[mac];
-
- if (unlikely(!netdev))
- goto release_desc;
-
- /* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(&netdev->dev,
- new_data + NET_SKB_PAD + pad,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
- skb_free_frag(new_data);
- goto release_desc;
- }
-
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_data));
- goto release_desc;
- }
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
- dma_unmap_single(&netdev->dev, trxd.rxd1,
- ring->rx_buf_size, DMA_FROM_DEVICE);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
- skb->dev = netdev;
- skb_put(skb, pktlen);
- if (trxd.rxd4 & checksum_bit)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
- skb->protocol = eth_type_trans(skb, netdev);
-
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += pktlen;
-
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- RX_DMA_VID(trxd.rxd3))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
- napi_gro_receive(napi, skb);
-
- ring->rx_data[idx] = new_data;
- rxd->rxd1 = (unsigned int)dma_addr;
-
-release_desc:
- if (eth->soc->rx_sg_dma)
- rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
- else
- rxd->rxd2 = RX_DMA_LSO;
-
- ring->rx_calc_idx = idx;
- /* make sure that all changes to the dma ring are flushed before
- * we continue
- */
- wmb();
- if (eth->soc->dma_type == MTK_QDMA)
- mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
- else
- mtk_reg_w32(eth, ring->rx_calc_idx,
- MTK_REG_RX_CALC_IDX0);
- done++;
- }
-
- if (done < budget)
- mtk_irq_ack(eth, rx_intr);
-
- return done;
-}
-
-static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
- struct sk_buff *skb;
- struct mtk_tx_buf *tx_buf;
- int done = 0;
- u32 idx, hwidx;
- struct mtk_tx_ring *ring = &eth->tx_ring;
- unsigned int bytes = 0;
-
- idx = ring->tx_free_idx;
- hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
-
- while ((idx != hwidx) && budget) {
- tx_buf = &ring->tx_buf[idx];
- skb = tx_buf->skb;
-
- if (!skb)
- break;
-
- if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
- bytes += skb->len;
- done++;
- budget--;
- }
- mtk_txd_unmap(eth->dev, tx_buf);
- idx = NEXT_TX_DESP_IDX(idx);
- }
- ring->tx_free_idx = idx;
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
- /* read the hw index again to make sure no new tx packets arrived */
- if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
- *tx_again = 1;
-
- if (done)
- netdev_completed_queue(*eth->netdev, done, bytes);
-
- return done;
-}
-
-static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
- struct mtk_tx_buf *tx_buf;
- int total = 0, done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
- u32 cpu, dma;
- int i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
-
- cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
-
- desc = mtk_qdma_phys_to_virt(ring, cpu);
-
- while ((cpu != dma) && budget) {
- u32 next_cpu = desc->txd2;
- int mac;
-
- desc = mtk_tx_next_qdma(ring, desc);
- if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
- break;
-
- mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
- TX_DMA_FPORT_MASK;
- mac--;
-
- tx_buf = mtk_desc_to_tx_buf(ring, desc);
- skb = tx_buf->skb;
- if (!skb)
- break;
-
- if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
- budget--;
- }
- mtk_txd_unmap(eth->dev, tx_buf);
-
- ring->tx_last_free->txd2 = next_cpu;
- ring->tx_last_free = desc;
- atomic_inc(&ring->tx_free_count);
-
- cpu = next_cpu;
- }
-
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
-
- /* read the hw index again to make sure no new tx packets arrived */
- if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
- *tx_again = true;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- if (!done[i])
- continue;
- netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
- total += done[i];
- }
-
- return total;
-}
-
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
- bool *tx_again)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct net_device *netdev = eth->netdev[0];
- int done;
-
- done = eth->tx_ring.tx_poll(eth, budget, tx_again);
- if (!*tx_again)
- mtk_irq_ack(eth, tx_intr);
-
- if (!done)
- return 0;
-
- smp_mb();
- if (unlikely(!netif_queue_stopped(netdev)))
- return done;
-
- if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
- netif_wake_queue(netdev);
-
- return done;
-}
-
-static void mtk_stats_update(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- if (!eth->mac[i] || !eth->mac[i]->hw_stats)
- continue;
- if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
- mtk_stats_update_mac(eth->mac[i]);
- spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
- }
- }
-}
-
-static int mtk_poll(struct napi_struct *napi, int budget)
-{
- struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
- u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
- int tx_done, rx_done;
- bool tx_again = false;
-
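- /* a single napi instance services tx completion, rx and the stats overflow interrupt */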
- status = mtk_irq_pending(eth);
- mtk_status = mtk_irq_pending_status(eth);
- tx_intr = eth->soc->tx_int;
- rx_intr = eth->soc->rx_int;
- status_intr = eth->soc->status_int;
- tx_done = 0;
- rx_done = 0;
- tx_again = 0;
-
- if (status & tx_intr)
- tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
-
- if (status & rx_intr)
- rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
-
- if (unlikely(mtk_status & status_intr)) {
- mtk_stats_update(eth);
- mtk_irq_ack_status(eth, status_intr);
- }
-
- if (unlikely(netif_msg_intr(eth))) {
- mask = mtk_irq_enabled(eth);
- netdev_info(eth->netdev[0],
- "done tx %d, rx %d, intr 0x%08x/0x%x\n",
- tx_done, rx_done, status, mask);
- }
-
- if (tx_again || rx_done == budget)
- return budget;
-
- status = mtk_irq_pending(eth);
- if (status & (tx_intr | rx_intr))
- return budget;
-
- napi_complete(napi);
- mtk_irq_enable(eth, tx_intr | rx_intr);
-
- return rx_done;
-}
-
-static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
-{
- int i;
- struct mtk_tx_ring *ring = &eth->tx_ring;
-
- ring->tx_ring_size = eth->soc->dma_ring_size;
- ring->tx_free_idx = 0;
- ring->tx_next_idx = 0;
- ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
- MAX_SKB_FRAGS);
-
- ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
- GFP_KERNEL);
- if (!ring->tx_buf)
- goto no_tx_mem;
-
- ring->tx_dma =
- dma_alloc_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
- if (!ring->tx_dma)
- goto no_tx_mem;
-
- for (i = 0; i < ring->tx_ring_size; i++) {
- ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
- ring->tx_dma[i].txd4 = eth->soc->txd4;
- }
-
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
- ring->tx_map = mtk_pdma_tx_map;
- ring->tx_poll = mtk_pdma_tx_poll;
- ring->tx_clean = mtk_pdma_tx_clean;
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
- mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
- mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
- mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-
-no_tx_mem:
- return -ENOMEM;
-}
-
-static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = sizeof(*ring->tx_dma);
-
- ring->tx_ring_size = eth->soc->dma_ring_size;
- ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
- GFP_KERNEL);
- if (!ring->tx_buf)
- goto no_tx_mem;
-
- ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
- &ring->tx_phys,
- GFP_ATOMIC | __GFP_ZERO);
- if (!ring->tx_dma)
- goto no_tx_mem;
-
- for (i = 0; i < ring->tx_ring_size; i++) {
- int next = (i + 1) % ring->tx_ring_size;
- u32 next_ptr = ring->tx_phys + next * sz;
-
- ring->tx_dma[i].txd2 = next_ptr;
- ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
- }
-
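- /* keep a two descriptor gap so tx_next_free can never catch up with tx_last_free */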
- atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
- ring->tx_next_free = &ring->tx_dma[0];
- ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
- ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
- MAX_SKB_FRAGS);
-
- ring->tx_map = mtk_qdma_tx_map;
- ring->tx_poll = mtk_qdma_tx_poll;
- ring->tx_clean = mtk_qdma_tx_clean;
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
- mtk_w32(eth,
- ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth,
- ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
- MTK_QTX_DRX_PTR);
-
- return 0;
-
-no_tx_mem:
- return -ENOMEM;
-}
-
-static int mtk_qdma_init(struct mtk_eth *eth, int ring)
-{
- int err;
-
- err = mtk_init_fq_dma(eth);
- if (err)
- return err;
-
- err = mtk_qdma_tx_alloc_tx(eth);
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
- if (err)
- return err;
-
- mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
- mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
- mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
- mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
-
- /* Enable random early drop and set drop threshold automatically */
- mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
-
- return 0;
-}
-
-static int mtk_pdma_qdma_init(struct mtk_eth *eth)
-{
- int err = mtk_qdma_init(eth, 1);
-
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
- if (err)
- return err;
-
- mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
- mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
- mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
- mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-}
-
-static int mtk_pdma_init(struct mtk_eth *eth)
-{
- struct mtk_rx_ring *ring = &eth->rx_ring[0];
- int err;
-
- err = mtk_pdma_tx_alloc(eth);
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, ring);
- if (err)
- return err;
-
- mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
- mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
- mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
- mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-}
-
-static void mtk_dma_free(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++)
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
- eth->tx_ring.tx_clean(eth);
- mtk_clean_rx(eth, &eth->rx_ring[0]);
- mtk_clean_rx(eth, &eth->rx_ring[1]);
- kfree(eth->scratch_head);
-}
-
-static void mtk_tx_timeout(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_ring *ring = &eth->tx_ring;
-
- eth->netdev[mac->id]->stats.tx_errors++;
- netif_err(eth, tx_err, dev,
- "transmit timed out\n");
- if (eth->soc->dma_type & MTK_PDMA) {
- netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
- mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
- netif_info(eth, drv, dev,
- "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
- 0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
- mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
- mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
- mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
- ring->tx_free_idx,
- ring->tx_next_idx);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
- mtk_r32(eth, MTK_QDMA_GLO_CFG));
- netif_info(eth, drv, dev,
- "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
- 0, mtk_r32(eth, MTK_QTX_CTX_PTR),
- mtk_r32(eth, MTK_QTX_DTX_PTR),
- mtk_r32(eth, MTK_QTX_CRX_PTR),
- mtk_r32(eth, MTK_QTX_DRX_PTR),
- atomic_read(&ring->tx_free_count));
- }
- netif_info(eth, drv, dev,
- "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
- 0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
- mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
- mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
- mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
-
- schedule_work(&mac->pending_work);
-}
-
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
-{
- struct mtk_eth *eth = _eth;
- u32 status, int_mask;
-
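- /* mask tx/rx interrupts here; mtk_poll re-enables them once napi completes */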
- status = mtk_irq_pending(eth);
- if (unlikely(!status))
- return IRQ_NONE;
-
- int_mask = (eth->soc->rx_int | eth->soc->tx_int);
- if (likely(status & int_mask)) {
- if (likely(napi_schedule_prep(&eth->rx_napi)))
- __napi_schedule(&eth->rx_napi);
- } else {
- mtk_irq_ack(eth, status);
- }
- mtk_irq_disable(eth, int_mask);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mtk_poll_controller(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
-
- mtk_irq_disable(eth, int_mask);
- mtk_handle_irq(dev->irq, dev);
- mtk_irq_enable(eth, int_mask);
-}
-#endif
-
-int mtk_set_clock_cycle(struct mtk_eth *eth)
-{
- unsigned long sysclk = eth->sysclk;
-
- sysclk /= MTK_US_CYC_CNT_DIVISOR;
- sysclk <<= MTK_US_CYC_CNT_SHIFT;
-
- mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
- ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
- sysclk,
- MTK_GLO_CFG);
- return 0;
-}
-
-void mtk_fwd_config(struct mtk_eth *eth)
-{
- u32 fwd_cfg;
-
- fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-
- /* disable jumbo frame */
- if (eth->soc->jumbo_frame)
- fwd_cfg &= ~MTK_GDM1_JMB_EN;
-
- /* set unicast/multicast/broadcast frame to cpu */
- fwd_cfg &= ~0xffff;
-
- mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-}
-
-void mtk_csum_config(struct mtk_eth *eth)
-{
- if (eth->soc->hw_features & NETIF_F_RXCSUM)
- mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
- (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
- MTK_GDMA1_FWD_CFG);
- else
- mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
- ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
- MTK_GDMA1_FWD_CFG);
- if (eth->soc->hw_features & NETIF_F_IP_CSUM)
- mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
- (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
- MTK_CDMA_CSG_CFG);
- else
- mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
- ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
- MTK_CDMA_CSG_CFG);
-}
-
-static int mtk_start_dma(struct mtk_eth *eth)
-{
- unsigned long flags;
- u32 val;
- int err;
-
- if (eth->soc->dma_type == MTK_PDMA)
- err = mtk_pdma_init(eth);
- else if (eth->soc->dma_type == MTK_QDMA)
- err = mtk_qdma_init(eth, 0);
- else
- err = mtk_pdma_qdma_init(eth);
- if (err) {
- mtk_dma_free(eth);
- return err;
- }
-
- spin_lock_irqsave(&eth->page_lock, flags);
-
- val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
- if (eth->soc->rx_2b_offset)
- val |= MTK_RX_2B_OFFSET;
- val |= eth->soc->pdma_glo_cfg;
-
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
-
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
-
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
- return 0;
-}
-
-static int mtk_open(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
-
- if (!atomic_read(&eth->dma_refcnt)) {
- int err = mtk_start_dma(eth);
-
- if (err)
- return err;
-
- napi_enable(&eth->rx_napi);
- mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
- }
- atomic_inc(&eth->dma_refcnt);
-
- if (eth->phy)
- eth->phy->start(mac);
-
- if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
- netif_carrier_on(dev);
-
- netif_start_queue(dev);
- eth->soc->fwd_config(eth);
-
- return 0;
-}
-
-static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
-{
- unsigned long flags;
- u32 val;
- int i;
-
- /* stop the dma engine */
- spin_lock_irqsave(&eth->page_lock, flags);
- val = mtk_r32(eth, glo_cfg);
- mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
- glo_cfg);
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
- /* wait for dma stop */
- for (i = 0; i < 10; i++) {
- val = mtk_r32(eth, glo_cfg);
- if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
- msleep(20);
- continue;
- }
- break;
- }
-}
-
-static int mtk_stop(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- netif_tx_disable(dev);
- if (eth->phy)
- eth->phy->stop(mac);
-
- if (!atomic_dec_and_test(&eth->dma_refcnt))
- return 0;
-
- mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
- napi_disable(&eth->rx_napi);
-
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
-
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-
- mtk_dma_free(eth);
-
- return 0;
-}
-
-static int __init mtk_init_hw(struct mtk_eth *eth)
-{
- int i, err;
-
- eth->soc->reset_fe(eth);
-
- if (eth->soc->switch_init)
- if (eth->soc->switch_init(eth)) {
- dev_err(eth->dev, "failed to initialize switch core\n");
- return -ENODEV;
- }
-
- err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
-
- err = mtk_mdio_init(eth);
- if (err)
- return err;
-
- /* disable delay and normal interrupt */
- mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-
- /* frame engine will push the VLAN tag according to the VIDX field in the Tx desc */
- if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
- for (i = 0; i < 16; i += 2)
- mtk_w32(eth, ((i + 1) << 16) + i,
- mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- (i * 2));
-
- if (eth->soc->fwd_config(eth))
- dev_err(eth->dev, "unable to get clock\n");
-
- if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
- mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
- mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
- }
-
- return 0;
-}
-
-static int __init mtk_init(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct device_node *port;
- const char *mac_addr;
- int err;
-
- mac_addr = of_get_mac_address(mac->of_node);
- if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
-
- /* If the mac address is invalid, use random mac address */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- eth_hw_addr_random(dev);
- dev_err(eth->dev, "generated random MAC address %pM\n",
- dev->dev_addr);
- }
- mac->hw->soc->set_mac(mac, dev->dev_addr);
-
- if (eth->soc->port_init)
- for_each_child_of_node(mac->of_node, port)
- if (of_device_is_compatible(port,
- "mediatek,eth-port") &&
- of_device_is_available(port))
- eth->soc->port_init(eth, mac, port);
-
- if (eth->phy) {
- err = eth->phy->connect(mac);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static void mtk_uninit(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- if (eth->phy)
- eth->phy->disconnect(mac);
- mtk_mdio_cleanup(eth);
-
- mtk_irq_disable(eth, ~0);
- free_irq(dev->irq, dev);
-}
-
-static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
- default:
- break;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int mtk_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- int frag_size, old_mtu;
- u32 fwd_cfg;
-
- if (!eth->soc->jumbo_frame)
- return eth_change_mtu(dev, new_mtu);
-
- frag_size = mtk_max_frag_size(new_mtu);
- if (new_mtu < 68 || frag_size > PAGE_SIZE)
- return -EINVAL;
-
- old_mtu = dev->mtu;
- dev->mtu = new_mtu;
-
- /* return early if the buffer sizes will not change */
- if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
- return 0;
- if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
- return 0;
-
- if (new_mtu <= ETH_DATA_LEN)
- eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
- else
- eth->rx_ring[0].frag_size = PAGE_SIZE;
- eth->rx_ring[0].rx_buf_size =
- mtk_max_buf_size(eth->rx_ring[0].frag_size);
-
- if (!netif_running(dev))
- return 0;
-
- mtk_stop(dev);
- fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
- if (new_mtu <= ETH_DATA_LEN) {
- fwd_cfg &= ~MTK_GDM1_JMB_EN;
- } else {
- fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
- fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
- MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
- }
- mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-
- return mtk_open(dev);
-}
-
-static void mtk_pending_work(struct work_struct *work)
-{
- struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
- struct mtk_eth *eth = mac->hw;
- struct net_device *dev = eth->netdev[mac->id];
- int err;
-
- rtnl_lock();
- mtk_stop(dev);
-
- err = mtk_open(dev);
- if (err) {
- netif_alert(eth, ifup, dev,
- "Driver up/down cycle failed, closing device.\n");
- dev_close(dev);
- }
- rtnl_unlock();
-}
-
-static int mtk_cleanup(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
- if (!eth->netdev[i])
- continue;
-
- unregister_netdev(eth->netdev[i]);
- free_netdev(eth->netdev[i]);
- cancel_work_sync(&mac->pending_work);
- }
-
- return 0;
-}
-
-static const struct net_device_ops mtk_netdev_ops = {
- .ndo_init = mtk_init,
- .ndo_uninit = mtk_uninit,
- .ndo_open = mtk_open,
- .ndo_stop = mtk_stop,
- .ndo_start_xmit = mtk_start_xmit,
- .ndo_set_mac_address = mtk_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mtk_do_ioctl,
- .ndo_change_mtu = mtk_change_mtu,
- .ndo_tx_timeout = mtk_tx_timeout,
- .ndo_get_stats64 = mtk_get_stats64,
- .ndo_vlan_rx_add_vid = mtk_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = mtk_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mtk_poll_controller,
-#endif
-};
-
-static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-{
- struct mtk_mac *mac;
- const __be32 *_id = of_get_property(np, "reg", NULL);
- int id, err;
-
- if (!_id) {
- dev_err(eth->dev, "missing mac id\n");
- return -EINVAL;
- }
- id = be32_to_cpup(_id);
- if (id >= eth->soc->mac_count || eth->netdev[id]) {
- dev_err(eth->dev, "%d is not a valid mac id\n", id);
- return -EINVAL;
- }
-
- eth->netdev[id] = alloc_etherdev(sizeof(*mac));
- if (!eth->netdev[id]) {
- dev_err(eth->dev, "alloc_etherdev failed\n");
- return -ENOMEM;
- }
- mac = netdev_priv(eth->netdev[id]);
- eth->mac[id] = mac;
- mac->id = id;
- mac->hw = eth;
- mac->of_node = np;
- INIT_WORK(&mac->pending_work, mtk_pending_work);
-
- if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
- mac->hw_stats = devm_kzalloc(eth->dev,
- sizeof(*mac->hw_stats),
- GFP_KERNEL);
- if (!mac->hw_stats) {
- err = -ENOMEM;
- goto free_netdev;
- }
- spin_lock_init(&mac->hw_stats->stats_lock);
- mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
- }
-
- SET_NETDEV_DEV(eth->netdev[id], eth->dev);
- eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
- eth->netdev[id]->base_addr = (unsigned long)eth->base;
-
- if (eth->soc->init_data)
- eth->soc->init_data(eth->soc, eth->netdev[id]);
-
- eth->netdev[id]->vlan_features = eth->soc->hw_features &
- ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
- eth->netdev[id]->features |= eth->soc->hw_features;
-
- if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
- eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- mtk_set_ethtool_ops(eth->netdev[id]);
-
- err = register_netdev(eth->netdev[id]);
- if (err) {
- dev_err(eth->dev, "error bringing up device\n");
- err = -ENOMEM;
- goto free_netdev;
- }
- eth->netdev[id]->irq = eth->irq;
- netif_info(eth, probe, eth->netdev[id],
- "mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->netdev[id]->irq);
-
- return 0;
-
-free_netdev:
- free_netdev(eth->netdev[id]);
- return err;
-}
-
-static int mtk_probe(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- const struct of_device_id *match;
- struct device_node *mac_np;
- struct mtk_soc_data *soc;
- struct mtk_eth *eth;
- struct clk *sysclk;
- int err;
-
- device_reset(&pdev->dev);
-
- match = of_match_device(of_mtk_match, &pdev->dev);
- soc = (struct mtk_soc_data *)match->data;
-
- if (soc->reg_table)
- mtk_reg_table = soc->reg_table;
-
- eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
- if (!eth)
- return -ENOMEM;
-
- eth->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(eth->base))
- return PTR_ERR(eth->base);
-
- spin_lock_init(&eth->page_lock);
-
- eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,ethsys");
- if (IS_ERR(eth->ethsys))
- return PTR_ERR(eth->ethsys);
-
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENXIO;
- }
-
- sysclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sysclk)) {
- dev_err(&pdev->dev,
- "the clock is not defined in the devicetree\n");
- return -ENXIO;
- }
- eth->sysclk = clk_get_rate(sysclk);
-
- eth->switch_np = of_parse_phandle(pdev->dev.of_node,
- "mediatek,switch", 0);
- if (soc->has_switch && !eth->switch_np) {
- dev_err(&pdev->dev, "failed to read switch phandle\n");
- return -ENODEV;
- }
-
- eth->dev = &pdev->dev;
- eth->soc = soc;
- eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
-
- err = mtk_init_hw(eth);
- if (err)
- return err;
-
- if (eth->soc->mac_count > 1) {
- for_each_child_of_node(pdev->dev.of_node, mac_np) {
- if (!of_device_is_compatible(mac_np,
- "mediatek,eth-mac"))
- continue;
-
- if (!of_device_is_available(mac_np))
- continue;
-
- err = mtk_add_mac(eth, mac_np);
- if (err)
- goto err_free_dev;
- }
-
- init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
- soc->napi_weight);
- } else {
- err = mtk_add_mac(eth, pdev->dev.of_node);
- if (err)
- goto err_free_dev;
- netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
- soc->napi_weight);
- }
-
- platform_set_drvdata(pdev, eth);
-
- return 0;
-
-err_free_dev:
- mtk_cleanup(eth);
- return err;
-}
-
-static int mtk_remove(struct platform_device *pdev)
-{
- struct mtk_eth *eth = platform_get_drvdata(pdev);
-
- netif_napi_del(&eth->rx_napi);
- mtk_cleanup(eth);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static struct platform_driver mtk_driver = {
- .probe = mtk_probe,
- .remove = mtk_remove,
- .driver = {
- .name = "mtk_soc_eth",
- .of_match_table = of_mtk_match,
- },
-};
-
-module_platform_driver(mtk_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644
index e6ed80433f49..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.h
+++ /dev/null
@@ -1,716 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETH_H
-#define MTK_ETH_H
-
-#include <linux/mii.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/ethtool.h>
-#include <linux/version.h>
-#include <linux/atomic.h>
-
-/* these registers have different offsets depending on the SoC. we use a lookup
- * table for these
- */
-enum mtk_reg {
- MTK_REG_PDMA_GLO_CFG = 0,
- MTK_REG_PDMA_RST_CFG,
- MTK_REG_DLY_INT_CFG,
- MTK_REG_TX_BASE_PTR0,
- MTK_REG_TX_MAX_CNT0,
- MTK_REG_TX_CTX_IDX0,
- MTK_REG_TX_DTX_IDX0,
- MTK_REG_RX_BASE_PTR0,
- MTK_REG_RX_MAX_CNT0,
- MTK_REG_RX_CALC_IDX0,
- MTK_REG_RX_DRX_IDX0,
- MTK_REG_MTK_INT_ENABLE,
- MTK_REG_MTK_INT_STATUS,
- MTK_REG_MTK_DMA_VID_BASE,
- MTK_REG_MTK_COUNTER_BASE,
- MTK_REG_MTK_RST_GL,
- MTK_REG_MTK_INT_STATUS2,
- MTK_REG_COUNT
-};
-
-/* delayed interrupt bits */
-#define MTK_DELAY_EN_INT 0x80
-#define MTK_DELAY_MAX_INT 0x04
-#define MTK_DELAY_MAX_TOUT 0x04
-#define MTK_DELAY_TIME 20
-#define MTK_DELAY_CHAN (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
- | MTK_DELAY_MAX_TOUT)
-#define MTK_DELAY_INIT ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
-#define MTK_PSE_FQFC_CFG_INIT 0x80504000
-#define MTK_PSE_FQFC_CFG_256Q 0xff908000
-
-/* interrupt bits */
-#define MTK_CNT_PPE_AF BIT(31)
-#define MTK_CNT_GDM_AF BIT(29)
-#define MTK_PSE_P2_FC BIT(26)
-#define MTK_PSE_BUF_DROP BIT(24)
-#define MTK_GDM_OTHER_DROP BIT(23)
-#define MTK_PSE_P1_FC BIT(22)
-#define MTK_PSE_P0_FC BIT(21)
-#define MTK_PSE_FQ_EMPTY BIT(20)
-#define MTK_GE1_STA_CHG BIT(18)
-#define MTK_TX_COHERENT BIT(17)
-#define MTK_RX_COHERENT BIT(16)
-#define MTK_TX_DONE_INT3 BIT(11)
-#define MTK_TX_DONE_INT2 BIT(10)
-#define MTK_TX_DONE_INT1 BIT(9)
-#define MTK_TX_DONE_INT0 BIT(8)
-#define MTK_RX_DONE_INT0 BIT(2)
-#define MTK_TX_DLY_INT BIT(1)
-#define MTK_RX_DLY_INT BIT(0)
-
-#define MTK_RX_DONE_INT MTK_RX_DONE_INT0
-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
-
-#define RT5350_RX_DLY_INT BIT(30)
-#define RT5350_TX_DLY_INT BIT(28)
-#define RT5350_RX_DONE_INT1 BIT(17)
-#define RT5350_RX_DONE_INT0 BIT(16)
-#define RT5350_TX_DONE_INT3 BIT(3)
-#define RT5350_TX_DONE_INT2 BIT(2)
-#define RT5350_TX_DONE_INT1 BIT(1)
-#define RT5350_TX_DONE_INT0 BIT(0)
-
-#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
-#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
- RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
-
-/* registers */
-#define MTK_GDMA_OFFSET 0x0020
-#define MTK_PSE_OFFSET 0x0040
-#define MTK_GDMA2_OFFSET 0x0060
-#define MTK_CDMA_OFFSET 0x0080
-#define MTK_DMA_VID0 0x00a8
-#define MTK_PDMA_OFFSET 0x0100
-#define MTK_PPE_OFFSET 0x0200
-#define MTK_CMTABLE_OFFSET 0x0400
-#define MTK_POLICYTABLE_OFFSET 0x1000
-
-#define MT7621_GDMA_OFFSET 0x0500
-#define MT7620_GDMA_OFFSET 0x0600
-
-#define RT5350_PDMA_OFFSET 0x0800
-#define RT5350_SDM_OFFSET 0x0c00
-
-#define MTK_MDIO_ACCESS 0x00
-#define MTK_MDIO_CFG 0x04
-#define MTK_GLO_CFG 0x08
-#define MTK_RST_GL 0x0C
-#define MTK_INT_STATUS 0x10
-#define MTK_INT_ENABLE 0x14
-#define MTK_MDIO_CFG2 0x18
-#define MTK_FOC_TS_T 0x1C
-
-#define MTK_GDMA1_FWD_CFG (MTK_GDMA_OFFSET + 0x00)
-#define MTK_GDMA1_SCH_CFG (MTK_GDMA_OFFSET + 0x04)
-#define MTK_GDMA1_SHPR_CFG (MTK_GDMA_OFFSET + 0x08)
-#define MTK_GDMA1_MAC_ADRL (MTK_GDMA_OFFSET + 0x0C)
-#define MTK_GDMA1_MAC_ADRH (MTK_GDMA_OFFSET + 0x10)
-
-#define MTK_GDMA2_FWD_CFG (MTK_GDMA2_OFFSET + 0x00)
-#define MTK_GDMA2_SCH_CFG (MTK_GDMA2_OFFSET + 0x04)
-#define MTK_GDMA2_SHPR_CFG (MTK_GDMA2_OFFSET + 0x08)
-#define MTK_GDMA2_MAC_ADRL (MTK_GDMA2_OFFSET + 0x0C)
-#define MTK_GDMA2_MAC_ADRH (MTK_GDMA2_OFFSET + 0x10)
-
-#define MTK_PSE_FQ_CFG (MTK_PSE_OFFSET + 0x00)
-#define MTK_CDMA_FC_CFG (MTK_PSE_OFFSET + 0x04)
-#define MTK_GDMA1_FC_CFG (MTK_PSE_OFFSET + 0x08)
-#define MTK_GDMA2_FC_CFG (MTK_PSE_OFFSET + 0x0C)
-
-#define MTK_CDMA_CSG_CFG (MTK_CDMA_OFFSET + 0x00)
-#define MTK_CDMA_SCH_CFG (MTK_CDMA_OFFSET + 0x04)
-
-#define MT7621_GDMA_FWD_CFG(x) (MT7621_GDMA_OFFSET + (x * 0x1000))
-
-/* FIXME this might be different for different SOCs */
-#define MT7620_GDMA1_FWD_CFG (MT7621_GDMA_OFFSET + 0x00)
-
-#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00)
-#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04)
-#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08)
-#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C)
-#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10)
-#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14)
-#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18)
-#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C)
-#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20)
-#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24)
-#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28)
-#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C)
-#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30)
-#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34)
-#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38)
-#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C)
-#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100)
-#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104)
-#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108)
-#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C)
-#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110)
-#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114)
-#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118)
-#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C)
-#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204)
-#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208)
-#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c)
-#define RT5350_MTK_INT_STATUS (RT5350_PDMA_OFFSET + 0x220)
-#define RT5350_MTK_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228)
-#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280)
-
-#define MTK_PDMA_GLO_CFG (MTK_PDMA_OFFSET + 0x00)
-#define MTK_PDMA_RST_CFG (MTK_PDMA_OFFSET + 0x04)
-#define MTK_PDMA_SCH_CFG (MTK_PDMA_OFFSET + 0x08)
-#define MTK_DLY_INT_CFG (MTK_PDMA_OFFSET + 0x0C)
-#define MTK_TX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x10)
-#define MTK_TX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x14)
-#define MTK_TX_CTX_IDX0 (MTK_PDMA_OFFSET + 0x18)
-#define MTK_TX_DTX_IDX0 (MTK_PDMA_OFFSET + 0x1C)
-#define MTK_TX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x20)
-#define MTK_TX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x24)
-#define MTK_TX_CTX_IDX1 (MTK_PDMA_OFFSET + 0x28)
-#define MTK_TX_DTX_IDX1 (MTK_PDMA_OFFSET + 0x2C)
-#define MTK_RX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x30)
-#define MTK_RX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x34)
-#define MTK_RX_CALC_IDX0 (MTK_PDMA_OFFSET + 0x38)
-#define MTK_RX_DRX_IDX0 (MTK_PDMA_OFFSET + 0x3C)
-#define MTK_TX_BASE_PTR2 (MTK_PDMA_OFFSET + 0x40)
-#define MTK_TX_MAX_CNT2 (MTK_PDMA_OFFSET + 0x44)
-#define MTK_TX_CTX_IDX2 (MTK_PDMA_OFFSET + 0x48)
-#define MTK_TX_DTX_IDX2 (MTK_PDMA_OFFSET + 0x4C)
-#define MTK_TX_BASE_PTR3 (MTK_PDMA_OFFSET + 0x50)
-#define MTK_TX_MAX_CNT3 (MTK_PDMA_OFFSET + 0x54)
-#define MTK_TX_CTX_IDX3 (MTK_PDMA_OFFSET + 0x58)
-#define MTK_TX_DTX_IDX3 (MTK_PDMA_OFFSET + 0x5C)
-#define MTK_RX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x60)
-#define MTK_RX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x64)
-#define MTK_RX_CALC_IDX1 (MTK_PDMA_OFFSET + 0x68)
-#define MTK_RX_DRX_IDX1 (MTK_PDMA_OFFSET + 0x6C)
-
-/* Switch DMA configuration */
-#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00)
-#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04)
-#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08)
-#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C)
-#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10)
-#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100)
-#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104)
-#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108)
-#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C)
-#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110)
-
-#define RT5350_SDM_ICS_EN BIT(16)
-#define RT5350_SDM_TCS_EN BIT(17)
-#define RT5350_SDM_UCS_EN BIT(18)
-
-/* QDMA registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-#define MTK_QRX_BASE_PTR0 0x1900
-#define MTK_QRX_MAX_CNT0 0x1904
-#define MTK_QRX_CRX_IDX0 0x1908
-#define MTK_QRX_DRX_IDX0 0x190C
-#define MTK_QDMA_GLO_CFG 0x1A04
-#define MTK_QDMA_RST_IDX 0x1A08
-#define MTK_QDMA_DELAY_INT 0x1A0C
-#define MTK_QDMA_FC_THRES 0x1A10
-#define MTK_QMTK_INT_STATUS 0x1A18
-#define MTK_QMTK_INT_ENABLE 0x1A1C
-#define MTK_QDMA_HRED2 0x1A44
-
-#define MTK_QTX_CTX_PTR 0x1B00
-#define MTK_QTX_DTX_PTR 0x1B04
-
-#define MTK_QTX_CRX_PTR 0x1B10
-#define MTK_QTX_DRX_PTR 0x1B14
-
-#define MTK_QDMA_FQ_HEAD 0x1B20
-#define MTK_QDMA_FQ_TAIL 0x1B24
-#define MTK_QDMA_FQ_CNT 0x1B28
-#define MTK_QDMA_FQ_BLEN 0x1B2C
-
-#define QDMA_PAGE_SIZE 2048
-#define QDMA_TX_OWNER_CPU BIT(31)
-#define QDMA_TX_SWC BIT(14)
-#define TX_QDMA_SDL(_x) (((_x) & 0x3fff) << 16)
-#define QDMA_RES_THRES 4
-
-/* MDIO_CFG register bits */
-#define MTK_MDIO_CFG_AUTO_POLL_EN BIT(29)
-#define MTK_MDIO_CFG_GP1_BP_EN BIT(16)
-#define MTK_MDIO_CFG_GP1_FRC_EN BIT(15)
-#define MTK_MDIO_CFG_GP1_SPEED_10 (0 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_100 (1 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_1000 (2 << 13)
-#define MTK_MDIO_CFG_GP1_DUPLEX BIT(12)
-#define MTK_MDIO_CFG_GP1_FC_TX BIT(11)
-#define MTK_MDIO_CFG_GP1_FC_RX BIT(10)
-#define MTK_MDIO_CFG_GP1_LNK_DWN BIT(9)
-#define MTK_MDIO_CFG_GP1_AN_FAIL BIT(8)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6)
-#define MTK_MDIO_CFG_TURBO_MII_FREQ BIT(5)
-#define MTK_MDIO_CFG_TURBO_MII_MODE BIT(4)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2)
-#define MTK_MDIO_CFG_TX_CLK_SKEW_0 0
-#define MTK_MDIO_CFG_TX_CLK_SKEW_200 1
-#define MTK_MDIO_CFG_TX_CLK_SKEW_400 2
-#define MTK_MDIO_CFG_TX_CLK_SKEW_INV 3
-
-/* uni-cast port */
-#define MTK_GDM1_JMB_LEN_MASK 0xf
-#define MTK_GDM1_JMB_LEN_SHIFT 28
-#define MTK_GDM1_ICS_EN BIT(22)
-#define MTK_GDM1_TCS_EN BIT(21)
-#define MTK_GDM1_UCS_EN BIT(20)
-#define MTK_GDM1_JMB_EN BIT(19)
-#define MTK_GDM1_STRPCRC BIT(16)
-#define MTK_GDM1_UFRC_P_CPU (0 << 12)
-#define MTK_GDM1_UFRC_P_GDMA1 (1 << 12)
-#define MTK_GDM1_UFRC_P_PPE (6 << 12)
-
-/* checksums */
-#define MTK_ICS_GEN_EN BIT(2)
-#define MTK_UCS_GEN_EN BIT(1)
-#define MTK_TCS_GEN_EN BIT(0)
-
-/* dma mode */
-#define MTK_PDMA BIT(0)
-#define MTK_QDMA BIT(1)
-#define MTK_PDMA_RX_QDMA_TX (MTK_PDMA | MTK_QDMA)
-
-/* dma ring */
-#define MTK_PST_DRX_IDX0 BIT(16)
-#define MTK_PST_DTX_IDX3 BIT(3)
-#define MTK_PST_DTX_IDX2 BIT(2)
-#define MTK_PST_DTX_IDX1 BIT(1)
-#define MTK_PST_DTX_IDX0 BIT(0)
-
-#define MTK_RX_2B_OFFSET BIT(31)
-#define MTK_TX_WB_DDONE BIT(6)
-#define MTK_RX_DMA_BUSY BIT(3)
-#define MTK_TX_DMA_BUSY BIT(1)
-#define MTK_RX_DMA_EN BIT(2)
-#define MTK_TX_DMA_EN BIT(0)
-
-#define MTK_PDMA_SIZE_4DWORDS (0 << 4)
-#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
-#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
-
-#define MTK_US_CYC_CNT_MASK 0xff
-#define MTK_US_CYC_CNT_SHIFT 0x8
-#define MTK_US_CYC_CNT_DIVISOR 1000000
-
-/* PDMA descriptor rxd2 */
-#define RX_DMA_DONE BIT(31)
-#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
-#define RX_DMA_TAG BIT(15)
-
-/* PDMA descriptor rxd3 */
-#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff)
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
-
-/* PDMA descriptor rxd4 */
-#define RX_DMA_L4VALID BIT(30)
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
-
-struct mtk_rx_dma {
- unsigned int rxd1;
- unsigned int rxd2;
- unsigned int rxd3;
- unsigned int rxd4;
-} __packed __aligned(4);
-
-/* PDMA tx descriptor bits */
-#define TX_DMA_BUF_LEN 0x3fff
-#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16)
-#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_LS1 BIT(14)
-#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_DONE BIT(31)
-#define TX_DMA_FPORT_SHIFT 25
-#define TX_DMA_FPORT_MASK 0x7
-#define TX_DMA_INS_VLAN_MT7621 BIT(16)
-#define TX_DMA_INS_VLAN BIT(7)
-#define TX_DMA_INS_PPPOE BIT(12)
-#define TX_DMA_TAG BIT(15)
-#define TX_DMA_TAG_MASK BIT(15)
-#define TX_DMA_QN(_x) ((_x) << 16)
-#define TX_DMA_PN(_x) ((_x) << 24)
-#define TX_DMA_QN_MASK TX_DMA_QN(0x7)
-#define TX_DMA_PN_MASK TX_DMA_PN(0x7)
-#define TX_DMA_UDF BIT(20)
-#define TX_DMA_CHKSUM (0x7 << 29)
-#define TX_DMA_TSO BIT(28)
-#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
-
-/* frame engine counters */
-#define MTK_PPE_AC_BCNT0 (MTK_CMTABLE_OFFSET + 0x00)
-#define MTK_GDMA1_TX_GBCNT (MTK_CMTABLE_OFFSET + 0x300)
-#define MTK_GDMA2_TX_GBCNT (MTK_GDMA1_TX_GBCNT + 0x40)
-
-/* phy device flags */
-#define MTK_PHY_FLAG_PORT BIT(0)
-#define MTK_PHY_FLAG_ATTACH BIT(1)
-
-struct mtk_tx_dma {
- unsigned int txd1;
- unsigned int txd2;
- unsigned int txd3;
- unsigned int txd4;
-} __packed __aligned(4);
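The PLEN/LS helpers above pack the two segment lengths into txd2 of this four-word descriptor. As a purely illustrative sketch (the function name mtk_example_fill_txd and the two-segment layout are assumptions of mine, not driver code), filling such a descriptor could look roughly like this:

static void mtk_example_fill_txd(struct mtk_tx_dma *txd,
				 u32 buf0, unsigned int len0,
				 u32 buf1, unsigned int len1)
{
	/* words 1 and 3 carry the DMA addresses of the two segments */
	txd->txd1 = buf0;
	txd->txd3 = buf1;
	/* word 2 packs both lengths; LS1 marks the last segment */
	txd->txd2 = TX_DMA_PLEN0(len0) | TX_DMA_PLEN1(len1) | TX_DMA_LS1;
	/* word 4 takes the default queue/port assignment */
	txd->txd4 = TX_DMA_DESP4_DEF;
}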
-
-struct mtk_eth;
-struct mtk_mac;
-
-/* manage the attached phys */
-struct mtk_phy {
- spinlock_t lock;
-
- struct phy_device *phy[8];
- struct device_node *phy_node[8];
- const __be32 *phy_fixed[8];
- int duplex[8];
- int speed[8];
- int tx_fc[8];
- int rx_fc[8];
- int (*connect)(struct mtk_mac *mac);
- void (*disconnect)(struct mtk_mac *mac);
- void (*start)(struct mtk_mac *mac);
- void (*stop)(struct mtk_mac *mac);
-};
-
-/* struct mtk_soc_data - the structure that holds the SoC specific data
- * @reg_table: Some of the legacy registers changed their location
- * over time. Their offsets are stored in this table
- *
- * @init_data: Some features depend on the silicon revision. This
- * callback allows runtime modification of the content of
- * this struct
- * @reset_fe: This callback is used to trigger the reset of the frame
- * engine
- * @set_mac: This callback is used to set the unicast mac address
- * filter
- * @fwd_config: This callback is used to setup the forward config
- * register of the MAC
- * @switch_init: This callback is used to bring up the switch core
- * @port_init: Some SoCs have ports that can be routed to a switch port
- * or an external PHY. This callback is used to setup these
- * ports.
- * @has_carrier: This callback allows the driver to check if there is a cable
- * attached.
- * @mdio_init: This callback is used to setup the MDIO bus if one is
- * present
- * @mdio_cleanup: This callback is used to cleanup the MDIO state.
- * @mdio_write: This callback is used to write data to the MDIO bus.
- * @mdio_read: This callback is used to read data from the MDIO bus.
- * @mdio_adjust_link: This callback is used to apply the PHY settings.
- * @piac_offset: the PIAC register has a different base offset
- * @hw_features: feature set depends on the SoC type
- * @dma_ring_size: allow GBit SoCs to set bigger rings than FE SoCs
- * @napi_weight: allow GBit SoCs to set bigger napi weight than FE SoCs
- * @dma_type: whether the SoC uses PDMA, QDMA or a mix of the two
- * @pdma_glo_cfg: the default DMA configuration
- * @rx_int: the RX interrupt bits used by the SoC
- * @tx_int: the TX interrupt bits used by the SoC
- * @status_int: the Status interrupt bits used by the SoC
- * @checksum_bit: the bits used to turn on HW checksumming
- * @txd4: default value of the TXD4 descriptor
- * @mac_count: the number of MACs that the SoC has
- * @new_stats: there are an old and a new way to read hardware stats
- * registers
- * @jumbo_frame: does the SoC support jumbo frames?
- * @rx_2b_offset: tell the rx dma to offset the data by 2 bytes
- * @rx_sg_dma: scatter gather support
- * @padding_64b: enable 64 bit padding
- * @padding_bug: rt2880 has a padding bug
- * @has_switch: does the SoC have a built-in switch
- *
- * Although all of the supported SoCs share the same basic functionality, there
- * are several SoC specific functions and features that we need to support. This
- * struct holds the SoC specific data so that the common core can figure out
- * how to setup and use these differences.
- */
-struct mtk_soc_data {
- const u16 *reg_table;
-
- void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
- void (*reset_fe)(struct mtk_eth *eth);
- void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
- int (*fwd_config)(struct mtk_eth *eth);
- int (*switch_init)(struct mtk_eth *eth);
- void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *port);
- int (*has_carrier)(struct mtk_eth *eth);
- int (*mdio_init)(struct mtk_eth *eth);
- void (*mdio_cleanup)(struct mtk_eth *eth);
- int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
- u16 val);
- int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
- void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
- u32 piac_offset;
- netdev_features_t hw_features;
- u32 dma_ring_size;
- u32 napi_weight;
- u32 dma_type;
- u32 pdma_glo_cfg;
- u32 rx_int;
- u32 tx_int;
- u32 status_int;
- u32 checksum_bit;
- u32 txd4;
- u32 mac_count;
-
- u32 new_stats:1;
- u32 jumbo_frame:1;
- u32 rx_2b_offset:1;
- u32 rx_sg_dma:1;
- u32 padding_64b:1;
- u32 padding_bug:1;
- u32 has_switch:1;
-};
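As a minimal sketch of how the common core would consume this abstraction (the helper name mtk_example_enable_dma and its body are illustrative assumptions, not code from the driver): register offsets are resolved through @reg_table and SoC specifics are reached through the callbacks.

static void mtk_example_enable_dma(struct mtk_eth *eth)
{
	/* resolve the per-SoC offset of the PDMA global config register */
	u32 reg = eth->soc->reg_table[MTK_REG_PDMA_GLO_CFG];
	u32 val = mtk_r32(eth, reg);

	mtk_w32(eth, val | MTK_TX_DMA_EN | MTK_RX_DMA_EN, reg);

	/* SoC specific behaviour is reached through the callbacks */
	if (eth->soc->fwd_config)
		eth->soc->fwd_config(eth);
}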
-
-#define MTK_STAT_OFFSET 0x40
-
-/* struct mtk_hw_stats - the structure that holds the traffic statistics.
- * @stats_lock: make sure that stats operations are atomic
- * @reg_offset: the status register offset of the SoC
- * @syncp: synchronisation for consistent reads of the 64 bit counters
- *
- * All of the supported SoCs have hardware counters for traffic statistics.
- * Whenever the status IRQ triggers we can read the latest stats from these
- * counters and store them in this struct.
- */
-struct mtk_hw_stats {
- spinlock_t stats_lock;
- u32 reg_offset;
- struct u64_stats_sync syncp;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_skip;
- u64 tx_collisions;
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_overflow;
- u64 rx_fcs_errors;
- u64 rx_short_errors;
- u64 rx_long_errors;
- u64 rx_checksum_errors;
- u64 rx_flow_control_packets;
-};
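A sketch of the usual update pattern for counters guarded by a u64_stats_sync (the function name is hypothetical and this is not code taken from the driver):

static void mtk_example_stats_add_tx(struct mtk_hw_stats *hw_stats,
				     unsigned int bytes)
{
	u64_stats_update_begin(&hw_stats->syncp);
	hw_stats->tx_bytes += bytes;
	hw_stats->tx_packets++;
	u64_stats_update_end(&hw_stats->syncp);
}

Readers of the counters would pair this with u64_stats_fetch_begin()/u64_stats_fetch_retry() so 64-bit values stay consistent on 32-bit systems.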
-
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
-enum mtk_tx_flags {
- MTK_TX_FLAGS_SINGLE0 = 0x01,
- MTK_TX_FLAGS_PAGE0 = 0x02,
- MTK_TX_FLAGS_PAGE1 = 0x04,
-};
-
-/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
- * by the TX descriptors
- * @skb: The SKB pointer of the packet being sent
- * @dma_addr0: The base addr of the first segment
- * @dma_len0: The length of the first segment
- * @dma_addr1: The base addr of the second segment
- * @dma_len1: The length of the second segment
- */
-struct mtk_tx_buf {
- struct sk_buff *skb;
- u32 flags;
- DEFINE_DMA_UNMAP_ADDR(dma_addr0);
- DEFINE_DMA_UNMAP_LEN(dma_len0);
- DEFINE_DMA_UNMAP_ADDR(dma_addr1);
- DEFINE_DMA_UNMAP_LEN(dma_len1);
-};
-
-/* struct mtk_tx_ring - This struct holds info describing a TX ring
- * @tx_dma: The descriptor ring
- * @tx_buf: The memory pointed at by the ring
- * @tx_phys: The physical addr of tx_buf
- * @tx_next_free: Pointer to the next free descriptor
- * @tx_last_free: Pointer to the last free descriptor
- * @tx_thresh: The threshold for the minimum number of free descriptors
- * @tx_map: Callback to map a new packet into the ring
- * @tx_poll: Callback for the housekeeping function
- * @tx_clean: Callback for the cleanup function
- * @tx_ring_size: How many descriptors are in the ring
- * @tx_free_idx: The index of the next free descriptor
- * @tx_next_idx: QDMA uses a linked list. This element points to the next
- * free descriptor in the list
- * @tx_free_count: QDMA uses a linked list. Track how many free descriptors
- * are present
- */
-struct mtk_tx_ring {
- struct mtk_tx_dma *tx_dma;
- struct mtk_tx_buf *tx_buf;
- dma_addr_t tx_phys;
- struct mtk_tx_dma *tx_next_free;
- struct mtk_tx_dma *tx_last_free;
- u16 tx_thresh;
- int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
- struct mtk_tx_ring *ring, bool gso);
- int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
- void (*tx_clean)(struct mtk_eth *eth);
-
- /* PDMA only */
- u16 tx_ring_size;
- u16 tx_free_idx;
-
- /* QDMA only */
- u16 tx_next_idx;
- atomic_t tx_free_count;
-};
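Because PDMA and QDMA differ, the transmit path goes through the ring's callbacks rather than branching on the DMA mode. A minimal illustrative sketch (the function name and the single-descriptor assumption are mine, not the driver's):

static netdev_tx_t mtk_example_start_xmit(struct sk_buff *skb,
					  struct net_device *dev,
					  struct mtk_tx_ring *ring)
{
	/* tx_map points at either the PDMA or the QDMA implementation */
	if (ring->tx_map(skb, dev, 1, ring, skb_is_gso(skb)) < 0)
		return NETDEV_TX_BUSY;

	return NETDEV_TX_OK;
}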
-
-/* struct mtk_rx_ring - This struct holds info describing a RX ring
- * @rx_dma: The descriptor ring
- * @rx_data: The memory pointed at by the ring
- * @rx_phys: The physical addr of rx_dma
- * @rx_ring_size: How many descriptors are in the ring
- * @rx_buf_size: The size of each packet buffer
- * @rx_calc_idx: The current head of ring
- */
-struct mtk_rx_ring {
- struct mtk_rx_dma *rx_dma;
- u8 **rx_data;
- dma_addr_t rx_phys;
- u16 rx_ring_size;
- u16 frag_size;
- u16 rx_buf_size;
- u16 rx_calc_idx;
-};
-
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS 2
-
-/* struct mtk_eth - This is the main data structure for holding the state
- * of the driver
- * @dev: The device pointer
- * @base: The mapped register i/o base
- * @page_lock: Make sure that register operations are atomic
- * @soc: pointer to our SoC specific data
- * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
- * dummy for NAPI to work
- * @netdev: The netdev instances
- * @mac: Each netdev is linked to a physical MAC
- * @switch_np: The phandle for the switch
- * @irq: The IRQ that we are using
- * @msg_enable: Ethtool msg level
- * @sysclk: The sysclk rate - needed for calibration
- * @ethsys: The register map pointing at the range used to setup
- * MII modes
- * @dma_refcnt: track how many netdevs are using the DMA engine
- * @tx_ring: Pointer to the memory holding info about the TX ring
- * @rx_ring: Pointer to the memory holding info about the RX ring
- * @rx_napi: The NAPI struct
- * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
- * @scratch_head: The scratch memory that scratch_ring points to.
- * @phy: Info about the attached PHYs
- * @mii_bus: If there is a bus we need to create an instance for it
- * @link: Track if the ports have a physical link
- * @sw_priv: Pointer to the switch's private data
- * @vlan_map: RX VID tracking
- */
-
-struct mtk_eth {
- struct device *dev;
- void __iomem *base;
- spinlock_t page_lock;
- struct mtk_soc_data *soc;
- struct net_device dummy_dev;
- struct net_device *netdev[MTK_MAX_DEVS];
- struct mtk_mac *mac[MTK_MAX_DEVS];
- struct device_node *switch_np;
- int irq;
- u32 msg_enable;
- unsigned long sysclk;
- struct regmap *ethsys;
- atomic_t dma_refcnt;
- struct mtk_tx_ring tx_ring;
- struct mtk_rx_ring rx_ring[2];
- struct napi_struct rx_napi;
- struct mtk_tx_dma *scratch_ring;
- void *scratch_head;
- struct mtk_phy *phy;
- struct mii_bus *mii_bus;
- int link[8];
- void *sw_priv;
- unsigned long vlan_map;
-};
-
-/* struct mtk_mac - the structure that holds the info about the MACs of the
- * SoC
- * @id: The number of the MAC
- * @of_node: Our devicetree node
- * @hw: Backpointer to our main data structure
- * @hw_stats: Packet statistics counter
- * @phy_dev: The attached PHY if available
- * @phy_flags: The PHYs flags
- * @pending_work: The work used to reset the DMA ring
- */
-struct mtk_mac {
- int id;
- struct device_node *of_node;
- struct mtk_eth *hw;
- struct mtk_hw_stats *hw_stats;
- struct phy_device *phy_dev;
- u32 phy_flags;
- struct work_struct pending_work;
-};
-
-/* the struct describing the SoC. these are declared in the soc_xyz.c files */
-extern const struct of_device_id of_mtk_match[];
-
-/* read the hardware status register */
-void mtk_stats_update_mac(struct mtk_mac *mac);
-
-/* default frame engine reset handler */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
-
-/* register i/o wrappers */
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
-
-/* default clock calibration handler */
-int mtk_set_clock_cycle(struct mtk_eth *eth);
-
-/* default checksum setup handler */
-void mtk_csum_config(struct mtk_eth *eth);
-
-/* default forward config handler */
-void mtk_fwd_config(struct mtk_eth *eth);
-
-#endif /* MTK_ETH_H */
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644
index 5d63b5d96f6b..000000000000
--- a/drivers/staging/mt7621-eth/soc_mt7621.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/if_vlan.h>
-#include <linux/of_net.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-#define MT7620_CDMA_CSG_CFG 0x400
-#define MT7621_CDMP_IG_CTRL (MT7620_CDMA_CSG_CFG + 0x00)
-#define MT7621_CDMP_EG_CTRL (MT7620_CDMA_CSG_CFG + 0x04)
-#define MT7621_RESET_FE BIT(6)
-#define MT7621_L4_VALID BIT(24)
-
-#define MT7621_TX_DMA_UDF BIT(19)
-
-#define CDMA_ICS_EN BIT(2)
-#define CDMA_UCS_EN BIT(1)
-#define CDMA_TCS_EN BIT(0)
-
-#define GDMA_ICS_EN BIT(22)
-#define GDMA_TCS_EN BIT(21)
-#define GDMA_UCS_EN BIT(20)
-
-/* frame engine counters */
-#define MT7621_REG_MIB_OFFSET 0x2000
-#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00)
-#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400)
-#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40)
-
-#define GSW_REG_GDMA1_MAC_ADRL 0x508
-#define GSW_REG_GDMA1_MAC_ADRH 0x50C
-#define GSW_REG_GDMA2_MAC_ADRL 0x1508
-#define GSW_REG_GDMA2_MAC_ADRH 0x150C
-
-#define MT7621_MTK_RST_GL 0x04
-#define MT7620_MTK_INT_STATUS2 0x08
-
-/* MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF at BIT(29),
- * but after testing it should be BIT(13).
- */
-#define MT7621_MTK_GDM1_AF BIT(28)
-#define MT7621_MTK_GDM2_AF BIT(29)
-
-static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
- [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
- [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
- [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
- [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
- [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
- [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
- [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
- [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
- [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
- [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
- [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
- [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
- [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
- [MTK_REG_MTK_DMA_VID_BASE] = 0,
- [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
- [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
- [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
-};
-
-static void mt7621_mtk_reset(struct mtk_eth *eth)
-{
- mtk_reset(eth, MT7621_RESET_FE);
-}
-
-static int mt7621_fwd_config(struct mtk_eth *eth)
-{
- /* Setup GMAC1 only, there is no support for GMAC2 yet */
- mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
- MT7620_GDMA1_FWD_CFG);
-
- /* Enable RX checksum */
- mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
- GDMA_TCS_EN | GDMA_UCS_EN),
- MT7620_GDMA1_FWD_CFG);
-
- /* Enable RX VLan Offloading */
- mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
-
- return 0;
-}
-
-static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mac->hw->page_lock, flags);
- if (mac->id == 0) {
- mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
- GSW_REG_GDMA1_MAC_ADRH);
- mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
- (hwaddr[4] << 8) | hwaddr[5],
- GSW_REG_GDMA1_MAC_ADRL);
- }
- if (mac->id == 1) {
- mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
- GSW_REG_GDMA2_MAC_ADRH);
- mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
- (hwaddr[4] << 8) | hwaddr[5],
- GSW_REG_GDMA2_MAC_ADRL);
- }
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static struct mtk_soc_data mt7621_data = {
- .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_IPV6_CSUM,
- .dma_type = MTK_PDMA,
- .dma_ring_size = 256,
- .napi_weight = 64,
- .new_stats = 1,
- .padding_64b = 1,
- .rx_2b_offset = 1,
- .rx_sg_dma = 1,
- .has_switch = 1,
- .mac_count = 2,
- .reset_fe = mt7621_mtk_reset,
- .set_mac = mt7621_set_mac,
- .fwd_config = mt7621_fwd_config,
- .switch_init = mtk_gsw_init,
- .reg_table = mt7621_reg_table,
- .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
- .rx_int = RT5350_RX_DONE_INT,
- .tx_int = RT5350_TX_DONE_INT,
- .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
- .checksum_bit = MT7621_L4_VALID,
- .has_carrier = mt7620_has_carrier,
- .mdio_read = mt7620_mdio_read,
- .mdio_write = mt7620_mdio_write,
- .mdio_adjust_link = mt7620_mdio_link_adjust,
-};
-
-const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, of_mtk_match);
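For context, a sketch of how the matched .data above is typically consumed at probe time (the function name mtk_example_probe is hypothetical; the real probe lives in the common core, not in this file):

static int mtk_example_probe(struct platform_device *pdev)
{
	const struct mtk_soc_data *soc;

	/* yields &mt7621_data when "mediatek,mt7621-eth" matched */
	soc = of_device_get_match_data(&pdev->dev);
	if (!soc)
		return -EINVAL;

	/* ... allocate struct mtk_eth and hook up soc->reg_table and callbacks ... */
	return 0;
}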
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index d33533872a16..c8fa17cfa807 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,6 +1,7 @@
config PCI_MT7621
tristate "MediaTek MT7621 PCI Controller"
depends on RALINK
+ depends on PCI
select PCI_DRIVERS_GENERIC
help
This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index d6248eecf123..2aee64fdaec5 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
goto no_phy;
phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ priv->phy_mode);
of_node_put(phy_node);
if (!phydev)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ce61c5670ef6..986db76705cc 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
return np;
}
-static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+ int port)
{
+ struct device_node *np = priv->of_node;
u32 delay_value;
+ bool rx_delay;
+ bool tx_delay;
- if (!of_property_read_u32(np, "rx-delay", &delay_value))
+ /* By default, both RX/TX delay is enabled in
+ * __cvmx_helper_rgmii_enable().
+ */
+ rx_delay = true;
+ tx_delay = true;
+
+ if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
- if (!of_property_read_u32(np, "tx-delay", &delay_value))
+ rx_delay = delay_value > 0;
+ }
+ if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+ tx_delay = delay_value > 0;
+ }
+
+ if (!rx_delay && !tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+ else if (!rx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+ else if (!tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+ else
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
}
static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
priv->port = port;
priv->queue = cvmx_pko_get_base_queue(priv->port);
priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+ priv->phy_mode = PHY_INTERFACE_MODE_NA;
for (qos = 0; qos < 16; qos++)
skb_queue_head_init(&priv->tx_free_list[qos]);
for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
strcpy(dev->name, "spi%d");
break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_GMII;
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+ strcpy(dev->name, "eth%d");
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
strcpy(dev->name, "eth%d");
- cvm_set_rgmii_delay(priv->of_node, interface,
+ cvm_set_rgmii_delay(priv, interface,
port_index);
break;
}
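The usual reading of the mapping above is that when a delay is disabled on the MAC side, the PHY has to insert it, which is what the RGMII *ID modes request. A condensed sketch of the same decision (the helper name is illustrative and not part of the patch):

static phy_interface_t cvm_example_rgmii_mode(bool mac_rx_delay, bool mac_tx_delay)
{
	if (!mac_rx_delay && !mac_tx_delay)
		return PHY_INTERFACE_MODE_RGMII_ID;	/* PHY adds both delays */
	if (!mac_rx_delay)
		return PHY_INTERFACE_MODE_RGMII_RXID;	/* PHY adds RX delay only */
	if (!mac_tx_delay)
		return PHY_INTERFACE_MODE_RGMII_TXID;	/* PHY adds TX delay only */
	return PHY_INTERFACE_MODE_RGMII;		/* MAC/board provides the delays */
}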
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 4a07e7f43d12..be570d33685a 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -12,7 +12,7 @@
#define OCTEON_ETHERNET_H
#include <linux/of.h>
-
+#include <linux/phy.h>
#include <asm/octeon/cvmx-helper-board.h>
/**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
* cvmx_helper_interface_mode_t
*/
int imode;
+ /* PHY mode */
+ phy_interface_t phy_mode;
/* List of outstanding tx buffers per queue */
struct sk_buff_head tx_free_list[16];
unsigned int last_speed;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 80b8d4153414..a54286498a47 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
{
unsigned char lob;
int ret, i;
- struct dcon_gpio *pin = &gpios_asis[0];
+ const struct dcon_gpio *pin = &gpios_asis[0];
for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 1723a47a96b4..952f2ab51347 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
- rtw_alloc_hwxmits(padapter);
+ res = rtw_alloc_hwxmits(padapter);
+ if (res == _FAIL)
+ goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
sizeof(struct hw_xmit), GFP_KERNEL);
+ if (!pxmitpriv->hwxmits)
+ return _FAIL;
hwxmits = pxmitpriv->hwxmits;
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+ return _SUCCESS;
}
void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 788f59c74ea1..ba7e15fbde72 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 1920d02f7c9f..8c36acedf507 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
- u32 val;
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- if (pcmd->rsp && pcmd->rspsz > 0)
- memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
+ r8712_free_cmd_obj(pcmd);
return H2C_SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 92fb77666d44..1ef86b8c592f 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
static struct _cmd_callback cmd_callback[] = {
{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
{GEN_CMD_CODE(_Write_MACREG), NULL},
- {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Read_BBREG), NULL},
{GEN_CMD_CODE(_Write_BBREG), NULL},
{GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
{GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 094d61bcb469..b87f13a0b563 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
}
}
- rtw_alloc_hwxmits(padapter);
+ res = rtw_alloc_hwxmits(padapter);
+ if (res == _FAIL)
+ goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
- if (pxmitpriv->hwxmits == NULL) {
- DBG_871X("alloc hwxmits fail!...\n");
- return;
- }
+ if (!pxmitpriv->hwxmits)
+ return _FAIL;
hwxmits = pxmitpriv->hwxmits;
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
}
-
+ return _SUCCESS;
}
void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 1b38b9182b31..37f42b2f22f1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 9930ed954abb..4cc77b2016e1 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
rtlpriv->phydm.internal =
kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
+ if (!rtlpriv->phydm.internal)
+ return 0;
_rtl_phydm_init_com_info(rtlpriv, ic, params);
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index f061dd1382aa..cf6b7a80b753 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1_rsvd_page_loc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
totalpacketlen);
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index edff6ce85655..9d85a3a1af4c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
return -EINVAL;
spin_lock_irqsave(&speakup_info.spinlock, flags);
+ synth_soft.alive = 1;
while (1) {
prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
- if (!unicode)
- synth_buffer_skip_nonlatin1();
- if (!synth_buffer_empty() || speakup_info.flushing)
- break;
+ if (synth_current() == &synth_soft) {
+ if (!unicode)
+ synth_buffer_skip_nonlatin1();
+ if (!synth_buffer_empty() || speakup_info.flushing)
+ break;
+ }
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (fp->f_flags & O_NONBLOCK) {
finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
/* Keep 3 bytes available for a 16bit UTF-8-encoded character */
while (chars_sent <= count - bytes_per_ch) {
+ if (synth_current() != &synth_soft)
+ break;
if (speakup_info.flushing) {
speakup_info.flushing = 0;
ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
poll_wait(fp, &speakup_event, wait);
spin_lock_irqsave(&speakup_info.spinlock, flags);
- if (!synth_buffer_empty() || speakup_info.flushing)
+ if (synth_current() == &synth_soft &&
+ (!synth_buffer_empty() || speakup_info.flushing))
ret = EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index c8e688878fc7..ac6a74883af4 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
int synth_release_region(unsigned long start, unsigned long n);
int synth_add(struct spk_synth *in_synth);
void synth_remove(struct spk_synth *in_synth);
+struct spk_synth *synth_current(void);
extern struct speakup_info_t speakup_info;
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 25f259ee4ffc..3568bfb89912 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
}
EXPORT_SYMBOL_GPL(synth_remove);
+struct spk_synth *synth_current(void)
+{
+ return synth;
+}
+EXPORT_SYMBOL_GPL(synth_current);
+
short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 804daf83be35..064d0db4c51e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
struct device_node *fw_node;
const struct of_device_id *of_id;
struct vchiq_drvdata *drvdata;
+ struct device *vchiq_dev;
int err;
of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
goto failed_platform_init;
}
- if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid,
- NULL, "vchiq")))
+ vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
+ "vchiq");
+ if (IS_ERR(vchiq_dev)) {
+ err = PTR_ERR(vchiq_dev);
goto failed_device_create;
+ }
vchiq_debugfs_init();
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b370985b58a1..c6bb4aaf9bd0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
return;
}
- MACvIntDisable(priv->PortOffset);
-
spin_lock_irqsave(&priv->lock, flags);
/* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
if (priv->vif)
vnt_interrupt_process(priv);
+
+ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static irqreturn_t vnt_interrupt(int irq, void *arg)
{
struct vnt_private *priv = arg;
- if (priv->vif)
- schedule_work(&priv->interrupt_work);
+ schedule_work(&priv->interrupt_work);
+
+ MACvIntDisable(priv->PortOffset);
return IRQ_HANDLED;
}
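The hunks above adopt a common deferral pattern: mask the device interrupt in the hard handler, do the real work in process context, and unmask only once that work has finished. A generic sketch of the pattern, with all names and the mask register offset being illustrative rather than taken from the driver:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/workqueue.h>

struct example_priv {
	struct work_struct interrupt_work;
	void __iomem *regs;		/* 0x08 below is a made-up mask register */
};

static void example_interrupt_work(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, interrupt_work);

	/* ... handle the queued device events here ... */

	writel(~0u, priv->regs + 0x08);	/* re-enable interrupts when done */
}

static irqreturn_t example_interrupt(int irq, void *arg)
{
	struct example_priv *priv = arg;

	writel(0, priv->regs + 0x08);	/* mask further interrupts */
	schedule_work(&priv->interrupt_work);
	return IRQ_HANDLED;
}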
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 720760cd493f..ba39647a690c 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
static void bcm2835_thermal_debugfs(struct platform_device *pdev)
{
- struct thermal_zone_device *tz = platform_get_drvdata(pdev);
- struct bcm2835_thermal_data *data = tz->devdata;
+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
struct debugfs_regset32 *regset;
data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
@@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
data->tz = tz;
- platform_set_drvdata(pdev, tz);
+ platform_set_drvdata(pdev, data);
/*
* Thermal_zone doesn't enable hwmon as default,
@@ -290,8 +289,8 @@ err_clk:
static int bcm2835_thermal_remove(struct platform_device *pdev)
{
- struct thermal_zone_device *tz = platform_get_drvdata(pdev);
- struct bcm2835_thermal_data *data = tz->devdata;
+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
+ struct thermal_zone_device *tz = data->tz;
debugfs_remove_recursive(data->debugfsdir);
thermal_zone_of_sensor_unregister(&pdev->dev, tz);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6fff16113628..f7c1f49ec87f 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -536,12 +536,11 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
struct thermal_zone_device *tz, u32 power,
unsigned long *state)
{
- unsigned int cur_freq, target_freq;
+ unsigned int target_freq;
u32 last_load, normalised_power;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
- cur_freq = cpufreq_quick_get(policy->cpu);
power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
normalised_power = (power * 100) / last_load;
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 61ca7ce3624e..5f3ed24e26ec 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
INT3400_THERMAL_PASSIVE_1,
INT3400_THERMAL_ACTIVE,
INT3400_THERMAL_CRITICAL,
+ INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
+ INT3400_THERMAL_EMERGENCY_CALL_MODE,
+ INT3400_THERMAL_PASSIVE_2,
+ INT3400_THERMAL_POWER_BOSS,
+ INT3400_THERMAL_VIRTUAL_SENSOR,
+ INT3400_THERMAL_COOLING_MODE,
+ INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
INT3400_THERMAL_MAXIMUM_UUID,
};
@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
"3A95C389-E4B8-4629-A526-C52C88626BAE",
"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
+ "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
+ "5349962F-71E6-431D-9AE8-0A635B710AEE",
+ "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
+ "F5A35014-C209-46A4-993A-EB56DE7530A1",
+ "6ED722A7-9240-48A5-B479-31EEF723D7CF",
+ "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
+ "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
};
struct int3400_thermal_priv {
@@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
- }
+ int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+ int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+
priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
priv, &int3400_thermal_ops,
&int3400_thermal_params, 0, 0);
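For orientation, supported policies end up as bits in priv->uuid_bitmap indexed by the enum above, so a caller could test one of the newly added entries roughly like this (the helper name is hypothetical):

static bool int3400_example_supports(struct int3400_thermal_priv *priv,
				     enum int3400_thermal_uuid uuid)
{
	/* uuid_bitmap is populated at probe with the firmware-advertised UUIDs */
	return priv->uuid_bitmap & BIT(uuid);
}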
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 7571f7c2e7c9..ac7256b5f020 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
bool clamping;
};
-static struct powerclamp_worker_data * __percpu worker_data;
+static struct powerclamp_worker_data __percpu *worker_data;
static struct thermal_cooling_device *cooling_dev;
static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
* clamping kthread worker
@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
struct kthread_worker *worker;
- worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
+ worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
if (IS_ERR(worker))
return;
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 5c07a61447d3..e4ea7f6aef20 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -199,6 +199,9 @@ enum {
#define MT7622_TS1 0
#define MT7622_NUM_CONTROLLER 1
+/* The maximum number of banks */
+#define MAX_NUM_ZONES 8
+
/* The calibration coefficient of sensor */
#define MT7622_CALIBRATION 165
@@ -249,7 +252,7 @@ struct mtk_thermal_data {
const int num_controller;
const int *controller_offset;
bool need_switch_bank;
- struct thermal_bank_cfg bank_data[];
+ struct thermal_bank_cfg bank_data[MAX_NUM_ZONES];
};
struct mtk_thermal {
@@ -268,7 +271,7 @@ struct mtk_thermal {
s32 vts[MAX_NUM_VTS];
const struct mtk_thermal_data *conf;
- struct mtk_thermal_bank banks[];
+ struct mtk_thermal_bank banks[MAX_NUM_ZONES];
};
/* MT8183 thermal sensor data */
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 48eef552cba4..fc9399d9c082 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
struct exynos_tmu_data *data = p;
int value, ret = 0;
- if (!data || !data->tmu_read || !data->enabled)
+ if (!data || !data->tmu_read)
return -EINVAL;
else if (!data->enabled)
/*
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index db5df3d54818..3bdd56a1021b 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
struct clk *clk;
};
-static inline bool ar933x_uart_console_enabled(void)
-{
- return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
int offset)
{
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
static struct ar933x_uart_port *
ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
.index = -1,
.data = &ar933x_uart_driver,
};
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
- if (!ar933x_uart_console_enabled())
- return;
-
- ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
static struct uart_driver ar933x_uart_driver = {
.owner = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
- ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+ ar933x_console_ports[up->port.line] = up;
+#endif
ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
{
int ret;
- if (ar933x_uart_console_enabled())
- ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+ ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
ret = uart_register_driver(&ar933x_uart_driver);
if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 05147fe24343..0b4f36905321 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,6 +166,8 @@ struct atmel_uart_port {
unsigned int pending_status;
spinlock_t lock_suspended;
+ bool hd_start_rx; /* can start RX during half-duplex operation */
+
/* ISO7816 */
unsigned int fidi_min;
unsigned int fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
__raw_writeb(value, port->membase + ATMEL_US_THR);
}
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+ return ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
+ (port->iso7816.flags & SER_ISO7816_ENABLED);
+}
+
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_start_rx(port);
+
}
/*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
return;
if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_stop_rx(port);
if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
*/
if (!uart_circ_empty(xmit))
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
- else if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
+ else if (atmel_uart_is_half_duplex(port)) {
+ /*
+ * DMA done, re-enable TXEMPTY and signal that we can stop
+ * TX and start RX for RS485
+ */
+ atmel_port->hd_start_rx = true;
+ atmel_uart_writel(port, ATMEL_US_IER,
+ atmel_port->tx_done_mask);
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
sg_dma_len(&atmel_port->sg_rx)/2,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(port->dev, "Preparing DMA cyclic failed\n");
+ goto chan_err;
+ }
desc->callback = atmel_complete_rx_dma;
desc->callback_param = port;
atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & atmel_port->tx_done_mask) {
- /* Either PDC or interrupt transmission */
atmel_uart_writel(port, ATMEL_US_IDR,
atmel_port->tx_done_mask);
+
+ /* Start RX if flag was set and FIFO is empty */
+ if (atmel_port->hd_start_rx) {
+ if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+ & ATMEL_US_TXEMPTY))
+ dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+ atmel_port->hd_start_rx = false;
+ atmel_start_rx(port);
+ return;
+ }
+
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
}
}
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
} else {
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED) {
+ if (atmel_uart_is_half_duplex(port)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 6fb312e7af71..bfe5e9e034ec 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
char *cptr = config;
struct console *cons;
- if (!strlen(config) || isspace(config[0]))
+ if (!strlen(config) || isspace(config[0])) {
+ err = 0;
goto noconfig;
+ }
kgdboc_io_ops.is_console = 0;
kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f5bdde405627..450ba6d7996c 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
if (spi->dev.of_node) {
const struct of_device_id *of_id =
of_match_device(max310x_dt_ids, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
devtype = (struct max310x_devtype *)of_id->data;
} else {
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231f751d1ef4..7e7b1559fa36 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (!match)
+ return -ENODEV;
+
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 27235a526cce..4c188f4079b3 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.mapbase = r->start;
s->port.membase = ioremap(r->start, resource_size(r));
+ if (!s->port.membase) {
+ ret = -ENOMEM;
+ goto out_disable_clks;
+ }
s->port.ops = &mxs_auart_ops;
s->port.iotype = UPIO_MEM;
s->port.fifosize = MXS_AUART_FIFO_SIZE;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 3bcec1c20219..35e5f9c5d5be 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
- int baud;
+ int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 635178cf3eed..09a183dfc526 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
- return ret;
+ goto err_i2c;
}
#endif
@@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void)
ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx spi --> %d\n", ret);
- return ret;
+ goto err_spi;
}
#endif
return ret;
+
+err_spi:
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+ i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+#endif
+err_i2c:
+ uart_unregister_driver(&sc16is7xx_uart);
+ return ret;
}
module_init(sc16is7xx_init);
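The fix follows the standard init unwind shape, which is easier to see whole than in hunks. A generic sketch under the assumption of the same three registrations (the driver variables here are placeholders; in the real code the I2C and SPI parts are conditional on Kconfig):

static int __init example_init(void)
{
	int ret;

	ret = uart_register_driver(&example_uart_driver);
	if (ret)
		return ret;

	ret = i2c_add_driver(&example_i2c_driver);
	if (ret)
		goto err_i2c;

	ret = spi_register_driver(&example_spi_driver);
	if (ret)
		goto err_spi;

	return 0;

err_spi:
	i2c_del_driver(&example_i2c_driver);
err_i2c:
	uart_unregister_driver(&example_uart_driver);
	return ret;
}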
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 060fcd42b6d5..2d1c626312cd 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit)) {
+ if (uart_circ_empty(xmit))
sci_stop_tx(port);
- } else {
- ctrl = serial_port_in(port, SCSCR);
-
- if (port->type != PORT_SCI) {
- serial_port_in(port, SCxSR); /* Dummy read */
- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
- }
- ctrl |= SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
- }
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 044c3cbdcfa4..a9e12b3bc31d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
if (tty && C_HUPCL(tty))
tty_port_lower_dtr_rts(port);
- if (port->ops->shutdown)
+ if (port->ops && port->ops->shutdown)
port->ops->shutdown(port);
}
out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
*/
int tty_port_carrier_raised(struct tty_port *port)
{
- if (port->ops->carrier_raised == NULL)
+ if (!port->ops || !port->ops->carrier_raised)
return 1;
return port->ops->carrier_raised(port);
}
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
*/
void tty_port_raise_dtr_rts(struct tty_port *port)
{
- if (port->ops->dtr_rts)
+ if (port->ops && port->ops->dtr_rts)
port->ops->dtr_rts(port, 1);
}
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
*/
void tty_port_lower_dtr_rts(struct tty_port *port)
{
- if (port->ops->dtr_rts)
+ if (port->ops && port->ops->dtr_rts)
port->ops->dtr_rts(port, 0);
}
EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
if (!tty_port_initialized(port)) {
clear_bit(TTY_IO_ERROR, &tty->flags);
- if (port->ops->activate) {
+ if (port->ops && port->ops->activate) {
int retval = port->ops->activate(port, tty);
if (retval) {
mutex_unlock(&port->mutex);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 739f8960811a..ec666eb4b7b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
clear_bit(EVENT_RX_STALL, &acm->flags);
}
- if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+ if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
tty_port_tty_wakeup(&acm->port);
- clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
- }
}
/*
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 48277bbc15e4..73c8e6591746 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
do {
controller = of_find_node_with_property(controller, "phys");
+ if (!of_device_is_available(controller))
+ continue;
index = 0;
do {
if (arg0 == -1) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3189181bb628..975d7c1288e3 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2742,6 +2742,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
PHY_MODE_USB_HOST_SS);
if (retval)
+ retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
+ PHY_MODE_USB_HOST);
+ if (retval)
goto err_usb_phy_roothub_power_on;
retval = usb_phy_roothub_power_on(hcd->phy_roothub);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index fdc6e4e403e8..8cced3609e24 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
(kernel_ulong_t) &dwc3_pci_mrfld_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 75b113a5b25c..f3816a5c861e 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -391,20 +391,20 @@ try_again:
req->complete = f_hidg_req_complete;
req->context = hidg;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
- goto release_write_pending_unlocked;
+ goto release_write_pending;
} else {
status = count;
}
- spin_unlock_irqrestore(&hidg->write_spinlock, flags);
return status;
release_write_pending:
spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index b77f3126580e..c2011cd7df8c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
break;
}
if (&req->req != _req) {
+ ep->stopped = stopped;
spin_unlock_irqrestore(&ep->dev->lock, flags);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f63f82450bf4..898339e5df10 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
(void) readl(&ep->dev->pci->pcimstctl);
writel(BIT(DMA_START), &dma->dmastat);
-
- if (!ep->is_in)
- stop_out_naking(ep);
}
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
writel(BIT(DMA_START), &dma->dmastat);
return;
}
+ stop_out_naking(ep);
}
tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
break;
}
if (&req->req != _req) {
+ ep->stopped = stopped;
spin_unlock_irqrestore(&ep->dev->lock, flags);
- dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
- __func__);
+ ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
return -EINVAL;
}
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 934584f0a20a..6343fbacd244 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
printk(KERN_INFO "driver %s\n", hcd_name);
workqueue = create_singlethread_workqueue("u132");
retval = platform_driver_register(&u132_platform_driver);
+ if (retval)
+ destroy_workqueue(workqueue);
+
return retval;
}
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index c78be578abb0..d932cc31711e 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
return -1;
writel(0, &dbc->regs->control);
- xhci_dbc_mem_cleanup(xhci);
dbc->state = DS_DISABLED;
return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
ret = xhci_do_dbc_stop(xhci);
spin_unlock_irqrestore(&dbc->lock, flags);
- if (!ret)
+ if (!ret) {
+ xhci_dbc_mem_cleanup(xhci);
pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+ }
}
static void
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e2eece693655..96a740543183 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
port_index = max_ports;
while (port_index--) {
u32 t1, t2;
-
+ int retries = 10;
+retry:
t1 = readl(ports[port_index]->addr);
t2 = xhci_port_state_to_neutral(t1);
portsc_buf[port_index] = 0;
- /* Bail out if a USB3 port has a new device in link training */
- if ((hcd->speed >= HCD_USB3) &&
+ /*
+ * Give a USB3 port in link training time to finish, but don't
+ * prevent suspend as the port might be stuck
+ */
+ if ((hcd->speed >= HCD_USB3) && retries-- &&
(t1 & PORT_PLS_MASK) == XDEV_POLLING) {
- bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
- return -EBUSY;
+ msleep(XHCI_PORT_POLLING_LFPS_TIME);
+ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
+ port_index);
+ goto retry;
}
-
/* suspend ports in U0, or bail out for new connect changes */
if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
if ((t1 & PORT_CSC) && wake_enabled) {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index a6e463715779..671bce18782c 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
if (!xhci_rcar_wait_for_pll_active(hcd))
return -ETIMEDOUT;
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
return xhci_rcar_download_firmware(hcd);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 40fa25c4d041..9215a28dad40 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
}
- if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
- DEV_SUPERSPEED_ANY(portsc)) {
+ if ((portsc & PORT_PLC) &&
+ DEV_SUPERSPEED_ANY(portsc) &&
+ ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
+ (portsc & PORT_PLS_MASK) == XDEV_U1 ||
+ (portsc & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- /* We've just brought the device into U0 through either the
+ /* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
* initiated remote wake, don't pass up the link state change,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 652dc36e3012..9334cdee382a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -452,6 +452,14 @@ struct xhci_op_regs {
*/
#define XHCI_DEFAULT_BESL 4
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check the status 10 times with a 36ms sleep in places where we need to
+ * wait for polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME 36
+
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d72b7d1d383..04684849d683 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
*/
hub->port_swap = USB251XB_DEF_PORT_SWAP;
of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
- if ((port >= 0) && (port <= data->port_cnt))
+ if (port <= data->port_cnt)
hub->port_swap |= BIT(port);
}
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
dev);
int err;
- if (np) {
+ if (np && of_id) {
err = usb251xb_get_ofdata(hub,
(struct usb251xb_data *)of_id->data);
if (err) {
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index bcc23486c4ed..928c2cd6fc00 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -6,6 +6,7 @@ config USB_MTU3
tristate "MediaTek USB3 Dual Role controller"
depends on USB || USB_GADGET
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on EXTCON || !EXTCON
select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
help
Say Y or M here if your system runs on MediaTek SoCs with
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fffe23ab0189..979bef9bfb6b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8f5b17471759..1d8461ae2c34 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index b863bedb55a1..5755f0df0025 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -567,7 +567,9 @@
/*
* NovaTech product ids (FTDI_VID)
*/
-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
/*
* Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index fc52ac75fbf6..18110225d506 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
if (!urbtrack)
return -ENOMEM;
- kref_get(&mos_parport->ref_count);
- urbtrack->mos_parport = mos_parport;
urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urbtrack->urb) {
kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
usb_sndctrlpipe(usbdev, 0),
(unsigned char *)urbtrack->setup,
NULL, 0, async_complete, urbtrack);
+ kref_get(&mos_parport->ref_count);
+ urbtrack->mos_parport = mos_parport;
kref_init(&urbtrack->ref_count);
INIT_LIST_HEAD(&urbtrack->urblist_entry);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 11b21d9410f3..83869065b802 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
+#define QUECTEL_PRODUCT_EM12 0x0512
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 0f62db091d8d..a2233d72ae7c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -37,6 +37,7 @@
S(SRC_ATTACHED), \
S(SRC_STARTUP), \
S(SRC_SEND_CAPABILITIES), \
+ S(SRC_SEND_CAPABILITIES_TIMEOUT), \
S(SRC_NEGOTIATE_CAPABILITIES), \
S(SRC_TRANSITION_SUPPLY), \
S(SRC_READY), \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
/* port->hard_reset_count = 0; */
port->caps_count = 0;
port->pd_capable = true;
- tcpm_set_state_cond(port, hard_reset_state(port),
+ tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
PD_T_SEND_SOURCE_CAP);
}
break;
+ case SRC_SEND_CAPABILITIES_TIMEOUT:
+ /*
+ * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+ *
+ * PD 2.0 sinks are supposed to accept src-capabilities with a
+ * 3.0 header and simply ignore any src PDOs which the sink does
+ * not understand, such as PPS, but some 2.0 sinks instead ignore
+ * the entire PD_DATA_SOURCE_CAP message, causing contract
+ * negotiation to fail.
+ *
+ * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+ * sending src-capabilities with a lower PD revision to
+ * make these broken sinks work.
+ */
+ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+ tcpm_set_state(port, HARD_RESET_SEND, 0);
+ } else if (port->negotiated_rev > PD_REV20) {
+ port->negotiated_rev--;
+ port->hard_reset_count = 0;
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ } else {
+ tcpm_set_state(port, hard_reset_state(port), 0);
+ }
+ break;
case SRC_NEGOTIATE_CAPABILITIES:
ret = tcpm_pd_check_request(port);
if (ret < 0) {
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..6770afd40765 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
wcove->dev = &pdev->dev;
wcove->regmap = pmic->regmap;
- irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
- platform_get_irq(pdev, 0));
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
+ }
+
+ irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
if (irq < 0)
return irq;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index a25659b5a5d1..3fa20e95a6bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
- pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
- pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8dbb270998f4..6b64e45a5269 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1398,7 +1398,7 @@ unlock_exit:
mutex_unlock(&container->lock);
}
-const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
.name = "iommu-vfio-powerpc",
.owner = THIS_MODULE,
.open = tce_iommu_open,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..d0f731c9920a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
MODULE_PARM_DESC(disable_hugepages,
"Disable VFIO IOMMU support for IOMMU hugepages.");
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+ "Maximum number of user DMA mappings per container (65535).");
+
struct vfio_iommu {
struct list_head domain_list;
struct vfio_domain *external_domain; /* domain for external user */
struct mutex lock;
struct rb_root dma_list;
struct blocking_notifier_head notifier;
+ unsigned int dma_avail;
bool v2;
bool nesting;
};
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
vfio_unlink_dma(iommu, dma);
put_task_struct(dma->task);
kfree(dma);
+ iommu->dma_avail++;
}
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
goto out_unlock;
}
+ if (!iommu->dma_avail) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma) {
ret = -ENOMEM;
goto out_unlock;
}
+ iommu->dma_avail--;
dma->iova = iova;
dma->vaddr = vaddr;
dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
INIT_LIST_HEAD(&iommu->domain_list);
iommu->dma_list = RB_ROOT;
+ iommu->dma_avail = dma_entry_limit;
mutex_init(&iommu->lock);
BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index df7d09409efe..8ca333f21292 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -27,6 +27,10 @@
#define GUEST_MAPPINGS_TRIES 5
+#define VBG_KERNEL_REQUEST \
+ (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+ VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
/**
* Reserves memory in which the VMM can relocate any guest mappings
* that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
int i, rc;
/* Query the required space. */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+ VBG_KERNEL_REQUEST);
if (!req)
return;
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
* Tell the host that we're going to free the memory we reserved for
* it, then free it up. (Leak the memory if anything goes wrong here.)
*/
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+ VBG_KERNEL_REQUEST);
if (!req)
return;
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
struct vmmdev_guest_info2 *req2 = NULL;
int rc, ret = -ENOMEM;
- req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
- req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+ req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+ VBG_KERNEL_REQUEST);
+ req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+ VBG_KERNEL_REQUEST);
if (!req1 || !req2)
goto out_free;
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
req2->additions_minor = VBG_VERSION_MINOR;
req2->additions_build = VBG_VERSION_BUILD;
req2->additions_revision = VBG_SVN_REV;
- /* (no features defined yet) */
- req2->additions_features = 0;
+ req2->additions_features =
+ VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
strlcpy(req2->name, VBG_VERSION_STRING,
sizeof(req2->name));
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
struct vmmdev_guest_status *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
struct vmmdev_heartbeat *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
gdev->guest_heartbeat_req = vbg_req_alloc(
sizeof(*gdev->guest_heartbeat_req),
- VMMDEVREQ_GUEST_HEARTBEAT);
+ VMMDEVREQ_GUEST_HEARTBEAT,
+ VBG_KERNEL_REQUEST);
if (!gdev->guest_heartbeat_req)
return -ENOMEM;
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
struct vmmdev_mask *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
u32 changed, previous;
int rc, ret = 0;
- /* Allocate a request buffer before taking the spinlock */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+ /*
+ * Allocate a request buffer before taking the spinlock. When
+ * the session is being terminated, the requestor is the kernel,
+ * as we're cleaning up.
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+ session_termination ? VBG_KERNEL_REQUEST :
+ session->requestor);
if (!req) {
if (!session_termination)
return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
struct vmmdev_mask *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
u32 changed, previous;
int rc, ret = 0;
- /* Allocate a request buffer before taking the spinlock */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ /*
+ * Allocate a request buffer before taking the spinlock. When
+ * the session is being terminated, the requestor is the kernel,
+ * as we're cleaning up.
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+ session_termination ? VBG_KERNEL_REQUEST :
+ session->requestor);
if (!req) {
if (!session_termination)
return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
struct vmmdev_host_version *req;
int rc, ret;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
gdev->mem_balloon.get_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
- VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+ VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+ VBG_KERNEL_REQUEST);
gdev->mem_balloon.change_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
- VMMDEVREQ_CHANGE_MEMBALLOON);
+ VMMDEVREQ_CHANGE_MEMBALLOON,
+ VBG_KERNEL_REQUEST);
gdev->cancel_req =
vbg_req_alloc(sizeof(*(gdev->cancel_req)),
- VMMDEVREQ_HGCM_CANCEL2);
+ VMMDEVREQ_HGCM_CANCEL2,
+ VBG_KERNEL_REQUEST);
gdev->ack_events_req =
vbg_req_alloc(sizeof(*gdev->ack_events_req),
- VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+ VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+ VBG_KERNEL_REQUEST);
gdev->mouse_status_req =
vbg_req_alloc(sizeof(*gdev->mouse_status_req),
- VMMDEVREQ_GET_MOUSE_STATUS);
+ VMMDEVREQ_GET_MOUSE_STATUS,
+ VBG_KERNEL_REQUEST);
if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
!gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
* vboxguest_linux.c calls this when userspace opens the char-device.
* Return: A pointer to the new session or an ERR_PTR on error.
* @gdev: The Guest extension device.
- * @user: Set if this is a session for the vboxuser device.
+ * @requestor: VMMDEV_REQUESTOR_* flags
*/
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
struct vbg_session *session;
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
return ERR_PTR(-ENOMEM);
session->gdev = gdev;
- session->user_session = user;
+ session->requestor = requestor;
return session;
}
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
if (!session->hgcm_client_ids[i])
continue;
- vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+ /* requestor is kernel here, as we're cleaning up. */
+ vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+ session->hgcm_client_ids[i], &rc);
}
kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
return -EPERM;
}
- if (trusted_apps_only && session->user_session) {
+ if (trusted_apps_only &&
+ (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
req->request_type);
return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EMFILE;
- ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
- &conn->hdr.rc);
+ ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+ &client_id, &conn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EINVAL;
- ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+ ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+ &disconn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
}
if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
- ret = vbg_hgcm_call32(gdev, client_id,
+ ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS32(call),
call->parm_count, &call->hdr.rc);
else
- ret = vbg_hgcm_call(gdev, client_id,
+ ret = vbg_hgcm_call(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS(call),
call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+ struct vbg_session *session,
struct vbg_ioctl_write_coredump *dump)
{
struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
return -EINVAL;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+ session->requestor);
if (!req)
return -ENOMEM;
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
case VBG_IOCTL_CHECK_BALLOON:
return vbg_ioctl_check_balloon(gdev, data);
case VBG_IOCTL_WRITE_CORE_DUMP:
- return vbg_ioctl_write_core_dump(gdev, data);
+ return vbg_ioctl_write_core_dump(gdev, session, data);
}
/* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
struct vmmdev_mouse_status *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec45bfa9..4188c12b839f 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -154,15 +154,15 @@ struct vbg_session {
* host. Protected by vbg_gdev.session_mutex.
*/
u32 guest_caps;
- /** Does this session belong to a root process or a user one? */
- bool user_session;
+ /** VMMDEV_REQUESTOR_* flags */
+ u32 requestor;
/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
bool cancel_waiters;
};
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
void vbg_core_close_session(struct vbg_session *session);
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
void vbg_linux_mouse_event(struct vbg_dev *gdev);
/* Private (non-exported) functions from vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+ u32 requestor);
void vbg_req_free(void *req, size_t len);
int vbg_req_perform(struct vbg_dev *gdev, void *req);
int vbg_hgcm_call32(
- struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
- struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
- int *vbox_status);
+ struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+ u32 parm_count, int *vbox_status);
#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a9619192d..6e8c0f1c1056 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -5,6 +5,7 @@
* Copyright (C) 2006-2016 Oracle Corporation
*/
+#include <linux/cred.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
static struct vbg_dev *vbg_gdev;
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+ u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+ VMMDEV_REQUESTOR_CON_DONT_KNOW |
+ VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+ if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+ requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+ else
+ requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+ if (in_egroup_p(inode->i_gid))
+ requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+ return requestor;
+}
+
static int vbg_misc_device_open(struct inode *inode, struct file *filp)
{
struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
/* misc_open sets filp->private_data to our misc device */
gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
- session = vbg_core_open_session(gdev, false);
+ session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
if (IS_ERR(session))
return PTR_ERR(session);
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
gdev = container_of(filp->private_data, struct vbg_dev,
misc_device_user);
- session = vbg_core_open_session(gdev, false);
+ session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+ VMMDEV_REQUESTOR_USER_DEVICE);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
if (is_vmmdev_req)
- buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+ buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+ session->requestor);
else
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index bf4474214b4d..75fd140b02ff 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_debug, pr_debug);
#endif
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+ u32 requestor)
{
struct vmmdev_request_header *req;
int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
req->request_type = req_type;
req->rc = VERR_GENERAL_FAILURE;
req->reserved1 = 0;
- req->reserved2 = 0;
+ req->requestor = requestor;
return req;
}
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
return done;
}
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status)
{
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
int rc;
hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
- VMMDEVREQ_HGCM_CONNECT);
+ VMMDEVREQ_HGCM_CONNECT, requestor);
if (!hgcm_connect)
return -ENOMEM;
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
}
EXPORT_SYMBOL(vbg_hgcm_connect);
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+ u32 client_id, int *vbox_status)
{
struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
int rc;
hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
- VMMDEVREQ_HGCM_DISCONNECT);
+ VMMDEVREQ_HGCM_DISCONNECT,
+ requestor);
if (!hgcm_disconnect)
return -ENOMEM;
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
return 0;
}
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
- u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
- u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+ u32 function, u32 timeout_ms,
+ struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+ int *vbox_status)
{
struct vmmdev_hgcm_call *call;
void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
goto free_bounce_bufs;
}
- call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+ call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
if (!call) {
ret = -ENOMEM;
goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
- struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
- struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
- int *vbox_status)
+ struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+ u32 parm_count, int *vbox_status)
{
struct vmmdev_hgcm_function_parameter *parm64 = NULL;
u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
goto out_free;
}
- ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+ ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
parm64, parm_count, vbox_status);
if (ret < 0)
goto out_free;
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
index 77f0c8f8a231..84834dad38d5 100644
--- a/drivers/virt/vboxguest/vboxguest_version.h
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -9,11 +9,10 @@
#ifndef __VBOX_VERSION_H__
#define __VBOX_VERSION_H__
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
#define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
#endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae978935d..6337b8d75d96 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
s32 rc;
/** Reserved field no.1. MBZ. */
u32 reserved1;
- /** Reserved field no.2. MBZ. */
- u32 reserved2;
+ /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+ u32 requestor;
};
VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
};
VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO BIT(0)
+
/** struct vmmdev_guestinfo2 - Guest information report, version 2. */
struct vmmdev_guest_info2 {
/** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
u32 additions_build;
/** SVN revision. */
u32 additions_revision;
- /** Feature mask, currently unused. */
+ /** Feature mask. */
u32 additions_features;
/**
* The intentional meaning of this field was:
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d0584c040c60..7a0398bb84f7 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
- for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ if (vp_dev->msix_affinity_masks) {
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ }
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 18846afb39da..5df92c308286 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -882,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split(
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (queue)
break;
+ if (!may_reduce_num)
+ return NULL;
}
if (!num)
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index de01a6d0059d..a1c61e351d3f 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
- GFP_KERNEL);
+ vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
if (!vma_priv)
return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c3e201025ef0..0782ff3c2273 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
if (xen_store_evtchn == 0)
return -ENOENT;
- nonseekable_open(inode, filp);
-
- filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+ stream_open(inode, filp);
u = kzalloc(sizeof(*u), GFP_KERNEL);
if (u == NULL)