author     Dave Airlie <airlied@redhat.com>   2015-03-24 11:12:20 +1000
committer  Dave Airlie <airlied@redhat.com>   2015-03-24 11:12:20 +1000
commit     74ccbff99787b68e4eb01ef8cf29789229ab0f5d
tree       d4e322105618e5b3887f7dc6b54e5d2346974675
parent     ae10c2248593fb84c6951d67c98c9c934997e56a
parent     0f9e9cd61f46c07246e30871fd638ffeaca3c576
Merge tag 'drm-intel-next-2015-03-13-merge' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2015-03-13-rebased:
- EU count report param for gen9+ (Jeff McGee)
- piles of pll/wm/... fixes for chv, finally out of preliminary hw support (Ville, Vijay)
- gen9 rps support from Akash
- more work to move towards atomic from Matt, Ander and others
- runtime pm support for skl (Damien)
- edp1.4 intermediate link clock support (Sonika)
- use frontbuffer tracking for fbc (Paulo)
- remove ilk rc6 (John Harrison)
- a bunch of smaller things and fixes all over

Includes backmerge because git rerere couldn't keep up any more.

* tag 'drm-intel-next-2015-03-13-merge' of git://anongit.freedesktop.org/drm-intel: (366 commits)
  drm/i915: Make sure the primary plane is enabled before reading out the fb state
  drm/i915: Update DRIVER_DATE to 20150313
  drm/i915: Fix vmap_batch page iterator overrun
  drm/i915: Export total subslice and EU counts
  drm/i915: redefine WARN_ON_ONCE to include the condition
  drm/i915/skl: Implement WaDisableHBR2
  drm/i915: Remove the preliminary_hw_support shackles from CHV
  drm/i915: Read CHV_PLL_DW8 from the correct offset
  drm/i915: Rewrite IVB FDI bifurcation conflict checks
  drm/i915: Rewrite some some of the FDI lane checks
  drm/i915/skl: Enable the RPS interrupts programming
  drm/i915/skl: Enabling processing of Turbo interrupts
  drm/i915/skl: Updated the i915_frequency_info debugfs function
  drm/i915: Simplify the way BC bifurcation state consistency is kept
  drm/i915/skl: Updated the act_freq_mhz_show sysfs function
  drm/i915/skl: Updated the gen9_enable_rps function
  drm/i915/skl: Updated the gen6_rps_limits function
  drm/i915/skl: Restructured the gen6_set_rps_thresholds function
  drm/i915/skl: Updated the gen6_set_rps function
  drm/i915/skl: Updated the gen6_init_rps_frequencies function
  ...

-rw-r--r--  Documentation/devicetree/bindings/arm/exynos/power_domain.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/sti.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/apm-xgene-enet.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/power/power_domain.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/serial/8250.txt (renamed from Documentation/devicetree/bindings/serial/of-serial.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/submitting-patches.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/watchdog/atmel-wdt.txt | 5
-rw-r--r--  MAINTAINERS | 17
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/Makefile | 1
-rw-r--r--  arch/arm/boot/dts/am335x-bone-common.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/am335x-bone.dts | 8
-rw-r--r--  arch/arm/boot/dts/am335x-lxm.dts | 4
-rw-r--r--  arch/arm/boot/dts/am33xx-clocks.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/am43xx-clocks.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/at91sam9260.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/at91sam9261.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/at91sam9263.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/at91sam9g45.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/at91sam9n12.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/at91sam9x5.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/dra7-evm.dts | 10
-rw-r--r--  arch/arm/boot/dts/dra72-evm.dts | 10
-rw-r--r--  arch/arm/boot/dts/dra7xx-clocks.dtsi | 90
-rw-r--r--  arch/arm/boot/dts/exynos3250.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/exynos4-cpu-thermal.dtsi | 52
-rw-r--r--  arch/arm/boot/dts/exynos4.dtsi | 45
-rw-r--r--  arch/arm/boot/dts/exynos4210-trats.dts | 19
-rw-r--r--  arch/arm/boot/dts/exynos4210-universal_c210.dts | 57
-rw-r--r--  arch/arm/boot/dts/exynos4210.dtsi | 38
-rw-r--r--  arch/arm/boot/dts/exynos4212.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/exynos4412-odroid-common.dtsi | 64
-rw-r--r--  arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi | 24
-rw-r--r--  arch/arm/boot/dts/exynos4412-trats2.dts | 15
-rw-r--r--  arch/arm/boot/dts/exynos4412.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/exynos4x12.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/exynos5250.dtsi | 44
-rw-r--r--  arch/arm/boot/dts/exynos5420-trip-points.dtsi | 35
-rw-r--r--  arch/arm/boot/dts/exynos5420.dtsi | 33
-rw-r--r--  arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi | 24
-rw-r--r--  arch/arm/boot/dts/exynos5440-trip-points.dtsi | 25
-rw-r--r--  arch/arm/boot/dts/exynos5440.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/imx6qdl-sabresd.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6sl-evk.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap5-core-thermal.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap5-gpu-thermal.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/omap54xx-clocks.dtsi | 41
-rw-r--r--  arch/arm/boot/dts/sama5d3.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sama5d4.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/socfpga.dtsi | 6
-rw-r--r--  arch/arm/configs/at91_dt_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 2
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/configs/sama5_defconfig | 2
-rw-r--r--  arch/arm/configs/sunxi_defconfig | 1
-rw-r--r--  arch/arm/configs/vexpress_defconfig | 2
-rw-r--r--  arch/arm/include/debug/at91.S | 5
-rw-r--r--  arch/arm/mach-at91/pm.c | 22
-rw-r--r--  arch/arm/mach-at91/pm.h | 2
-rw-r--r--  arch/arm/mach-at91/pm_slowclock.S | 80
-rw-r--r--  arch/arm/mach-exynos/platsmp.c | 3
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c | 28
-rw-r--r--  arch/arm/mach-exynos/suspend.c | 4
-rw-r--r--  arch/arm/mach-imx/mach-imx6q.c | 5
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 10
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.h | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 103
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 1
-rw-r--r--  arch/arm/mach-omap2/prm44xx.c | 4
-rw-r--r--  arch/arm/mach-pxa/idp.c | 1
-rw-r--r--  arch/arm/mach-pxa/lpd270.c | 2
-rw-r--r--  arch/arm/mach-sa1100/neponset.c | 4
-rw-r--r--  arch/arm/mach-sa1100/pleb.c | 2
-rw-r--r--  arch/arm/mach-socfpga/core.h | 2
-rw-r--r--  arch/arm/mach-socfpga/socfpga.c | 5
-rw-r--r--  arch/arm/mach-sti/board-dt.c | 1
-rw-r--r--  arch/arm64/boot/dts/apm/apm-storm.dtsi | 4
-rw-r--r--  arch/arm64/include/asm/tlb.h | 3
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 13
-rw-r--r--  arch/arm64/kernel/efi.c | 9
-rw-r--r--  arch/arm64/kernel/head.S | 2
-rw-r--r--  arch/arm64/kernel/process.c | 8
-rw-r--r--  arch/c6x/include/asm/pgtable.h | 5
-rw-r--r--  arch/microblaze/kernel/entry.S | 7
-rw-r--r--  arch/nios2/include/asm/ptrace.h | 47
-rw-r--r--  arch/nios2/include/asm/ucontext.h | 32
-rw-r--r--  arch/nios2/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/nios2/include/uapi/asm/elf.h | 4
-rw-r--r--  arch/nios2/include/uapi/asm/ptrace.h | 50
-rw-r--r--  arch/nios2/include/uapi/asm/sigcontext.h | 12
-rw-r--r--  arch/nios2/kernel/signal.c | 4
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 12
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 2
-rw-r--r--  arch/s390/include/asm/page.h | 11
-rw-r--r--  arch/s390/kernel/jump_label.c | 12
-rw-r--r--  arch/s390/kernel/module.c | 1
-rw-r--r--  arch/s390/kernel/processor.c | 2
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 68
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 3
-rw-r--r--  arch/s390/kvm/priv.c | 2
-rw-r--r--  arch/s390/pci/pci.c | 28
-rw-r--r--  arch/s390/pci/pci_mmio.c | 17
-rw-r--r--  arch/x86/xen/p2m.c | 2
-rw-r--r--  drivers/acpi/acpi_lpss.c | 5
-rw-r--r--  drivers/ata/sata_fsl.c | 2
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 34
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c | 10
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.h | 6
-rw-r--r--  drivers/clk/clk-divider.c | 29
-rw-r--r--  drivers/clk/clk.c | 27
-rw-r--r--  drivers/clk/qcom/gcc-msm8960.c | 13
-rw-r--r--  drivers/clk/qcom/lcc-ipq806x.c | 1
-rw-r--r--  drivers/clk/qcom/lcc-msm8960.c | 7
-rw-r--r--  drivers/clk/ti/fapll.c | 6
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 35
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 11
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 213
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 81
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 398
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 187
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 990
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 68
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 78
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 14
-rw-r--r--  drivers/i2c/i2c-core.c | 3
-rw-r--r--  drivers/input/keyboard/tc3589x-keypad.c | 6
-rw-r--r--  drivers/input/misc/mma8450.c | 1
-rw-r--r--  drivers/input/mouse/alps.c | 4
-rw-r--r--  drivers/input/mouse/cyapa_gen3.c | 2
-rw-r--r--  drivers/input/mouse/cyapa_gen5.c | 4
-rw-r--r--  drivers/input/mouse/focaltech.c | 50
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 14
-rw-r--r--  drivers/input/mouse/psmouse.h | 6
-rw-r--r--  drivers/input/touchscreen/Kconfig | 1
-rw-r--r--  drivers/iommu/Kconfig | 2
-rw-r--r--  drivers/iommu/exynos-iommu.c | 7
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 5
-rw-r--r--  drivers/iommu/omap-iommu.c | 7
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 7
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 21
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 157
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 20
-rw-r--r--  drivers/mtd/nand/Kconfig | 1
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 50
-rw-r--r--  drivers/net/can/dev.c | 8
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 48
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 8
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 19
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 65
-rw-r--r--  drivers/net/team/team.c | 6
-rw-r--r--  drivers/net/xen-netback/interface.c | 3
-rw-r--r--  drivers/net/xen-netback/netback.c | 22
-rw-r--r--  drivers/of/Kconfig | 3
-rw-r--r--  drivers/of/base.c | 27
-rw-r--r--  drivers/of/overlay.c | 3
-rw-r--r--  drivers/of/unittest.c | 28
-rw-r--r--  drivers/pci/host/pci-xgene.c | 4
-rw-r--r--  drivers/pci/pci-sysfs.c | 5
-rw-r--r--  drivers/regulator/core.c | 7
-rw-r--r--  drivers/regulator/da9210-regulator.c | 9
-rw-r--r--  drivers/regulator/rk808-regulator.c | 8
-rw-r--r--  drivers/rtc/rtc-s3c.c | 1
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/scm_blk_cluster.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 6
-rw-r--r--  drivers/spi/spi-atmel.c | 12
-rw-r--r--  drivers/spi/spi-dw-mid.c | 6
-rw-r--r--  drivers/spi/spi-dw-pci.c | 4
-rw-r--r--  drivers/spi/spi-dw.c | 4
-rw-r--r--  drivers/spi/spi-img-spfi.c | 7
-rw-r--r--  drivers/spi/spi-pl022.c | 2
-rw-r--r--  drivers/spi/spi-ti-qspi.c | 22
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 204
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 466
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 2
-rw-r--r--  drivers/video/fbdev/amba-clcd.c | 3
-rw-r--r--  drivers/video/fbdev/core/fbmon.c | 6
-rw-r--r--  drivers/video/fbdev/omap2/dss/display-sysfs.c | 179
-rw-r--r--  drivers/xen/events/events_base.c | 18
-rw-r--r--  drivers/xen/xen-pciback/conf_space.c | 2
-rw-r--r--  drivers/xen/xen-pciback/conf_space.h | 2
-rw-r--r--  drivers/xen/xen-pciback/conf_space_header.c | 61
-rw-r--r--  fs/locks.c | 2
-rw-r--r--  fs/nilfs2/segment.c | 7
-rw-r--r--  fs/notify/fanotify/fanotify.c | 3
-rw-r--r--  fs/ocfs2/ocfs2.h | 2
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 15
-rw-r--r--  include/drm/i915_pciids.h | 49
-rw-r--r--  include/dt-bindings/pinctrl/am33xx.h | 3
-rw-r--r--  include/dt-bindings/pinctrl/am43xx.h | 3
-rw-r--r--  include/linux/clk.h | 18
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 5
-rw-r--r--  include/linux/kasan.h | 9
-rw-r--r--  include/linux/moduleloader.h | 8
-rw-r--r--  include/linux/of_platform.h | 2
-rw-r--r--  include/linux/spi/spi.h | 2
-rw-r--r--  include/linux/uio.h | 2
-rw-r--r--  include/linux/vmalloc.h | 1
-rw-r--r--  include/linux/workqueue.h | 3
-rw-r--r--  include/net/netfilter/nf_tables.h | 22
-rw-r--r--  include/soc/at91/at91sam9_ddrsdr.h | 2
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 2
-rw-r--r--  include/uapi/drm/i915_drm.h | 3
-rw-r--r--  include/video/omapdss.h | 1
-rw-r--r--  include/xen/xenbus.h | 4
-rw-r--r--  kernel/cpuset.c | 9
-rw-r--r--  kernel/module.c | 2
-rw-r--r--  kernel/trace/ftrace.c | 40
-rw-r--r--  kernel/workqueue.c | 56
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/iov_iter.c (renamed from mm/iov_iter.c) | 15
-rw-r--r--  lib/seq_buf.c | 4
-rw-r--r--  mm/Makefile | 2
-rw-r--r--  mm/cma.c | 12
-rw-r--r--  mm/huge_memory.c | 11
-rw-r--r--  mm/hugetlb.c | 4
-rw-r--r--  mm/kasan/kasan.c | 14
-rw-r--r--  mm/memcontrol.c | 4
-rw-r--r--  mm/memory.c | 7
-rw-r--r--  mm/mlock.c | 4
-rw-r--r--  mm/nommu.c | 1
-rw-r--r--  mm/page_alloc.c | 3
-rw-r--r--  mm/vmalloc.c | 1
-rw-r--r--  net/can/af_can.c | 3
-rw-r--r--  net/ipv4/ip_fragment.c | 11
-rw-r--r--  net/ipv4/ip_sockglue.c | 33
-rw-r--r--  net/ipv4/ping.c | 12
-rw-r--r--  net/ipv4/tcp.c | 10
-rw-r--r--  net/ipv6/datagram.c | 39
-rw-r--r--  net/ipv6/ping.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 3
-rw-r--r--  net/netfilter/nf_tables_api.c | 61
-rw-r--r--  net/netfilter/nft_compat.c | 14
-rw-r--r--  net/packet/af_packet.c | 22
-rw-r--r--  net/rxrpc/ar-error.c | 4
-rw-r--r--  net/tipc/link.c | 7
-rw-r--r--  sound/core/control.c | 4
-rw-r--r--  sound/firewire/dice/dice-interface.h | 18
-rw-r--r--  sound/firewire/dice/dice-proc.c | 4
-rw-r--r--  sound/firewire/iso-resources.c | 3
-rw-r--r--  sound/pci/hda/hda_controller.c | 2
-rw-r--r--  sound/pci/hda/hda_generic.c | 30
-rw-r--r--  sound/pci/hda/patch_cirrus.c | 2
-rw-r--r--  sound/pci/hda/patch_conexant.c | 11
-rw-r--r--  sound/soc/fsl/fsl_spdif.c | 4
-rw-r--r--  sound/soc/kirkwood/kirkwood-i2s.c | 2
-rw-r--r--  sound/usb/quirks-table.h | 30
-rw-r--r--  tools/power/cpupower/Makefile | 2
-rw-r--r--  tools/testing/selftests/exec/execveat.c | 10

290 files changed, 4289 insertions(+), 2440 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index f4445e5a2bbb..1e097037349c 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -22,6 +22,8 @@ Optional Properties:
- pclkN, clkN: Pairs of parent of input clock and input clock to the
devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
are supported currently.
+- power-domains: phandle pointing to the parent power domain, for more details
+ see Documentation/devicetree/bindings/power/power_domain.txt
Node of a device using power domains must have a power-domains property
defined with a phandle to respective power domain.
diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt
index d70ec358736c..8d27f6b084c7 100644
--- a/Documentation/devicetree/bindings/arm/sti.txt
+++ b/Documentation/devicetree/bindings/arm/sti.txt
@@ -13,6 +13,10 @@ Boards with the ST STiH407 SoC shall have the following properties:
Required root node property:
compatible = "st,stih407";
+Boards with the ST STiH410 SoC shall have the following properties:
+Required root node property:
+compatible = "st,stih410";
+
Boards with the ST STiH418 SoC shall have the following properties:
Required root node property:
compatible = "st,stih418";
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index cfcc52705ed8..6151999c5dca 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -4,7 +4,10 @@ Ethernet nodes are defined to describe on-chip ethernet interfaces in
APM X-Gene SoC.
Required properties for all the ethernet interfaces:
-- compatible: Should be "apm,xgene-enet"
+- compatible: Should state binding information from the following list,
+ - "apm,xgene-enet": RGMII based 1G interface
+ - "apm,xgene1-sgenet": SGMII based 1G interface
+ - "apm,xgene1-xgenet": XFI based 10G interface
- reg: Address and length of the register set for the device. It contains the
information of registers in the same order as described by reg-names
- reg-names: Should contain the register set names
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 98c16672ab5f..0f8ed3710c66 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -19,6 +19,16 @@ Required properties:
providing multiple PM domains (e.g. power controllers), but can be any value
as specified by device tree binding documentation of particular provider.
+Optional properties:
+ - power-domains : A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle.
+ Some power domains might be powered from another power domain (or have
+ other hardware specific dependencies). For representing such dependency
+ a standard PM domain consumer binding is used. When provided, all domains
+ created by the given provider should be subdomains of the domain
+ specified by this binding. More details about power domain specifier are
+ available in the next section.
+
Example:
power: power-controller@12340000 {
@@ -30,6 +40,25 @@ Example:
The node above defines a power controller that is a PM domain provider and
expects one cell as its phandle argument.
+Example 2:
+
+ parent: power-controller@12340000 {
+ compatible = "foo,power-controller";
+ reg = <0x12340000 0x1000>;
+ #power-domain-cells = <1>;
+ };
+
+ child: power-controller@12340000 {
+ compatible = "foo,power-controller";
+ reg = <0x12341000 0x1000>;
+ power-domains = <&parent 0>;
+ #power-domain-cells = <1>;
+ };
+
+The nodes above define two power controllers: 'parent' and 'child'.
+Domains created by the 'child' power controller are subdomains of '0' power
+domain provided by the 'parent' power controller.
+
==PM domain consumers==
Required properties:
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/8250.txt
index 91d5ab0e60fc..91d5ab0e60fc 100644
--- a/Documentation/devicetree/bindings/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
new file mode 100644
index 000000000000..ebcbb62c0a76
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
@@ -0,0 +1,19 @@
+ETRAX FS UART
+
+Required properties:
+- compatible : "axis,etraxfs-uart"
+- reg: offset and length of the register set for the device.
+- interrupts: device interrupt
+
+Optional properties:
+- {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD
+ line respectively.
+
+Example:
+
+serial@b0026000 {
+ compatible = "axis,etraxfs-uart";
+ reg = <0xb0026000 0x1000>;
+ interrupts = <68>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 56742bc70218..7d44eae7ab0b 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -12,6 +12,9 @@ I. For patch submitters
devicetree@vger.kernel.org
+ and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify
+ all of the DT maintainers.
+
3) The Documentation/ portion of the patch should come in the series before
the code implementing the binding.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 389ca1347a77..fae26d014aaf 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -20,6 +20,7 @@ amlogic Amlogic, Inc.
ams AMS AG
amstaos AMS-Taos Inc.
apm Applied Micro Circuits Corporation (APM)
+arasan Arasan Chip Systems
arm ARM Ltd.
armadeus ARMadeus Systems SARL
asahi-kasei Asahi Kasei Corp.
@@ -27,6 +28,7 @@ atmel Atmel Corporation
auo AU Optronics Corporation
avago Avago Technologies
avic Shanghai AVIC Optoelectronics Co., Ltd.
+axis Axis Communications AB
bosch Bosch Sensortec GmbH
brcm Broadcom Corporation
buffalo Buffalo, Inc.
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
index f90e294d7631..a4d869744f59 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
@@ -26,6 +26,11 @@ Optional properties:
- atmel,disable : Should be present if you want to disable the watchdog.
- atmel,idle-halt : Should be present if you want to stop the watchdog when
entering idle state.
+ CAUTION: This property should be used with care, it actually makes the
+ watchdog not counting when the CPU is in idle state, therefore the
+ watchdog reset time depends on mean CPU usage and will not reset at all
+ if the CPU stop working while it is in idle state, which is probably
+ not what you want.
- atmel,dbg-halt : Should be present if you want to stop the watchdog when
entering debug state.
diff --git a/MAINTAINERS b/MAINTAINERS
index ab44a48d53bb..74778886321e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1030,6 +1030,16 @@ F: arch/arm/mach-mxs/
F: arch/arm/boot/dts/imx*
F: arch/arm/configs/imx*_defconfig
+ARM/FREESCALE VYBRID ARM ARCHITECTURE
+M: Shawn Guo <shawn.guo@linaro.org>
+M: Sascha Hauer <kernel@pengutronix.de>
+R: Stefan Agner <stefan@agner.ch>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+F: arch/arm/mach-imx/*vf610*
+F: arch/arm/boot/dts/vf*
+
ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1188,6 +1198,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+M: Gregory Clement <gregory.clement@free-electrons.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-dove/
@@ -2107,7 +2118,6 @@ F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Christian Daudt <bcm@fixthebug.org>
-M: Matt Porter <mporter@linaro.org>
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
T: git git://github.com/broadcom/mach-bcm
@@ -2369,8 +2379,9 @@ F: arch/x86/include/asm/tce.h
CAN NETWORK LAYER
M: Oliver Hartkopp <socketcan@hartkopp.net>
+M: Marc Kleine-Budde <mkl@pengutronix.de>
L: linux-can@vger.kernel.org
-W: http://gitorious.org/linux-can
+W: https://github.com/linux-can
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
S: Maintained
@@ -2386,7 +2397,7 @@ CAN NETWORK DRIVERS
M: Wolfgang Grandegger <wg@grandegger.com>
M: Marc Kleine-Budde <mkl@pengutronix.de>
L: linux-can@vger.kernel.org
-W: http://gitorious.org/linux-can
+W: https://github.com/linux-can
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
S: Maintained
diff --git a/Makefile b/Makefile
index 1100ff3c77e3..e734965b1604 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7f99cd652203..eb7bb511f853 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -150,6 +150,7 @@ machine-$(CONFIG_ARCH_BERLIN) += berlin
machine-$(CONFIG_ARCH_CLPS711X) += clps711x
machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx
machine-$(CONFIG_ARCH_DAVINCI) += davinci
+machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor
machine-$(CONFIG_ARCH_DOVE) += dove
machine-$(CONFIG_ARCH_EBSA110) += ebsa110
machine-$(CONFIG_ARCH_EFM32) += efm32
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 2c6248d9a9ef..c3255e0c90aa 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -301,3 +301,11 @@
cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
cd-inverted;
};
+
+&aes {
+ status = "okay";
+};
+
+&sham {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 83d40f7655e5..6b8493720424 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -24,11 +24,3 @@
&mmc1 {
vmmc-supply = <&ldo3_reg>;
};
-
-&sham {
- status = "okay";
-};
-
-&aes {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts
index 7266a00aab2e..5c5667a3624d 100644
--- a/arch/arm/boot/dts/am335x-lxm.dts
+++ b/arch/arm/boot/dts/am335x-lxm.dts
@@ -328,6 +328,10 @@
dual_emac_res_vlan = <3>;
};
+&phy_sel {
+ rmii-clock-ext;
+};
+
&mac {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&cpsw_default>;
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
index 712edce7d6fb..071b56aa0c7e 100644
--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
@@ -99,7 +99,7 @@
ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <0>;
reg = <0x0664>;
};
@@ -107,7 +107,7 @@
ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <1>;
reg = <0x0664>;
};
@@ -115,7 +115,7 @@
ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <2>;
reg = <0x0664>;
};
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index c7dc9dab93a4..cfb49686ab6a 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -107,7 +107,7 @@
ehrpwm0_tbclk: ehrpwm0_tbclk {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <0>;
reg = <0x0664>;
};
@@ -115,7 +115,7 @@
ehrpwm1_tbclk: ehrpwm1_tbclk {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <1>;
reg = <0x0664>;
};
@@ -123,7 +123,7 @@
ehrpwm2_tbclk: ehrpwm2_tbclk {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <2>;
reg = <0x0664>;
};
@@ -131,7 +131,7 @@
ehrpwm3_tbclk: ehrpwm3_tbclk {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <4>;
reg = <0x0664>;
};
@@ -139,7 +139,7 @@
ehrpwm4_tbclk: ehrpwm4_tbclk {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <5>;
reg = <0x0664>;
};
@@ -147,7 +147,7 @@
ehrpwm5_tbclk: ehrpwm5_tbclk {
#clock-cells = <0>;
compatible = "ti,gate-clock";
- clocks = <&dpll_per_m2_ck>;
+ clocks = <&l4ls_gclk>;
ti,bit-shift = <6>;
reg = <0x0664>;
};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index fff0ee69aab4..e7f0a4ae271c 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -494,12 +494,12 @@
pinctrl_usart3_rts: usart3_rts-0 {
atmel,pins =
- <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC8 periph B */
+ <AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE>;
};
pinctrl_usart3_cts: usart3_cts-0 {
atmel,pins =
- <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC10 periph B */
+ <AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE>;
};
};
@@ -853,7 +853,7 @@
};
usb1: gadget@fffa4000 {
- compatible = "atmel,at91rm9200-udc";
+ compatible = "atmel,at91sam9260-udc";
reg = <0xfffa4000 0x4000>;
interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>;
clocks = <&udc_clk>, <&udpck>;
@@ -976,7 +976,6 @@
atmel,watchdog-type = "hardware";
atmel,reset-type = "all";
atmel,dbg-halt;
- atmel,idle-halt;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index e247b0b5fdab..d55fdf2487ef 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -124,11 +124,12 @@
};
usb1: gadget@fffa4000 {
- compatible = "atmel,at91rm9200-udc";
+ compatible = "atmel,at91sam9261-udc";
reg = <0xfffa4000 0x4000>;
interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&udc_clk>, <&udpck>;
- clock-names = "usb_clk", "udc_clk", "udpck";
+ clocks = <&udc_clk>, <&udpck>;
+ clock-names = "pclk", "hclk";
+ atmel,matrix = <&matrix>;
status = "disabled";
};
@@ -262,7 +263,7 @@
};
matrix: matrix@ffffee00 {
- compatible = "atmel,at91sam9260-bus-matrix";
+ compatible = "atmel,at91sam9260-bus-matrix", "syscon";
reg = <0xffffee00 0x200>;
};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 1f67bb4c144e..fce301c4e9d6 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -69,7 +69,7 @@
sram1: sram@00500000 {
compatible = "mmio-sram";
- reg = <0x00300000 0x4000>;
+ reg = <0x00500000 0x4000>;
};
ahb {
@@ -856,7 +856,7 @@
};
usb1: gadget@fff78000 {
- compatible = "atmel,at91rm9200-udc";
+ compatible = "atmel,at91sam9263-udc";
reg = <0xfff78000 0x4000>;
interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>;
clocks = <&udc_clk>, <&udpck>;
@@ -905,7 +905,6 @@
atmel,watchdog-type = "hardware";
atmel,reset-type = "all";
atmel,dbg-halt;
- atmel,idle-halt;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index ee80aa9c0759..488af63d5174 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -1116,7 +1116,6 @@
atmel,watchdog-type = "hardware";
atmel,reset-type = "all";
atmel,dbg-halt;
- atmel,idle-halt;
status = "disabled";
};
@@ -1301,7 +1300,7 @@
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00800000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
+ clocks = <&utmi>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index c2666a7cb5b1..0c53a375ba99 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -894,7 +894,6 @@
atmel,watchdog-type = "hardware";
atmel,reset-type = "all";
atmel,dbg-halt;
- atmel,idle-halt;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 818dabdd8c0e..d221179d0f1a 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1066,7 +1066,7 @@
reg = <0x00500000 0x80000
0xf803c000 0x400>;
interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&usb>, <&udphs_clk>;
+ clocks = <&utmi>, <&udphs_clk>;
clock-names = "hclk", "pclk";
status = "disabled";
@@ -1130,7 +1130,6 @@
atmel,watchdog-type = "hardware";
atmel,reset-type = "all";
atmel,dbg-halt;
- atmel,idle-halt;
status = "disabled";
};
@@ -1186,7 +1185,7 @@
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00700000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+ clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
clock-names = "usb_clk", "ehci_clk", "uhpck";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 3290a96ba586..7563d7ce01bb 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -263,17 +263,15 @@
dcan1_pins_default: dcan1_pins_default {
pinctrl-single,pins = <
- 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */
- 0x3d4 (MUX_MODE15) /* dcan1_rx.off */
- 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */
+ 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+ 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */
>;
};
dcan1_pins_sleep: dcan1_pins_sleep {
pinctrl-single,pins = <
- 0x3d0 (MUX_MODE15) /* dcan1_tx.off */
- 0x3d4 (MUX_MODE15) /* dcan1_rx.off */
- 0x418 (MUX_MODE15) /* wakeup0.off */
+ 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */
+ 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */
>;
};
};
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index e0264d0bf7b9..40ed539ce474 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -119,17 +119,15 @@
dcan1_pins_default: dcan1_pins_default {
pinctrl-single,pins = <
- 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */
- 0x3d4 (MUX_MODE15) /* dcan1_rx.off */
- 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */
+ 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+ 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */
>;
};
dcan1_pins_sleep: dcan1_pins_sleep {
pinctrl-single,pins = <
- 0x3d0 (MUX_MODE15) /* dcan1_tx.off */
- 0x3d4 (MUX_MODE15) /* dcan1_rx.off */
- 0x418 (MUX_MODE15) /* wakeup0.off */
+ 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */
+ 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */
>;
};
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 4bdcbd61ce47..99b09a44e269 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -243,10 +243,18 @@
ti,invert-autoidle-bit;
};
+ dpll_core_byp_mux: dpll_core_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ ti,bit-shift = <23>;
+ reg = <0x012c>;
+ };
+
dpll_core_ck: dpll_core_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-core-clock";
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ clocks = <&sys_clkin1>, <&dpll_core_byp_mux>;
reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
};
@@ -309,10 +317,18 @@
clock-div = <1>;
};
+ dpll_dsp_byp_mux: dpll_dsp_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
+ ti,bit-shift = <23>;
+ reg = <0x0240>;
+ };
+
dpll_dsp_ck: dpll_dsp_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
+ clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>;
reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
};
@@ -335,10 +351,18 @@
clock-div = <1>;
};
+ dpll_iva_byp_mux: dpll_iva_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
+ ti,bit-shift = <23>;
+ reg = <0x01ac>;
+ };
+
dpll_iva_ck: dpll_iva_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
+ clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>;
reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
};
@@ -361,10 +385,18 @@
clock-div = <1>;
};
+ dpll_gpu_byp_mux: dpll_gpu_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ ti,bit-shift = <23>;
+ reg = <0x02e4>;
+ };
+
dpll_gpu_ck: dpll_gpu_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>;
reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
};
@@ -398,10 +430,18 @@
clock-div = <1>;
};
+ dpll_ddr_byp_mux: dpll_ddr_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ ti,bit-shift = <23>;
+ reg = <0x021c>;
+ };
+
dpll_ddr_ck: dpll_ddr_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>;
reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>;
};
@@ -416,10 +456,18 @@
ti,invert-autoidle-bit;
};
+ dpll_gmac_byp_mux: dpll_gmac_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ ti,bit-shift = <23>;
+ reg = <0x02b4>;
+ };
+
dpll_gmac_ck: dpll_gmac_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+ clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>;
reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>;
};
@@ -482,10 +530,18 @@
clock-div = <1>;
};
+ dpll_eve_byp_mux: dpll_eve_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
+ ti,bit-shift = <23>;
+ reg = <0x0290>;
+ };
+
dpll_eve_ck: dpll_eve_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
+ clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>;
reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>;
};
@@ -1249,10 +1305,18 @@
clock-div = <1>;
};
+ dpll_per_byp_mux: dpll_per_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
+ ti,bit-shift = <23>;
+ reg = <0x014c>;
+ };
+
dpll_per_ck: dpll_per_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
+ clocks = <&sys_clkin1>, <&dpll_per_byp_mux>;
reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
};
@@ -1275,10 +1339,18 @@
clock-div = <1>;
};
+ dpll_usb_byp_mux: dpll_usb_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
+ ti,bit-shift = <23>;
+ reg = <0x018c>;
+ };
+
dpll_usb_ck: dpll_usb_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-j-type-clock";
- clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
+ clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>;
reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
};
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 277b48b0b6f9..ac6b0ae42caf 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -18,6 +18,7 @@
*/
#include "skeleton.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
#include <dt-bindings/clock/exynos3250.h>
/ {
@@ -193,6 +194,7 @@
interrupts = <0 216 0>;
clocks = <&cmu CLK_TMU_APBIF>;
clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
new file mode 100644
index 000000000000..735cb2f10817
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
@@ -0,0 +1,52 @@
+/*
+ * Device tree sources for Exynos4 thermal zone
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+thermal-zones {
+ cpu_thermal: cpu-thermal {
+ thermal-sensors = <&tmu 0>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ trips {
+ cpu_alert0: cpu-alert-0 {
+ temperature = <70000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_alert1: cpu-alert-1 {
+ temperature = <95000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_alert2: cpu-alert-2 {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_crit0: cpu-crit-0 {
+ temperature = <120000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ };
+ map1 {
+ trip = <&cpu_alert1>;
+ };
+ };
+ };
+};
+};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 76173cacd450..77ea547768f4 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -38,6 +38,7 @@
i2c5 = &i2c_5;
i2c6 = &i2c_6;
i2c7 = &i2c_7;
+ i2c8 = &i2c_8;
csis0 = &csis_0;
csis1 = &csis_1;
fimc0 = &fimc_0;
@@ -104,6 +105,7 @@
compatible = "samsung,exynos4210-pd";
reg = <0x10023C20 0x20>;
#power-domain-cells = <0>;
+ power-domains = <&pd_lcd0>;
};
pd_cam: cam-power-domain@10023C00 {
@@ -554,6 +556,22 @@
status = "disabled";
};
+ i2c_8: i2c@138E0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "samsung,s3c2440-hdmiphy-i2c";
+ reg = <0x138E0000 0x100>;
+ interrupts = <0 93 0>;
+ clocks = <&clock CLK_I2C_HDMI>;
+ clock-names = "i2c";
+ status = "disabled";
+
+ hdmi_i2c_phy: hdmiphy@38 {
+ compatible = "exynos4210-hdmiphy";
+ reg = <0x38>;
+ };
+ };
+
spi_0: spi@13920000 {
compatible = "samsung,exynos4210-spi";
reg = <0x13920000 0x100>;
@@ -663,6 +681,33 @@
status = "disabled";
};
+ tmu: tmu@100C0000 {
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
+
+ hdmi: hdmi@12D00000 {
+ compatible = "samsung,exynos4210-hdmi";
+ reg = <0x12D00000 0x70000>;
+ interrupts = <0 92 0>;
+ clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy",
+ "mout_hdmi";
+ clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
+ <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
+ <&clock CLK_MOUT_HDMI>;
+ phy = <&hdmi_i2c_phy>;
+ power-domains = <&pd_tv>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ status = "disabled";
+ };
+
+ mixer: mixer@12C10000 {
+ compatible = "samsung,exynos4210-mixer";
+ interrupts = <0 91 0>;
+ reg = <0x12C10000 0x2100>, <0x12c00000 0x300>;
+ power-domains = <&pd_tv>;
+ status = "disabled";
+ };
+
ppmu_dmc0: ppmu_dmc0@106a0000 {
compatible = "samsung,exynos-ppmu";
reg = <0x106a0000 0x2000>;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 3d6652a4b6cb..32c5fd8f6269 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -426,6 +426,25 @@
status = "okay";
};
+ tmu@100C0000 {
+ status = "okay";
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ cooling-maps {
+ map0 {
+ /* Corresponds to 800MHz at freq_table */
+ cooling-device = <&cpu0 2 2>;
+ };
+ map1 {
+ /* Corresponds to 200MHz at freq_table */
+ cooling-device = <&cpu0 4 4>;
+ };
+ };
+ };
+ };
+
camera {
pinctrl-names = "default";
pinctrl-0 = <>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index b57e6b82ea20..d4f2b11319dd 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -505,6 +505,63 @@
assigned-clock-rates = <0>, <160000000>;
};
};
+
+ hdmi_en: voltage-regulator-hdmi-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "HDMI_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpe0 1 0>;
+ enable-active-high;
+ };
+
+ hdmi_ddc: i2c-ddc {
+ compatible = "i2c-gpio";
+ gpios = <&gpe4 2 0 &gpe4 3 0>;
+ i2c-gpio,delay-us = <100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-0 = <&i2c_ddc_bus>;
+ pinctrl-names = "default";
+ status = "okay";
+ };
+
+ mixer@12C10000 {
+ status = "okay";
+ };
+
+ hdmi@12D00000 {
+ hpd-gpio = <&gpx3 7 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_hpd>;
+ hdmi-en-supply = <&hdmi_en>;
+ vdd-supply = <&ldo3_reg>;
+ vdd_osc-supply = <&ldo4_reg>;
+ vdd_pll-supply = <&ldo3_reg>;
+ ddc = <&hdmi_ddc>;
+ status = "okay";
+ };
+
+ i2c@138E0000 {
+ status = "okay";
+ };
+};
+
+&pinctrl_1 {
+ hdmi_hpd: hdmi-hpd {
+ samsung,pins = "gpx3-7";
+ samsung,pin-pud = <0>;
+ };
+};
+
+&pinctrl_0 {
+ i2c_ddc_bus: i2c-ddc-bus {
+ samsung,pins = "gpe4-2", "gpe4-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
};
&mdma1 {
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 67c832c9dcf1..be89f83f70e7 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -21,6 +21,7 @@
#include "exynos4.dtsi"
#include "exynos4210-pinctrl.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
/ {
compatible = "samsung,exynos4210", "samsung,exynos4";
@@ -35,10 +36,13 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@900 {
+ cpu0: cpu@900 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0x900>;
+ cooling-min-level = <4>;
+ cooling-max-level = <2>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu@901 {
@@ -153,16 +157,38 @@
reg = <0x03860000 0x1000>;
};
- tmu@100C0000 {
+ tmu: tmu@100C0000 {
compatible = "samsung,exynos4210-tmu";
interrupt-parent = <&combiner>;
reg = <0x100C0000 0x100>;
interrupts = <2 4>;
clocks = <&clock CLK_TMU_APBIF>;
clock-names = "tmu_apbif";
+ samsung,tmu_gain = <15>;
+ samsung,tmu_reference_voltage = <7>;
status = "disabled";
};
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tmu 0>;
+
+ trips {
+ cpu_alert0: cpu-alert-0 {
+ temperature = <85000>; /* millicelsius */
+ };
+ cpu_alert1: cpu-alert-1 {
+ temperature = <100000>; /* millicelsius */
+ };
+ cpu_alert2: cpu-alert-2 {
+ temperature = <110000>; /* millicelsius */
+ };
+ };
+ };
+ };
+
g2d@12800000 {
compatible = "samsung,s5pv210-g2d";
reg = <0x12800000 0x1000>;
@@ -203,6 +229,14 @@
};
};
+ mixer: mixer@12C10000 {
+ clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer",
+ "sclk_mixer";
+ clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+ <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>,
+ <&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>;
+ };
+
ppmu_lcd1: ppmu_lcd1@12240000 {
compatible = "samsung,exynos-ppmu";
reg = <0x12240000 0x2000>;
diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi
index dd0a43ec56da..5be03288f1ee 100644
--- a/arch/arm/boot/dts/exynos4212.dtsi
+++ b/arch/arm/boot/dts/exynos4212.dtsi
@@ -26,10 +26,13 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@A00 {
+ cpu0: cpu@A00 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0xA00>;
+ cooling-min-level = <13>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu@A01 {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index de80b5bba204..adb4f6a97a1d 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -249,6 +249,20 @@
regulator-always-on;
};
+ ldo8_reg: ldo@8 {
+ regulator-compatible = "LDO8";
+ regulator-name = "VDD10_HDMI_1.0V";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ ldo10_reg: ldo@10 {
+ regulator-compatible = "LDO10";
+ regulator-name = "VDDQ_MIPIHSI_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
ldo11_reg: LDO11 {
regulator-name = "VDD18_ABB1_1.8V";
regulator-min-microvolt = <1800000>;
@@ -411,6 +425,51 @@
ehci: ehci@12580000 {
status = "okay";
};
+
+ tmu@100C0000 {
+ vtmu-supply = <&ldo10_reg>;
+ status = "okay";
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ cooling-maps {
+ map0 {
+ /* Corresponds to 800MHz at freq_table */
+ cooling-device = <&cpu0 7 7>;
+ };
+ map1 {
+ /* Corresponds to 200MHz at freq_table */
+ cooling-device = <&cpu0 13 13>;
+ };
+ };
+ };
+ };
+
+ mixer: mixer@12C10000 {
+ status = "okay";
+ };
+
+ hdmi@12D00000 {
+ hpd-gpio = <&gpx3 7 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_hpd>;
+ vdd-supply = <&ldo8_reg>;
+ vdd_osc-supply = <&ldo10_reg>;
+ vdd_pll-supply = <&ldo8_reg>;
+ ddc = <&hdmi_ddc>;
+ status = "okay";
+ };
+
+ hdmi_ddc: i2c@13880000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_bus>;
+ };
+
+ i2c@138E0000 {
+ status = "okay";
+ };
};
&pinctrl_1 {
@@ -425,4 +484,9 @@
samsung,pin-pud = <0>;
samsung,pin-drv = <0>;
};
+
+ hdmi_hpd: hdmi-hpd {
+ samsung,pins = "gpx3-7";
+ samsung,pin-pud = <1>;
+ };
};
diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi
new file mode 100644
index 000000000000..e3f7934d19d0
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi
@@ -0,0 +1,24 @@
+/*
+ * Device tree sources for Exynos4412 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <8>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <55>;
+samsung,tmu_min_efuse_value = <40>;
+samsung,tmu_max_efuse_value = <100>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 21f748083586..173ffa479ad3 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -927,6 +927,21 @@
pulldown-ohm = <100000>; /* 100K */
io-channels = <&adc 2>; /* Battery temperature */
};
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ cooling-maps {
+ map0 {
+ /* Corresponds to 800MHz at freq_table */
+ cooling-device = <&cpu0 7 7>;
+ };
+ map1 {
+ /* Corresponds to 200MHz at freq_table */
+ cooling-device = <&cpu0 13 13>;
+ };
+ };
+ };
+ };
};
&pmu_system_controller {
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index 0f6ec93bb1d8..68ad43b391ae 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -26,10 +26,13 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@A00 {
+ cpu0: cpu@A00 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0xA00>;
+ cooling-min-level = <13>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu@A01 {
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index f5e0ae780d6c..6a6abe14fd9b 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -19,6 +19,7 @@
#include "exynos4.dtsi"
#include "exynos4x12-pinctrl.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
/ {
aliases {
@@ -297,4 +298,15 @@
clock-names = "tmu_apbif";
status = "disabled";
};
+
+ hdmi: hdmi@12D00000 {
+ compatible = "samsung,exynos4212-hdmi";
+ };
+
+ mixer: mixer@12C10000 {
+ compatible = "samsung,exynos4212-mixer";
+ clock-names = "mixer", "hdmi", "sclk_hdmi", "vp";
+ clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+ <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>;
+ };
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 9bb1b0b738f5..adbde1adad95 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -20,7 +20,7 @@
#include <dt-bindings/clock/exynos5250.h>
#include "exynos5.dtsi"
#include "exynos5250-pinctrl.dtsi"
-
+#include "exynos4-cpu-thermal.dtsi"
#include <dt-bindings/clock/exynos-audss-clk.h>
/ {
@@ -58,11 +58,14 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0>;
clock-frequency = <1700000000>;
+ cooling-min-level = <15>;
+ cooling-max-level = <9>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu@1 {
device_type = "cpu";
@@ -102,6 +105,12 @@
#power-domain-cells = <0>;
};
+ pd_disp1: disp1-power-domain@100440A0 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x100440A0 0x20>;
+ #power-domain-cells = <0>;
+ };
+
clock: clock-controller@10010000 {
compatible = "samsung,exynos5250-clock";
reg = <0x10010000 0x30000>;
@@ -235,12 +244,32 @@
status = "disabled";
};
- tmu@10060000 {
+ tmu: tmu@10060000 {
compatible = "samsung,exynos5250-tmu";
reg = <0x10060000 0x100>;
interrupts = <0 65 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tmu 0>;
+
+ cooling-maps {
+ map0 {
+ /* Corresponds to 800MHz at freq_table */
+ cooling-device = <&cpu0 9 9>;
+ };
+ map1 {
+ /* Corresponds to 200MHz at freq_table */
+ cooling-device = <&cpu0 15 15>;
+ };
+ };
+ };
};
serial@12C00000 {
@@ -719,6 +748,7 @@
hdmi: hdmi {
compatible = "samsung,exynos4212-hdmi";
reg = <0x14530000 0x70000>;
+ power-domains = <&pd_disp1>;
interrupts = <0 95 0>;
clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
<&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
@@ -731,9 +761,11 @@
mixer {
compatible = "samsung,exynos5250-mixer";
reg = <0x14450000 0x10000>;
+ power-domains = <&pd_disp1>;
interrupts = <0 94 0>;
- clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>;
- clock-names = "mixer", "sclk_hdmi";
+ clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+ <&clock CLK_SCLK_HDMI>;
+ clock-names = "mixer", "hdmi", "sclk_hdmi";
};
dp_phy: video-phy@10040720 {
@@ -743,6 +775,7 @@
};
dp: dp-controller@145B0000 {
+ power-domains = <&pd_disp1>;
clocks = <&clock CLK_DP>;
clock-names = "dp";
phys = <&dp_phy>;
@@ -750,6 +783,7 @@
};
fimd: fimd@14400000 {
+ power-domains = <&pd_disp1>;
clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
clock-names = "sclk_fimd", "fimd";
};
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
new file mode 100644
index 000000000000..5d31fc140823
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
@@ -0,0 +1,35 @@
+/*
+ * Device tree sources for default Exynos5420 thermal zone definition
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+polling-delay-passive = <0>;
+polling-delay = <0>;
+trips {
+ cpu-alert-0 {
+ temperature = <85000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "active";
+ };
+ cpu-alert-1 {
+ temperature = <103000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "active";
+ };
+ cpu-alert-2 {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "active";
+ };
+ cpu-crit-0 {
+ temperature = <1200000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 9dc2e9773b30..c0e98cf3514f 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -740,8 +740,9 @@
compatible = "samsung,exynos5420-mixer";
reg = <0x14450000 0x10000>;
interrupts = <0 94 0>;
- clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>;
- clock-names = "mixer", "sclk_hdmi";
+ clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+ <&clock CLK_SCLK_HDMI>;
+ clock-names = "mixer", "hdmi", "sclk_hdmi";
power-domains = <&disp_pd>;
};
@@ -782,6 +783,7 @@
interrupts = <0 65 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
};
tmu_cpu1: tmu@10064000 {
@@ -790,6 +792,7 @@
interrupts = <0 183 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
};
tmu_cpu2: tmu@10068000 {
@@ -798,6 +801,7 @@
interrupts = <0 184 0>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
};
tmu_cpu3: tmu@1006c000 {
@@ -806,6 +810,7 @@
interrupts = <0 185 0>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
};
tmu_gpu: tmu@100a0000 {
@@ -814,6 +819,30 @@
interrupts = <0 215 0>;
clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
+
+ thermal-zones {
+ cpu0_thermal: cpu0-thermal {
+ thermal-sensors = <&tmu_cpu0>;
+ #include "exynos5420-trip-points.dtsi"
+ };
+ cpu1_thermal: cpu1-thermal {
+ thermal-sensors = <&tmu_cpu1>;
+ #include "exynos5420-trip-points.dtsi"
+ };
+ cpu2_thermal: cpu2-thermal {
+ thermal-sensors = <&tmu_cpu2>;
+ #include "exynos5420-trip-points.dtsi"
+ };
+ cpu3_thermal: cpu3-thermal {
+ thermal-sensors = <&tmu_cpu3>;
+ #include "exynos5420-trip-points.dtsi"
+ };
+ gpu_thermal: gpu-thermal {
+ thermal-sensors = <&tmu_gpu>;
+ #include "exynos5420-trip-points.dtsi"
+ };
};
watchdog: watchdog@101D0000 {
diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi
new file mode 100644
index 000000000000..7b2fba0ae92b
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi
@@ -0,0 +1,24 @@
+/*
+ * Device tree sources for Exynos5440 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <5>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <0x5d2d>;
+samsung,tmu_min_efuse_value = <16>;
+samsung,tmu_max_efuse_value = <76>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <70>;
+samsung,tmu_default_temp_offset = <25>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
new file mode 100644
index 000000000000..48adfa8f4300
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Device tree sources for default Exynos5440 thermal zone definition
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+polling-delay-passive = <0>;
+polling-delay = <0>;
+trips {
+ cpu-alert-0 {
+ temperature = <100000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "active";
+ };
+ cpu-crit-0 {
+			temperature = <105000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+};
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 8f3373cd7b87..59d9416b3b03 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -219,6 +219,7 @@
interrupts = <0 58 0>;
clocks = <&clock CLK_B_125>;
clock-names = "tmu_apbif";
+ #include "exynos5440-tmu-sensor-conf.dtsi"
};
tmuctrl_1: tmuctrl@16011C {
@@ -227,6 +228,7 @@
interrupts = <0 58 0>;
clocks = <&clock CLK_B_125>;
clock-names = "tmu_apbif";
+ #include "exynos5440-tmu-sensor-conf.dtsi"
};
tmuctrl_2: tmuctrl@160120 {
@@ -235,6 +237,22 @@
interrupts = <0 58 0>;
clocks = <&clock CLK_B_125>;
clock-names = "tmu_apbif";
+ #include "exynos5440-tmu-sensor-conf.dtsi"
+ };
+
+ thermal-zones {
+ cpu0_thermal: cpu0-thermal {
+ thermal-sensors = <&tmuctrl_0>;
+ #include "exynos5440-trip-points.dtsi"
+ };
+ cpu1_thermal: cpu1-thermal {
+ thermal-sensors = <&tmuctrl_1>;
+ #include "exynos5440-trip-points.dtsi"
+ };
+ cpu2_thermal: cpu2-thermal {
+ thermal-sensors = <&tmuctrl_2>;
+ #include "exynos5440-trip-points.dtsi"
+ };
};
sata@210000 {
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index f1cd2147421d..a626e6dd8022 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -35,6 +35,7 @@
regulator-max-microvolt = <5000000>;
gpio = <&gpio3 22 0>;
enable-active-high;
+ vin-supply = <&swbst_reg>;
};
reg_usb_h1_vbus: regulator@1 {
@@ -45,6 +46,7 @@
regulator-max-microvolt = <5000000>;
gpio = <&gpio1 29 0>;
enable-active-high;
+ vin-supply = <&swbst_reg>;
};
reg_audio: regulator@2 {
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index fda4932faefd..945887d3fdb3 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -52,6 +52,7 @@
regulator-max-microvolt = <5000000>;
gpio = <&gpio4 0 0>;
enable-active-high;
+ vin-supply = <&swbst_reg>;
};
reg_usb_otg2_vbus: regulator@1 {
@@ -62,6 +63,7 @@
regulator-max-microvolt = <5000000>;
gpio = <&gpio4 2 0>;
enable-active-high;
+ vin-supply = <&swbst_reg>;
};
reg_aud3v: regulator@2 {
diff --git a/arch/arm/boot/dts/omap5-core-thermal.dtsi b/arch/arm/boot/dts/omap5-core-thermal.dtsi
index 19212ac6eef0..de8a3d456cf7 100644
--- a/arch/arm/boot/dts/omap5-core-thermal.dtsi
+++ b/arch/arm/boot/dts/omap5-core-thermal.dtsi
@@ -13,7 +13,7 @@
core_thermal: core_thermal {
polling-delay-passive = <250>; /* milliseconds */
- polling-delay = <1000>; /* milliseconds */
+ polling-delay = <500>; /* milliseconds */
/* sensor ID */
thermal-sensors = <&bandgap 2>;
diff --git a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
index 1b87aca88b77..bc3090f2e84b 100644
--- a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
+++ b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
@@ -13,7 +13,7 @@
gpu_thermal: gpu_thermal {
polling-delay-passive = <250>; /* milliseconds */
- polling-delay = <1000>; /* milliseconds */
+ polling-delay = <500>; /* milliseconds */
/* sensor ID */
thermal-sensors = <&bandgap 1>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index ddff674bd05e..4a485b63a141 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -1079,4 +1079,8 @@
};
};
+&cpu_thermal {
+ polling-delay = <500>; /* milliseconds */
+};
+
/include/ "omap54xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi
index 58c27466f012..83b425fb3ac2 100644
--- a/arch/arm/boot/dts/omap54xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi
@@ -167,10 +167,18 @@
ti,index-starts-at-one;
};
+ dpll_core_byp_mux: dpll_core_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
+ ti,bit-shift = <23>;
+ reg = <0x012c>;
+ };
+
dpll_core_ck: dpll_core_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-core-clock";
- clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
+ clocks = <&sys_clkin>, <&dpll_core_byp_mux>;
reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
};
@@ -294,10 +302,18 @@
clock-div = <1>;
};
+ dpll_iva_byp_mux: dpll_iva_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
+ ti,bit-shift = <23>;
+ reg = <0x01ac>;
+ };
+
dpll_iva_ck: dpll_iva_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
+ clocks = <&sys_clkin>, <&dpll_iva_byp_mux>;
reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
};
@@ -599,10 +615,19 @@
};
};
&cm_core_clocks {
+
+ dpll_per_byp_mux: dpll_per_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
+ ti,bit-shift = <23>;
+ reg = <0x014c>;
+ };
+
dpll_per_ck: dpll_per_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-clock";
- clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
+ clocks = <&sys_clkin>, <&dpll_per_byp_mux>;
reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
};
@@ -714,10 +739,18 @@
ti,index-starts-at-one;
};
+ dpll_usb_byp_mux: dpll_usb_byp_mux {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
+ ti,bit-shift = <23>;
+ reg = <0x018c>;
+ };
+
dpll_usb_ck: dpll_usb_ck {
#clock-cells = <0>;
compatible = "ti,omap4-dpll-j-type-clock";
- clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
+ clocks = <&sys_clkin>, <&dpll_usb_byp_mux>;
reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
};
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 261311bdf65b..367af53c1b84 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1248,7 +1248,6 @@
atmel,watchdog-type = "hardware";
atmel,reset-type = "all";
atmel,dbg-halt;
- atmel,idle-halt;
status = "disabled";
};
@@ -1416,7 +1415,7 @@
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00700000 0x100000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+ clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
clock-names = "usb_clk", "ehci_clk", "uhpck";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index d986b41b9654..4303874889c6 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -66,6 +66,7 @@
gpio4 = &pioE;
tcb0 = &tcb0;
tcb1 = &tcb1;
+ i2c0 = &i2c0;
i2c2 = &i2c2;
};
cpus {
@@ -259,7 +260,7 @@
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00600000 0x100000>;
interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+ clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
clock-names = "usb_clk", "ehci_clk", "uhpck";
status = "disabled";
};
@@ -461,8 +462,8 @@
lcdck: lcdck {
#clock-cells = <0>;
- reg = <4>;
- clocks = <&smd>;
+ reg = <3>;
+ clocks = <&mck>;
};
smdck: smdck {
@@ -770,7 +771,7 @@
reg = <50>;
};
- lcd_clk: lcd_clk {
+ lcdc_clk: lcdc_clk {
#clock-cells = <0>;
reg = <51>;
};
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 252c3d1bda50..9d8760956752 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -713,6 +713,9 @@
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&l4_sp_clk>;
+ dmas = <&pdma 28>,
+ <&pdma 29>;
+ dma-names = "tx", "rx";
};
uart1: serial1@ffc03000 {
@@ -722,6 +725,9 @@
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&l4_sp_clk>;
+ dmas = <&pdma 30>,
+ <&pdma 31>;
+ dma-names = "tx", "rx";
};
rst: rstmgr@ffd05000 {
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index f2670f638e97..811e72bbe642 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -70,6 +70,7 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
+CONFIG_ARM_AT91_ETHER=y
CONFIG_MACB=y
# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_DM9000=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index b7e6b6fba5e0..06075b6d2463 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -99,7 +99,7 @@ CONFIG_PCI_RCAR_GEN2=y
CONFIG_PCI_RCAR_GEN2_PCIE=y
CONFIG_PCIEPORTBUS=y
CONFIG_SMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=16
CONFIG_HIGHPTE=y
CONFIG_CMA=y
CONFIG_ARM_APPENDED_DTB=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index a097cffa1231..8e108599e1af 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -377,6 +377,7 @@ CONFIG_PWM_TWL=m
CONFIG_PWM_TWL_LED=m
CONFIG_OMAP_USB2=m
CONFIG_TI_PIPE3=y
+CONFIG_TWL4030_USB=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 41d856effe6c..510c747c65b4 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -3,8 +3,6 @@
CONFIG_SYSVIPC=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 38840a812924..8f6a5702b696 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -4,6 +4,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_PERF_EVENTS=y
CONFIG_ARCH_SUNXI=y
CONFIG_SMP=y
+CONFIG_NR_CPUS=8
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_HIGHPTE=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index f489fdaa19b8..37fe607a4ede 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -118,8 +118,8 @@ CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
-CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_ISP1760=y
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S
index 80a6501b4d50..c3c45e628e33 100644
--- a/arch/arm/include/debug/at91.S
+++ b/arch/arm/include/debug/at91.S
@@ -18,8 +18,11 @@
#define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */
#endif
-/* Keep in sync with mach-at91/include/mach/hardware.h */
+#ifdef CONFIG_MMU
#define AT91_IO_P2V(x) ((x) - 0x01000000)
+#else
+#define AT91_IO_P2V(x) (x)
+#endif
#define AT91_DBGU_SR (0x14) /* Status Register */
#define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 5e34fb143309..aa4116e9452f 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -270,37 +270,35 @@ static void __init at91_pm_sram_init(void)
phys_addr_t sram_pbase;
unsigned long sram_base;
struct device_node *node;
- struct platform_device *pdev;
+ struct platform_device *pdev = NULL;
- node = of_find_compatible_node(NULL, NULL, "mmio-sram");
- if (!node) {
- pr_warn("%s: failed to find sram node!\n", __func__);
- return;
+ for_each_compatible_node(node, NULL, "mmio-sram") {
+ pdev = of_find_device_by_node(node);
+ if (pdev) {
+ of_node_put(node);
+ break;
+ }
}
- pdev = of_find_device_by_node(node);
if (!pdev) {
pr_warn("%s: failed to find sram device!\n", __func__);
- goto put_node;
+ return;
}
sram_pool = dev_get_gen_pool(&pdev->dev);
if (!sram_pool) {
pr_warn("%s: sram pool unavailable!\n", __func__);
- goto put_node;
+ return;
}
sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz);
if (!sram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
- goto put_node;
+ return;
}
sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false);
-
-put_node:
- of_node_put(node);
}
#endif
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index d2c89963af2d..86c0aa819d25 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -44,7 +44,7 @@ static inline void at91rm9200_standby(void)
" mcr p15, 0, %0, c7, c0, 4\n\t"
" str %5, [%1, %2]"
:
- : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
+ : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
"r" (1), "r" (AT91RM9200_SDRAMC_SRR),
"r" (lpr));
}
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 556151e85ec4..931f0e302c03 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -25,11 +25,6 @@
*/
#undef SLOWDOWN_MASTER_CLOCK
-#define MCKRDY_TIMEOUT 1000
-#define MOSCRDY_TIMEOUT 1000
-#define PLLALOCK_TIMEOUT 1000
-#define PLLBLOCK_TIMEOUT 1000
-
pmc .req r0
sdramc .req r1
ramc1 .req r2
@@ -41,60 +36,42 @@ tmp2 .req r5
* Wait until master clock is ready (after switching master clock source)
*/
.macro wait_mckrdy
- mov tmp2, #MCKRDY_TIMEOUT
-1: sub tmp2, tmp2, #1
- cmp tmp2, #0
- beq 2f
- ldr tmp1, [pmc, #AT91_PMC_SR]
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
tst tmp1, #AT91_PMC_MCKRDY
beq 1b
-2:
.endm
/*
* Wait until master oscillator has stabilized.
*/
.macro wait_moscrdy
- mov tmp2, #MOSCRDY_TIMEOUT
-1: sub tmp2, tmp2, #1
- cmp tmp2, #0
- beq 2f
- ldr tmp1, [pmc, #AT91_PMC_SR]
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
tst tmp1, #AT91_PMC_MOSCS
beq 1b
-2:
.endm
/*
* Wait until PLLA has locked.
*/
.macro wait_pllalock
- mov tmp2, #PLLALOCK_TIMEOUT
-1: sub tmp2, tmp2, #1
- cmp tmp2, #0
- beq 2f
- ldr tmp1, [pmc, #AT91_PMC_SR]
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
tst tmp1, #AT91_PMC_LOCKA
beq 1b
-2:
.endm
/*
* Wait until PLLB has locked.
*/
.macro wait_pllblock
- mov tmp2, #PLLBLOCK_TIMEOUT
-1: sub tmp2, tmp2, #1
- cmp tmp2, #0
- beq 2f
- ldr tmp1, [pmc, #AT91_PMC_SR]
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
tst tmp1, #AT91_PMC_LOCKB
beq 1b
-2:
.endm
.text
+ .arm
+
/* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc,
* void __iomem *ramc1, int memctrl)
*/
@@ -134,6 +111,16 @@ ddr_sr_enable:
cmp memctrl, #AT91_MEMCTRL_DDRSDR
bne sdr_sr_enable
+ /* LPDDR1 --> force DDR2 mode during self-refresh */
+ ldr tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+ str tmp1, .saved_sam9_mdr
+ bic tmp1, tmp1, #~AT91_DDRSDRC_MD
+ cmp tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR
+ ldreq tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+ biceq tmp1, tmp1, #AT91_DDRSDRC_MD
+ orreq tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2
+ streq tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+
/* prepare for DDRAM self-refresh mode */
ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR]
str tmp1, .saved_sam9_lpr
@@ -142,14 +129,26 @@ ddr_sr_enable:
/* figure out if we use the second ram controller */
cmp ramc1, #0
- ldrne tmp2, [ramc1, #AT91_DDRSDRC_LPR]
- strne tmp2, .saved_sam9_lpr1
- bicne tmp2, #AT91_DDRSDRC_LPCB
- orrne tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH
+ beq ddr_no_2nd_ctrl
+
+ ldr tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+ str tmp2, .saved_sam9_mdr1
+ bic tmp2, tmp2, #~AT91_DDRSDRC_MD
+ cmp tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR
+ ldreq tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+ biceq tmp2, tmp2, #AT91_DDRSDRC_MD
+ orreq tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2
+ streq tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+
+ ldr tmp2, [ramc1, #AT91_DDRSDRC_LPR]
+ str tmp2, .saved_sam9_lpr1
+ bic tmp2, #AT91_DDRSDRC_LPCB
+ orr tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH
/* Enable DDRAM self-refresh mode */
+ str tmp2, [ramc1, #AT91_DDRSDRC_LPR]
+ddr_no_2nd_ctrl:
str tmp1, [sdramc, #AT91_DDRSDRC_LPR]
- strne tmp2, [ramc1, #AT91_DDRSDRC_LPR]
b sdr_sr_done
@@ -208,6 +207,7 @@ sdr_sr_done:
/* Turn off the main oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
bic tmp1, tmp1, #AT91_PMC_MOSCEN
+ orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
/* Wait for interrupt */
@@ -216,6 +216,7 @@ sdr_sr_done:
/* Turn on the main oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
orr tmp1, tmp1, #AT91_PMC_MOSCEN
+ orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
wait_moscrdy
@@ -280,12 +281,17 @@ sdr_sr_done:
*/
cmp memctrl, #AT91_MEMCTRL_DDRSDR
bne sdr_en_restore
+ /* Restore MDR in case of LPDDR1 */
+ ldr tmp1, .saved_sam9_mdr
+ str tmp1, [sdramc, #AT91_DDRSDRC_MDR]
/* Restore LPR on AT91 with DDRAM */
ldr tmp1, .saved_sam9_lpr
str tmp1, [sdramc, #AT91_DDRSDRC_LPR]
/* if we use the second ram controller */
cmp ramc1, #0
+ ldrne tmp2, .saved_sam9_mdr1
+ strne tmp2, [ramc1, #AT91_DDRSDRC_MDR]
ldrne tmp2, .saved_sam9_lpr1
strne tmp2, [ramc1, #AT91_DDRSDRC_LPR]
@@ -319,5 +325,11 @@ ram_restored:
.saved_sam9_lpr1:
.word 0
+.saved_sam9_mdr:
+ .word 0
+
+.saved_sam9_mdr1:
+ .word 0
+
ENTRY(at91_slow_clock_sz)
.word .-at91_slow_clock
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 3f32c47a6d74..d2e9f12d12f1 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
*/
void exynos_cpu_power_down(int cpu)
{
- if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
- of_machine_is_compatible("samsung,exynos5800"))) {
+ if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
/*
* Bypass power down for CPU0 during suspend. Check for
* the SYS_PWR_REG value to decide if we are suspending
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 20f267121b3e..37266a826437 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -161,6 +161,34 @@ no_clk:
of_genpd_add_provider_simple(np, &pd->pd);
}
+ /* Assign the child power domains to their parents */
+ for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+ struct generic_pm_domain *child_domain, *parent_domain;
+ struct of_phandle_args args;
+
+ args.np = np;
+ args.args_count = 0;
+ child_domain = of_genpd_get_from_provider(&args);
+ if (!child_domain)
+ continue;
+
+ if (of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &args) != 0)
+ continue;
+
+ parent_domain = of_genpd_get_from_provider(&args);
+ if (!parent_domain)
+ continue;
+
+ if (pm_genpd_add_subdomain(parent_domain, child_domain))
+ pr_warn("%s failed to add subdomain: %s\n",
+ parent_domain->name, child_domain->name);
+ else
+ pr_info("%s has as child subdomain: %s.\n",
+ parent_domain->name, child_domain->name);
+ of_node_put(np);
+ }
+
return 0;
}
arch_initcall(exynos4_pm_init_power_domain);
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 52e2b1a2fddb..318d127df147 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3;
static u32 exynos_irqwake_intmask = 0xffffffff;
static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
- { 73, BIT(1) }, /* RTC alarm */
- { 74, BIT(2) }, /* RTC tick */
+ { 105, BIT(1) }, /* RTC alarm */
+ { 106, BIT(2) }, /* RTC tick */
{ /* sentinel */ },
};
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 4ad6e473cf83..9de3412af406 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -211,8 +211,9 @@ static void __init imx6q_1588_init(void)
* set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad
* (external OSC), and we need to clear the bit.
*/
- clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP :
- IMX6Q_GPR1_ENET_CLK_SEL_PAD;
+ clksel = clk_is_match(ptp_clk, enet_ref) ?
+ IMX6Q_GPR1_ENET_CLK_SEL_ANATOP :
+ IMX6Q_GPR1_ENET_CLK_SEL_PAD;
gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (!IS_ERR(gpr))
regmap_update_bits(gpr, IOMUXC_GPR1,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 92afb723dcfc..355b08936871 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1692,16 +1692,15 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
if (ret == -EBUSY)
pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name);
- if (!ret) {
+ if (oh->clkdm) {
/*
* Set the clockdomain to HW_AUTO, assuming that the
* previous state was HW_AUTO.
*/
- if (oh->clkdm && hwsup)
+ if (hwsup)
clkdm_allow_idle(oh->clkdm);
- } else {
- if (oh->clkdm)
- clkdm_hwmod_disable(oh->clkdm, oh);
+
+ clkdm_hwmod_disable(oh->clkdm, oh);
}
return ret;
@@ -2698,6 +2697,7 @@ static int __init _register(struct omap_hwmod *oh)
INIT_LIST_HEAD(&oh->master_ports);
INIT_LIST_HEAD(&oh->slave_ports);
spin_lock_init(&oh->_lock);
+ lockdep_set_class(&oh->_lock, &oh->hwmod_key);
oh->_state = _HWMOD_STATE_REGISTERED;
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 9d4bec6ee742..9611c91d9b82 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -674,6 +674,7 @@ struct omap_hwmod {
u32 _sysc_cache;
void __iomem *_mpu_rt_va;
spinlock_t _lock;
+ struct lock_class_key hwmod_key; /* unique lock class */
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
unsigned int (*xlate_irq)(unsigned int);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index e8692e7675b8..16fe7a1b7a35 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1466,55 +1466,18 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = {
*
*/
-static struct omap_hwmod_class dra7xx_pcie_hwmod_class = {
+static struct omap_hwmod_class dra7xx_pciess_hwmod_class = {
.name = "pcie",
};
/* pcie1 */
-static struct omap_hwmod dra7xx_pcie1_hwmod = {
+static struct omap_hwmod dra7xx_pciess1_hwmod = {
.name = "pcie1",
- .class = &dra7xx_pcie_hwmod_class,
+ .class = &dra7xx_pciess_hwmod_class,
.clkdm_name = "pcie_clkdm",
.main_clk = "l4_root_clk_div",
.prcm = {
.omap4 = {
- .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* pcie2 */
-static struct omap_hwmod dra7xx_pcie2_hwmod = {
- .name = "pcie2",
- .class = &dra7xx_pcie_hwmod_class,
- .clkdm_name = "pcie_clkdm",
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'PCIE PHY' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = {
- .name = "pcie-phy",
-};
-
-/* pcie1 phy */
-static struct omap_hwmod dra7xx_pcie1_phy_hwmod = {
- .name = "pcie1-phy",
- .class = &dra7xx_pcie_phy_hwmod_class,
- .clkdm_name = "l3init_clkdm",
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
.clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET,
.context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET,
.modulemode = MODULEMODE_SWCTRL,
@@ -1522,11 +1485,11 @@ static struct omap_hwmod dra7xx_pcie1_phy_hwmod = {
},
};
-/* pcie2 phy */
-static struct omap_hwmod dra7xx_pcie2_phy_hwmod = {
- .name = "pcie2-phy",
- .class = &dra7xx_pcie_phy_hwmod_class,
- .clkdm_name = "l3init_clkdm",
+/* pcie2 */
+static struct omap_hwmod dra7xx_pciess2_hwmod = {
+ .name = "pcie2",
+ .class = &dra7xx_pciess_hwmod_class,
+ .clkdm_name = "pcie_clkdm",
.main_clk = "l4_root_clk_div",
.prcm = {
.omap4 = {
@@ -2877,50 +2840,34 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l3_main_1 -> pcie1 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = {
+/* l3_main_1 -> pciess1 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = {
.master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_pcie1_hwmod,
+ .slave = &dra7xx_pciess1_hwmod,
.clk = "l3_iclk_div",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_cfg -> pcie1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 = {
+/* l4_cfg -> pciess1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = {
.master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_pcie1_hwmod,
+ .slave = &dra7xx_pciess1_hwmod,
.clk = "l4_root_clk_div",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l3_main_1 -> pcie2 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = {
+/* l3_main_1 -> pciess2 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = {
.master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_pcie2_hwmod,
+ .slave = &dra7xx_pciess2_hwmod,
.clk = "l3_iclk_div",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_cfg -> pcie2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = {
- .master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_pcie2_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> pcie1 phy */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = {
- .master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_pcie1_phy_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> pcie2 phy */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = {
+/* l4_cfg -> pciess2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = {
.master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_pcie2_phy_hwmod,
+ .slave = &dra7xx_pciess2_hwmod,
.clk = "l4_root_clk_div",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3327,12 +3274,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l4_cfg__mpu,
&dra7xx_l4_cfg__ocp2scp1,
&dra7xx_l4_cfg__ocp2scp3,
- &dra7xx_l3_main_1__pcie1,
- &dra7xx_l4_cfg__pcie1,
- &dra7xx_l3_main_1__pcie2,
- &dra7xx_l4_cfg__pcie2,
- &dra7xx_l4_cfg__pcie1_phy,
- &dra7xx_l4_cfg__pcie2_phy,
+ &dra7xx_l3_main_1__pciess1,
+ &dra7xx_l4_cfg__pciess1,
+ &dra7xx_l3_main_1__pciess2,
+ &dra7xx_l4_cfg__pciess2,
&dra7xx_l3_main_1__qspi,
&dra7xx_l4_per3__rtcss,
&dra7xx_l4_cfg__sata,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 190fa43e7479..e642b079e9f3 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -173,6 +173,7 @@ static void __init omap3_igep0030_rev_g_legacy_init(void)
static void __init omap3_evm_legacy_init(void)
{
+ hsmmc2_internal_input_clk();
legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149);
}
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index a08a617a6c11..d6d6bc39e05c 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -252,10 +252,10 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask)
{
saved_mask[0] =
omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
- OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
+ OMAP4_PRM_IRQENABLE_MPU_OFFSET);
saved_mask[1] =
omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
- OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
+ OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
OMAP4_PRM_IRQENABLE_MPU_OFFSET);
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 7d8eab857a93..f6d02e4cbcda 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -36,6 +36,7 @@
#include <linux/platform_data/video-pxafb.h>
#include <mach/bitfield.h>
#include <linux/platform_data/mmc-pxamci.h>
+#include <linux/smc91x.h>
#include "generic.h"
#include "devices.h"
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 28da319d389f..eaee2c20b189 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -195,7 +195,7 @@ static struct resource smc91x_resources[] = {
};
struct smc91x_platdata smc91x_platdata = {
- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT;
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
};
static struct platform_device smc91x_device = {
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 7b0cd3172354..af868d258e66 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -268,8 +268,8 @@ static int neponset_probe(struct platform_device *dev)
.id = 0,
.res = smc91x_resources,
.num_res = ARRAY_SIZE(smc91x_resources),
- .data = &smc91c_platdata,
- .size_data = sizeof(smc91c_platdata),
+ .data = &smc91x_platdata,
+ .size_data = sizeof(smc91x_platdata),
};
int ret, irq;
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 696fd0fe4806..1525d7b5f1b7 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -54,7 +54,7 @@ static struct platform_device smc91x_device = {
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
- .platform_data = &smc91c_platdata,
+ .platform_data = &smc91x_platdata,
},
};
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index 483cb467bf65..a0f3b1cd497c 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -45,6 +45,6 @@ extern char secondary_trampoline, secondary_trampoline_end;
extern unsigned long socfpga_cpu1start_addr;
-#define SOCFPGA_SCU_VIRT_BASE 0xfffec000
+#define SOCFPGA_SCU_VIRT_BASE 0xfee00000
#endif
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index 383d61e138af..f5e597c207b9 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -23,6 +23,7 @@
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#include <asm/cacheflush.h>
#include "core.h"
@@ -73,6 +74,10 @@ void __init socfpga_sysmgr_init(void)
(u32 *) &socfpga_cpu1start_addr))
pr_err("SMP: Need cpu1-start-addr in device tree.\n");
+ /* Ensure that socfpga_cpu1start_addr is visible to other CPUs */
+ smp_wmb();
+ sync_cache_w(&socfpga_cpu1start_addr);
+
sys_manager_base_addr = of_iomap(np, 0);
np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr");
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c
index b067390cef4e..b373acade338 100644
--- a/arch/arm/mach-sti/board-dt.c
+++ b/arch/arm/mach-sti/board-dt.c
@@ -18,6 +18,7 @@ static const char *stih41x_dt_match[] __initdata = {
"st,stih415",
"st,stih416",
"st,stih407",
+ "st,stih410",
"st,stih418",
NULL
};
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index f1ad9c2ab2e9..a857794432d6 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -622,7 +622,7 @@
};
sgenet0: ethernet@1f210000 {
- compatible = "apm,xgene-enet";
+ compatible = "apm,xgene1-sgenet";
status = "disabled";
reg = <0x0 0x1f210000 0x0 0xd100>,
<0x0 0x1f200000 0x0 0Xc300>,
@@ -636,7 +636,7 @@
};
xgenet: ethernet@1f610000 {
- compatible = "apm,xgene-enet";
+ compatible = "apm,xgene1-xgenet";
status = "disabled";
reg = <0x0 0x1f610000 0x0 0xd100>,
<0x0 0x1f600000 0x0 0Xc300>,
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c028fe37456f..53d9c354219f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
+ __flush_tlb_pgtable(tlb->mm, addr);
pgtable_page_dtor(pte);
tlb_remove_entry(tlb, pte);
}
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
+ __flush_tlb_pgtable(tlb->mm, addr);
tlb_remove_entry(tlb, virt_to_page(pmdp));
}
#endif
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
+ __flush_tlb_pgtable(tlb->mm, addr);
tlb_remove_entry(tlb, virt_to_page(pudp));
}
#endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 4abe9b945f77..c3bb05b98616 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -144,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
}
/*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+static inline void __flush_tlb_pgtable(struct mm_struct *mm,
+ unsigned long uaddr)
+{
+ unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+
+ dsb(ishst);
+ asm("tlbi vae1is, %0" : : "r" (addr));
+ dsb(ish);
+}
+/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
*/
static inline void update_mmu_cache(struct vm_area_struct *vma,
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index b42c7b480e1e..2b8d70164428 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -354,3 +354,12 @@ void efi_virtmap_unload(void)
efi_set_pgd(current->active_mm);
preempt_enable();
}
+
+/*
+ * UpdateCapsule() depends on the system being shut down via
+ * ResetSystem().
+ */
+bool efi_poweroff_required(void)
+{
+ return efi_enabled(EFI_RUNTIME_SERVICES);
+}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 8ce88e08c030..07f930540f4a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag)
* zeroing of .bss would clobber it.
*/
.pushsection .data..cacheline_aligned
-ENTRY(__boot_cpu_mode)
.align L1_CACHE_SHIFT
+ENTRY(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL2
.long 0
.popsection
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fde9923af859..c6b1f3b96f45 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
#include <stdarg.h>
#include <linux/compat.h>
+#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -150,6 +151,13 @@ void machine_restart(char *cmd)
local_irq_disable();
smp_send_stop();
+ /*
+ * UpdateCapsule() depends on the system being reset via
+ * ResetSystem().
+ */
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi_reboot(reboot_mode, NULL);
+
/* Now call the architecture specific reboot code. */
if (arm_pm_restart)
arm_pm_restart(reboot_mode, cmd);
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index 78d4483ba40c..ec4db6df5e0d 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -67,6 +67,11 @@ extern unsigned long empty_zero_page;
*/
#define pgtable_cache_init() do { } while (0)
+/*
+ * c6x is !MMU, so define the simplest implementation
+ */
+#define pgprot_writecombine pgprot_noncached
+
#include <asm-generic/pgtable.h>
#endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 0536bc021cc6..ef548510b951 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -348,8 +348,9 @@ C_ENTRY(_user_exception):
* The LP register should point to the location where the called function
* should return. [note that MAKE_SYS_CALL uses label 1] */
/* See if the system call number is valid */
+ blti r12, 5f
addi r11, r12, -__NR_syscalls;
- bgei r11,5f;
+ bgei r11, 5f;
/* Figure out which function to use for this system call. */
/* Note Microblaze barrel shift is optional, so don't rely on it */
add r12, r12, r12; /* convert num -> ptr */
@@ -375,7 +376,7 @@ C_ENTRY(_user_exception):
/* The syscall number is invalid, return an error. */
5:
- rtsd r15, 8; /* looks like a normal subroutine return */
+ braid ret_from_trap
addi r3, r0, -ENOSYS;
/* Entry point used to return from a syscall/trap */
@@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap):
bri 1b
/* Maybe handle a signal */
-5:
+5:
andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
beqi r11, 4f; /* Signals to handle, handle them */
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h
index 20fb1cf2dab6..642462144872 100644
--- a/arch/nios2/include/asm/ptrace.h
+++ b/arch/nios2/include/asm/ptrace.h
@@ -15,7 +15,54 @@
#include <uapi/asm/ptrace.h>
+/* This struct defines the way the registers are stored on the
+ stack during a system call. */
+
#ifndef __ASSEMBLY__
+struct pt_regs {
+ unsigned long r8; /* r8-r15 Caller-saved GP registers */
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r1; /* Assembler temporary */
+ unsigned long r2; /* Retval LS 32bits */
+ unsigned long r3; /* Retval MS 32bits */
+ unsigned long r4; /* r4-r7 Register arguments */
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long orig_r2; /* Copy of r2 ?? */
+ unsigned long ra; /* Return address */
+ unsigned long fp; /* Frame pointer */
+ unsigned long sp; /* Stack pointer */
+ unsigned long gp; /* Global pointer */
+ unsigned long estatus;
+ unsigned long ea; /* Exception return address (pc) */
+ unsigned long orig_r7;
+};
+
+/*
+ * This is the extended stack used by signal handlers and the context
+ * switcher: it's pushed after the normal "struct pt_regs".
+ */
+struct switch_stack {
+ unsigned long r16; /* r16-r23 Callee-saved GP registers */
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long fp;
+ unsigned long gp;
+ unsigned long ra;
+};
+
#define user_mode(regs) (((regs)->estatus & ESTATUS_EU))
#define instruction_pointer(regs) ((regs)->ra)
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h
deleted file mode 100644
index 2c87614b0f6e..000000000000
--- a/arch/nios2/include/asm/ucontext.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
- * Copyright (C) 2004 Microtronix Datacom Ltd
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_NIOS2_UCONTEXT_H
-#define _ASM_NIOS2_UCONTEXT_H
-
-typedef int greg_t;
-#define NGREG 32
-typedef greg_t gregset_t[NGREG];
-
-struct mcontext {
- int version;
- gregset_t gregs;
-};
-
-#define MCONTEXT_VERSION 2
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct mcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-#endif
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 4f07ca3f8d10..376131194cc3 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -2,3 +2,5 @@ include include/uapi/asm-generic/Kbuild.asm
header-y += elf.h
header-y += ucontext.h
+
+generic-y += ucontext.h
diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h
index a5b91ae5cf56..6f06d3b2949e 100644
--- a/arch/nios2/include/uapi/asm/elf.h
+++ b/arch/nios2/include/uapi/asm/elf.h
@@ -50,9 +50,7 @@
typedef unsigned long elf_greg_t;
-#define ELF_NGREG \
- ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) / \
- sizeof(elf_greg_t))
+#define ELF_NGREG 49
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef unsigned long elf_fpregset_t;
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h
index e83a7c9d1c36..71a330597adf 100644
--- a/arch/nios2/include/uapi/asm/ptrace.h
+++ b/arch/nios2/include/uapi/asm/ptrace.h
@@ -67,53 +67,9 @@
#define NUM_PTRACE_REG (PTR_TLBMISC + 1)
-/* this struct defines the way the registers are stored on the
- stack during a system call.
-
- There is a fake_regs in setup.c that has to match pt_regs.*/
-
-struct pt_regs {
- unsigned long r8; /* r8-r15 Caller-saved GP registers */
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- unsigned long r13;
- unsigned long r14;
- unsigned long r15;
- unsigned long r1; /* Assembler temporary */
- unsigned long r2; /* Retval LS 32bits */
- unsigned long r3; /* Retval MS 32bits */
- unsigned long r4; /* r4-r7 Register arguments */
- unsigned long r5;
- unsigned long r6;
- unsigned long r7;
- unsigned long orig_r2; /* Copy of r2 ?? */
- unsigned long ra; /* Return address */
- unsigned long fp; /* Frame pointer */
- unsigned long sp; /* Stack pointer */
- unsigned long gp; /* Global pointer */
- unsigned long estatus;
- unsigned long ea; /* Exception return address (pc) */
- unsigned long orig_r7;
-};
-
-/*
- * This is the extended stack used by signal handlers and the context
- * switcher: it's pushed after the normal "struct pt_regs".
- */
-struct switch_stack {
- unsigned long r16; /* r16-r23 Callee-saved GP registers */
- unsigned long r17;
- unsigned long r18;
- unsigned long r19;
- unsigned long r20;
- unsigned long r21;
- unsigned long r22;
- unsigned long r23;
- unsigned long fp;
- unsigned long gp;
- unsigned long ra;
+/* User structures for general purpose registers. */
+struct user_pt_regs {
+ __u32 regs[49];
};
#endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h
index 7b8bb41867d4..b67944a50927 100644
--- a/arch/nios2/include/uapi/asm/sigcontext.h
+++ b/arch/nios2/include/uapi/asm/sigcontext.h
@@ -15,14 +15,16 @@
* details.
*/
-#ifndef _ASM_NIOS2_SIGCONTEXT_H
-#define _ASM_NIOS2_SIGCONTEXT_H
+#ifndef _UAPI__ASM_SIGCONTEXT_H
+#define _UAPI__ASM_SIGCONTEXT_H
-#include <asm/ptrace.h>
+#include <linux/types.h>
+
+#define MCONTEXT_VERSION 2
struct sigcontext {
- struct pt_regs regs;
- unsigned long sc_mask; /* old sigmask */
+ int version;
+ unsigned long gregs[32];
};
#endif
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index 2d0ea25be171..dda41e4fe707 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -39,7 +39,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs,
struct ucontext *uc, int *pr2)
{
int temp;
- greg_t *gregs = uc->uc_mcontext.gregs;
+ unsigned long *gregs = uc->uc_mcontext.gregs;
int err;
/* Always make any pending restarted system calls return -EINTR */
@@ -127,7 +127,7 @@ badframe:
static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
{
struct switch_stack *sw = (struct switch_stack *)regs - 1;
- greg_t *gregs = uc->uc_mcontext.gregs;
+ unsigned long *gregs = uc->uc_mcontext.gregs;
int err = 0;
err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d84559e31f32..f407bbf5ee94 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -515,15 +515,15 @@ struct s390_io_adapter {
#define S390_ARCH_FAC_MASK_SIZE_U64 \
(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
-struct s390_model_fac {
- /* facilities used in SIE context */
- __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
- /* subset enabled by kvm */
- __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+struct kvm_s390_fac {
+ /* facility list requested by guest */
+ __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+ /* facility mask supported by kvm & hosting machine */
+ __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
};
struct kvm_s390_cpu_model {
- struct s390_model_fac *fac;
+ struct kvm_s390_fac *fac;
struct cpuid cpu_id;
unsigned short ibc;
};
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b71954654..8fb3802f8fad 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
+ S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
atomic_dec(&prev->context.attach_count);
if (MACHINE_HAS_TLB_LC)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e44166..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
#endif
}
-static inline void clear_page(void *page)
-{
- register unsigned long reg1 asm ("1") = 0;
- register void *reg2 asm ("2") = page;
- register unsigned long reg3 asm ("3") = 4096;
- asm volatile(
- " mvcl 2,0"
- : "+d" (reg2), "+d" (reg3) : "d" (reg1)
- : "memory", "cc");
-}
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
/*
* copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index cb2d51e779df..830066f936c8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
insn->offset = (entry->target - entry->code) >> 1;
}
-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+ struct insn *new)
{
unsigned char *ipc = (unsigned char *)entry->code;
- unsigned char *ipe = (unsigned char *)insn;
+ unsigned char *ipe = (unsigned char *)expected;
+ unsigned char *ipn = (unsigned char *)new;
pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+ pr_emerg("New: %02x %02x %02x %02x %02x %02x\n",
+ ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
panic("Corrupted kernel text");
}
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry,
}
if (init) {
if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
- jump_label_bug(entry, &old);
+ jump_label_bug(entry, &orignop, &new);
} else {
if (memcmp((void *)entry->code, &old, sizeof(old)))
- jump_label_bug(entry, &old);
+ jump_label_bug(entry, &old, &new);
}
probe_kernel_write((void *)entry->code, &new, sizeof(new));
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 36154a2f1814..2ca95862e336 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ jump_label_apply_nops(me);
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
return 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 26108232fcaa..dc488e13b7e3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,7 +18,7 @@
static DEFINE_PER_CPU(struct cpuid, cpu_id);
-void cpu_relax(void)
+void notrace cpu_relax(void)
{
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0c3623927563..f6579cfde2df 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
sizeof(struct cpuid));
kvm->arch.model.ibc = proc->ibc;
- memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+ memcpy(kvm->arch.model.fac->list, proc->fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE);
} else
ret = -EFAULT;
@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
}
memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
proc->ibc = kvm->arch.model.ibc;
- memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+ memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
ret = -EFAULT;
kfree(proc);
@@ -576,10 +576,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
}
get_cpu_id((struct cpuid *) &mach->cpuid);
mach->ibc = sclp_get_ibc();
- memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
- kvm_s390_fac_list_mask_size() * sizeof(u64));
+ memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_U64);
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
ret = -EFAULT;
kfree(mach);
@@ -778,15 +778,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
static int kvm_s390_query_ap_config(u8 *config)
{
u32 fcn_code = 0x04000000UL;
- u32 cc;
+ u32 cc = 0;
+ memset(config, 0, 128);
asm volatile(
"lgr 0,%1\n"
"lgr 2,%2\n"
".long 0xb2af0000\n" /* PQAP(QCI) */
- "ipm %0\n"
+ "0: ipm %0\n"
"srl %0,28\n"
- : "=r" (cc)
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+r" (cc)
: "r" (fcn_code), "r" (config)
: "cc", "0", "2", "memory"
);
@@ -839,9 +842,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
kvm_s390_set_crycb_format(kvm);
- /* Disable AES/DEA protected key functions by default */
- kvm->arch.crypto.aes_kw = 0;
- kvm->arch.crypto.dea_kw = 0;
+ /* Enable AES/DEA protected key functions by default */
+ kvm->arch.crypto.aes_kw = 1;
+ kvm->arch.crypto.dea_kw = 1;
+ get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
return 0;
}
@@ -886,40 +893,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/*
* The architectural maximum amount of facilities is 16 kbit. To store
* this amount, 2 kbyte of memory is required. Thus we need a full
- * page to hold the active copy (arch.model.fac->sie) and the current
- * facilities set (arch.model.fac->kvm). Its address size has to be
+ * page to hold the guest facility list (arch.model.fac->list) and the
+ * facility mask (arch.model.fac->mask). Its address size has to be
* 31 bits and word aligned.
*/
kvm->arch.model.fac =
- (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.model.fac)
goto out_nofac;
- memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_U64);
-
- /*
- * If this KVM host runs *not* in a LPAR, relax the facility bits
- * of the kvm facility mask by all missing facilities. This will allow
- * to determine the right CPU model by means of the remaining facilities.
- * Live guest migration must prohibit the migration of KVMs running in
- * a LPAR to non LPAR hosts.
- */
- if (!MACHINE_IS_LPAR)
- for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
- kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
-
- /*
- * Apply the kvm facility mask to limit the kvm supported/tolerated
- * facility list.
- */
+ /* Populate the facility mask initially. */
+ memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
if (i < kvm_s390_fac_list_mask_size())
- kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+ kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
else
- kvm->arch.model.fac->kvm[i] = 0UL;
+ kvm->arch.model.fac->mask[i] = 0UL;
}
+ /* Populate the facility list initially. */
+ memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+
kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
@@ -1165,8 +1161,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
mutex_lock(&vcpu->kvm->lock);
vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
- memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
mutex_unlock(&vcpu->kvm->lock);
@@ -1212,7 +1206,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
- vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
+ vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 985c2114d7ef..c34109aa552d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
- return __test_facility(nr, kvm->arch.model.fac->kvm);
+ return __test_facility(nr, kvm->arch.model.fac->mask) &&
+ __test_facility(nr, kvm->arch.model.fac->list);
}
/* are cpu states controlled by user space */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index bdd9b5b17e03..351116939ea2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
* We need to shift the lower 32 facility bits (bit 0-31) from a u64
* into a u32 memory representation. They will remain bits 0-31.
*/
- fac = *vcpu->kvm->arch.model.fac->sie >> 32;
+ fac = *vcpu->kvm->arch.model.fac->list >> 32;
rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&fac, sizeof(fac));
if (rc)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 753a56731951..f0b85443e060 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
return (void __iomem *) addr + offset;
}
-EXPORT_SYMBOL_GPL(pci_iomap_range);
+EXPORT_SYMBOL(pci_iomap_range);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
}
spin_unlock(&zpci_iomap_lock);
}
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
{
- struct pci_dev *pdev = zdev->pdev;
resource_size_t len;
int i;
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
}
-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
{
- struct pci_dev *pdev = zdev->pdev;
resource_size_t len;
int i;
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)
zdev->pdev = pdev;
pdev->dev.groups = zpci_attr_groups;
- zpci_map_resources(zdev);
+ zpci_map_resources(pdev);
for (i = 0; i < PCI_BAR_COUNT; i++) {
res = &pdev->resource[i];
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
return 0;
}
+void pcibios_release_device(struct pci_dev *pdev)
+{
+ zpci_unmap_resources(pdev);
+}
+
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
struct zpci_dev *zdev = get_zdev(pdev);
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
zdev->pdev = pdev;
zpci_debug_init_device(zdev);
zpci_fmb_enable_device(zdev);
- zpci_map_resources(zdev);
return pci_enable_resources(pdev, mask);
}
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
- zpci_unmap_resources(zdev);
zpci_fmb_disable_device(zdev);
zpci_debug_exit_device(zdev);
zdev->pdev = NULL;
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int zpci_restore(struct device *dev)
{
- struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
int ret = 0;
if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev)
if (ret)
goto out;
- zpci_map_resources(zdev);
+ zpci_map_resources(pdev);
zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
zdev->start_dma + zdev->iommu_size - 1,
(u64) zdev->dma_table);
@@ -709,12 +711,14 @@ out:
static int zpci_freeze(struct device *dev)
{
- struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
if (zdev->state != ZPCI_FN_STATE_ONLINE)
return 0;
zpci_unregister_ioat(zdev, 0);
+ zpci_unmap_resources(pdev);
return clp_disable_fh(zdev);
}
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 8aa271b3d1ad..b1bb2b72302c 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
if (copy_from_user(buf, user_buffer, length))
goto out;
- memcpy_toio(io_addr, buf, length);
- ret = 0;
+ ret = zpci_memcpy_toio(io_addr, buf, length);
out:
if (buf != local_buf)
kfree(buf);
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
goto out;
io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
- ret = -EFAULT;
- if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+ ret = -EFAULT;
goto out;
-
- memcpy_fromio(buf, io_addr, length);
-
- if (copy_to_user(user_buffer, buf, length))
+ }
+ ret = zpci_memcpy_fromio(buf, io_addr, length);
+ if (ret)
goto out;
+ if (copy_to_user(user_buffer, buf, length))
+ ret = -EFAULT;
- ret = 0;
out:
if (buf != local_buf)
kfree(buf);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 740ae3026a14..9f93af56a5fc 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -563,7 +563,7 @@ static bool alloc_p2m(unsigned long pfn)
if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
p2m_init(p2m);
else
- p2m_init_identity(p2m, pfn);
+ p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
spin_lock_irqsave(&p2m_update_lock, flags);
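For context on the masking above: p2m_init_identity() fills in a whole p2m page, so it should be handed the first pfn that page covers rather than the pfn that triggered the allocation. Assuming the usual x86-64 value of P2M_PER_PAGE = PAGE_SIZE / sizeof(unsigned long) = 512, a faulting pfn of, say, 1000 is masked down to 1000 & ~511 = 512 (numbers purely illustrative).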
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 657964e8ab7e..37fb19047603 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -65,6 +65,7 @@ struct lpss_private_data;
struct lpss_device_desc {
unsigned int flags;
+ const char *clk_con_id;
unsigned int prv_offset;
size_t prv_size_override;
void (*setup)(struct lpss_private_data *pdata);
@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = {
static struct lpss_device_desc lpt_uart_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+ .clk_con_id = "baudclk",
.prv_offset = 0x800,
.setup = lpss_uart_setup,
};
@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
static struct lpss_device_desc byt_uart_dev_desc = {
.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+ .clk_con_id = "baudclk",
.prv_offset = 0x800,
.setup = lpss_uart_setup,
};
@@ -313,7 +316,7 @@ out:
return PTR_ERR(clk);
pdata->clk = clk;
- clk_register_clkdev(clk, NULL, devname);
+ clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
return 0;
}
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index f9054cd36a72..5389579c5120 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -869,6 +869,8 @@ try_offline_again:
*/
ata_msleep(ap, 1);
+ sata_set_spd(link);
+
/*
* Now, bring the host controller online again, this can take time
* as PHY reset and communication establishment, 1st D2H FIS and
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 1d278ccd751f..e096e9cddb40 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
{
int rc;
- rc = device_add(&chip->dev);
+ rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
if (rc) {
dev_err(&chip->dev,
- "unable to device_register() %s, major %d, minor %d, err=%d\n",
+ "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
+ device_unregister(&chip->dev);
return rc;
}
- rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
+ rc = device_add(&chip->dev);
if (rc) {
dev_err(&chip->dev,
- "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+ "unable to device_register() %s, major %d, minor %d, err=%d\n",
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
- device_unregister(&chip->dev);
return rc;
}
@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
*
- * Creates a character device for the TPM chip and adds sysfs interfaces for
- * the device, PPI and TCPA. As the last step this function adds the
- * chip to the list of TPM chips available for use.
+ * Creates a character device for the TPM chip and adds sysfs attributes for
+ * the device. As the last step this function adds the chip to the list of TPM
+ * chips available for in-kernel use.
*
- * NOTE: This function should be only called after the chip initialization
- * is complete.
- *
- * Called from tpm_<specific>.c probe function only for devices
- * the driver has determined it should claim. Prior to calling
- * this function the specific probe function has called pci_enable_device
- * upon errant exit from this function specific probe function should call
- * pci_disable_device
+ * This function should be only called after the chip initialization is
+ * complete.
*/
int tpm_chip_register(struct tpm_chip *chip)
{
int rc;
- rc = tpm_dev_add_device(chip);
- if (rc)
- return rc;
-
/* Populate sysfs for TPM1 devices. */
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
rc = tpm_sysfs_add_device(chip);
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip)
chip->bios_dir = tpm_bios_log_setup(chip->devname);
}
+ rc = tpm_dev_add_device(chip);
+ if (rc)
+ return rc;
+
/* Make the chip available. */
spin_lock(&driver_lock);
list_add_rcu(&chip->list, &tpm_chip_list);
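With device_add() now performed last, a chip only becomes visible once its sysfs attributes and event log are in place. A minimal sketch of how a hardware driver is expected to drive this API, assuming the managed allocator; the probe routine, ops structure and init step are illustrative placeholders:

static int example_tpm_probe(struct device *dev)
{
	struct tpm_chip *chip;
	int rc;

	chip = tpmm_chip_alloc(dev, &example_tpm_ops);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	rc = example_hw_init(chip);	/* finish chip initialisation first */
	if (rc)
		return rc;

	return tpm_chip_register(chip);	/* only now expose the chip */
}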
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index b1e53e3aece5..42ffa5e7a1e0 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm;
struct ibmvtpm_crq crq;
- u64 *word = (u64 *) &crq;
+ __be64 *word = (__be64 *)&crq;
int rc;
ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
crq.valid = (u8)IBMVTPM_VALID_CMD;
crq.msg = (u8)VTPM_TPM_COMMAND;
- crq.len = (u16)count;
- crq.data = ibmvtpm->rtce_dma_handle;
+ crq.len = cpu_to_be16(count);
+ crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
- cpu_to_be64(word[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
+ be64_to_cpu(word[1]));
if (rc != H_SUCCESS) {
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
rc = 0;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index f595f14426bf..6af92890518f 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -22,9 +22,9 @@
struct ibmvtpm_crq {
u8 valid;
u8 msg;
- u16 len;
- u32 data;
- u64 reserved;
+ __be16 len;
+ __be32 data;
+ __be64 reserved;
} __attribute__((packed, aligned(8)));
struct ibmvtpm_crq_queue {
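The __be16/__be32/__be64 annotations let sparse flag missed conversions like the ones fixed above. A small self-contained sketch of the same marshalling pattern (struct and field names invented for illustration):

#include <linux/types.h>
#include <asm/byteorder.h>

struct be_desc {
	__be16 len;
	__be32 addr;
};

static void fill_desc(struct be_desc *d, u16 len, u32 addr)
{
	d->len  = cpu_to_be16(len);	/* CPU order -> wire order */
	d->addr = cpu_to_be32(addr);
}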
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index db7f8bce7467..25006a8bb8e6 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags);
}
-/*
- * The reverse of DIV_ROUND_UP: The maximum number which
- * divided by m is r
- */
-#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
-
static bool _is_valid_table_div(const struct clk_div_table *table,
unsigned int div)
{
@@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table,
unsigned long parent_rate, unsigned long rate,
unsigned long flags)
{
- int up, down, div;
+ int up, down;
+ unsigned long up_rate, down_rate;
- up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ up = DIV_ROUND_UP(parent_rate, rate);
+ down = parent_rate / rate;
if (flags & CLK_DIVIDER_POWER_OF_TWO) {
- up = __roundup_pow_of_two(div);
- down = __rounddown_pow_of_two(div);
+ up = __roundup_pow_of_two(up);
+ down = __rounddown_pow_of_two(down);
} else if (table) {
- up = _round_up_table(table, div);
- down = _round_down_table(table, div);
+ up = _round_up_table(table, up);
+ down = _round_down_table(table, down);
}
- return (up - div) <= (div - down) ? up : down;
+ up_rate = DIV_ROUND_UP(parent_rate, up);
+ down_rate = DIV_ROUND_UP(parent_rate, down);
+
+ return (rate - up_rate) <= (down_rate - rate) ? up : down;
}
static int _div_round(const struct clk_div_table *table,
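A worked example (round numbers, purely illustrative) of why the comparison now happens in the rate domain: for parent_rate = 1000 and a requested rate of 69, the old code started from DIV_ROUND_CLOSEST(1000, 69) = 14 and returned it, giving DIV_ROUND_UP(1000, 14) = 72. The new code considers up = 15 (rate 67) and down = 14 (rate 72) and, since |69 - 67| = 2 is smaller than |72 - 69| = 3, picks divider 15, whose resulting rate is genuinely closer to the request.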
@@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return i;
}
parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
- MULT_ROUND_UP(rate, i));
+ rate * i);
now = DIV_ROUND_UP(parent_rate, i);
if (_is_best_div(rate, now, best, flags)) {
bestdiv = i;
@@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv = readl(divider->reg) >> divider->shift;
bestdiv &= div_mask(divider->width);
bestdiv = _get_div(divider->table, bestdiv, divider->flags);
- return bestdiv;
+ return DIV_ROUND_UP(*prate, bestdiv);
}
return divider_round_rate(hw, rate, prate, divider->table,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index eb0152961d3c..237f23f68bfc 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk)
return rate;
}
-EXPORT_SYMBOL_GPL(clk_core_get_rate);
/**
* clk_get_rate - return the rate of clk
@@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk)
}
/**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+ /* trivial case: identical struct clk's or both NULL */
+ if (p == q)
+ return true;
+
+ /* true if clk->core pointers match. Avoid derefing garbage */
+ if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
+ if (p->core == q->core)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(clk_is_match);
+
+/**
* __clk_init - initialize the data structures in a struct clk
* @dev: device initializing this clk, placeholder for now
* @clk: clk being initialized
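A hedged usage sketch for the new clk_is_match() helper: with per-user struct clk handles, consumers can no longer compare clocks by pointer and should instead ask whether two handles share the same hardware clock. The wrapper below and its argument names are invented for illustration.

#include <linux/clk.h>

static int ensure_parent(struct clk *dev_clk, struct clk *want_parent)
{
	/* same underlying clk_core? then there is nothing to do */
	if (clk_is_match(clk_get_parent(dev_clk), want_parent))
		return 0;

	return clk_set_parent(dev_clk, want_parent);
}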
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index b0b562b9ce0e..e60feffc10a1 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = {
},
};
+static struct clk_regmap pll4_vote = {
+ .enable_reg = 0x34c0,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "pll4_vote",
+ .parent_names = (const char *[]){ "pll4" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
static struct clk_pll pll8 = {
.l_reg = 0x3144,
.m_reg = 0x3148,
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = {
static struct clk_regmap *gcc_msm8960_clks[] = {
[PLL3] = &pll3.clkr,
+ [PLL4_VOTE] = &pll4_vote,
[PLL8] = &pll8.clkr,
[PLL8_VOTE] = &pll8_vote,
[PLL14] = &pll14.clkr,
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = {
static struct clk_regmap *gcc_apq8064_clks[] = {
[PLL3] = &pll3.clkr,
+ [PLL4_VOTE] = &pll4_vote,
[PLL8] = &pll8.clkr,
[PLL8_VOTE] = &pll8_vote,
[PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 121ffde25dc3..c9ff27b4648b 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = {
.remove = lcc_ipq806x_remove,
.driver = {
.name = "lcc-ipq806x",
- .owner = THIS_MODULE,
.of_match_table = lcc_ipq806x_match_table,
},
};
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index a75a408cfccd..e2c863295f00 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = {
.mnctr_en_bit = 8,
.mnctr_reset_bit = 7,
.mnctr_mode_shift = 5,
- .n_val_shift = 16,
- .m_val_shift = 16,
+ .n_val_shift = 24,
+ .m_val_shift = 8,
.width = 8,
},
.p = {
@@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
/* Use the correct frequency plan depending on speed of PLL4 */
- val = regmap_read(regmap, 0x4, &val);
+ regmap_read(regmap, 0x4, &val);
if (val == 0x12) {
slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
@@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = {
.remove = lcc_msm8960_remove,
.driver = {
.name = "lcc-msm8960",
- .owner = THIS_MODULE,
.of_match_table = lcc_msm8960_match_table,
},
};
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 6ef89639a9f6..d21640634adf 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw)
struct fapll_data *fd = to_fapll(hw);
u32 v = readl_relaxed(fd->base);
- v |= (1 << FAPLL_MAIN_PLLEN);
+ v |= FAPLL_MAIN_PLLEN;
writel_relaxed(v, fd->base);
return 0;
@@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw)
struct fapll_data *fd = to_fapll(hw);
u32 v = readl_relaxed(fd->base);
- v &= ~(1 << FAPLL_MAIN_PLLEN);
+ v &= ~FAPLL_MAIN_PLLEN;
writel_relaxed(v, fd->base);
}
@@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw)
struct fapll_data *fd = to_fapll(hw);
u32 v = readl_relaxed(fd->base);
- return v & (1 << FAPLL_MAIN_PLLEN);
+ return v & FAPLL_MAIN_PLLEN;
}
static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 111849c4c8c2..d576a4dea64f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -43,9 +43,10 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
- struct drm_mode_fb_cmd2 *r,
- struct drm_file *file_priv);
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv);
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
@@ -2943,13 +2944,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
*/
if (req->flags & DRM_MODE_CURSOR_BO) {
if (req->handle) {
- fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+ fb = internal_framebuffer_create(dev, &fbreq, file_priv);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
return PTR_ERR(fb);
}
-
- drm_framebuffer_reference(fb);
} else {
fb = NULL;
}
@@ -3308,9 +3307,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
return 0;
}
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
- struct drm_mode_fb_cmd2 *r,
- struct drm_file *file_priv)
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
@@ -3348,12 +3348,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
return fb;
}
- mutex_lock(&file_priv->fbs_lock);
- r->fb_id = fb->base.id;
- list_add(&fb->filp_head, &file_priv->fbs);
- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
- mutex_unlock(&file_priv->fbs_lock);
-
return fb;
}
@@ -3375,15 +3369,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
+ struct drm_mode_fb_cmd2 *r = data;
struct drm_framebuffer *fb;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = add_framebuffer_internal(dev, data, file_priv);
+ fb = internal_framebuffer_create(dev, r, file_priv);
if (IS_ERR(fb))
return PTR_ERR(fb);
+ /* Transfer ownership to the filp for reaping on close */
+
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_lock(&file_priv->fbs_lock);
+ r->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ mutex_unlock(&file_priv->fbs_lock);
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0d15e6e30732..132581ca4ad8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
bool ret;
- mutex_lock(&mgr->qlock);
+
+ /*
+ * All updates to txmsg->state are protected by mgr->qlock, and the two
+ * cases we check here are terminal states. For those the barriers
+ * provided by the wake_up/wait_event pair are enough.
+ */
ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
- mutex_unlock(&mgr->qlock);
return ret;
}
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-/* must be called holding qlock */
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
+ WARN_ON(!mutex_is_locked(&mgr->qlock));
+
/* construct a chunk from the first msg in the tx_msg queue */
if (list_empty(&mgr->tx_msg_downq)) {
mgr->tx_down_in_progress = false;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 7fc6f8bd4821..1134526286c8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
unsigned rem;
rem = do_div(tmp, alignment);
- if (tmp)
+ if (rem)
start += alignment - rem;
}
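The one-character fix above hinges on the do_div() contract: it divides the 64-bit dividend in place (leaving the quotient behind) and returns the remainder, so the alignment padding must be keyed off the returned remainder. A minimal sketch of the corrected pattern (helper name illustrative):

#include <linux/types.h>
#include <asm/div64.h>

static u64 align_up(u64 start, unsigned int alignment)
{
	u64 tmp = start;
	unsigned int rem = do_div(tmp, alignment);	/* tmp is now the quotient */

	if (rem)
		start += alignment - rem;
	return start;
}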
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9a6da3536ae5..61ae8ff4eaed 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -836,8 +836,11 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj,
}
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first_page)
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
pages[i++] = sg_page_iter_page(&sg_iter);
+ if (i == npages)
+ break;
+ }
addr = vmap(pages, i, 0, PAGE_KERNEL);
if (addr == NULL) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e38f45374d55..1a52d6ab0f80 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1090,7 +1090,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
- IS_BROADWELL(dev)) {
+ IS_BROADWELL(dev) || IS_GEN9(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1109,11 +1109,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- reqf >>= 24;
- else
- reqf >>= 25;
+ if (IS_GEN9(dev))
+ reqf >>= 23;
+ else {
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ }
reqf = intel_gpu_freq(dev_priv, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
@@ -1127,7 +1131,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1153,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & 0xff00) >> 8);
+ (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -1178,14 +1184,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = rp_state_cap & 0xff;
+ max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
@@ -1831,18 +1840,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- if (dev_priv->ips.pwrctx) {
- seq_puts(m, "power context ");
- describe_obj(m, dev_priv->ips.pwrctx);
- seq_putc(m, '\n');
- }
-
- if (dev_priv->ips.renderctx) {
- seq_puts(m, "render context ");
- describe_obj(m, dev_priv->ips.renderctx);
- seq_putc(m, '\n');
- }
-
list_for_each_entry(ctx, &dev_priv->context_list, link) {
if (!i915.enable_execlists &&
ctx->legacy_hw_ctx.rcs_state == NULL)
@@ -2246,6 +2243,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enum pipe pipe;
bool enabled = false;
+ if (!HAS_PSR(dev)) {
+ seq_puts(m, "PSR not supported\n");
+ return 0;
+ }
+
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->psr.lock);
@@ -2258,17 +2260,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_PSR(dev)) {
- if (HAS_DDI(dev))
- enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- else {
- for_each_pipe(dev_priv, pipe) {
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
- }
+ if (HAS_DDI(dev))
+ enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ else {
+ for_each_pipe(dev_priv, pipe) {
+ stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ enabled = true;
}
}
seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
@@ -2285,7 +2285,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
yesno((bool)dev_priv->psr.link_standby));
/* CHV PSR has no kind of performance counter */
- if (HAS_PSR(dev) && HAS_DDI(dev)) {
+ if (HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
@@ -2308,8 +2308,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
u8 crc[6];
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_encoder(dev, connector) {
if (connector->base.dpms != DRM_MODE_DPMS_ON)
continue;
@@ -2677,7 +2676,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
active = cursor_position(dev, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
- x, y, crtc->cursor_width, crtc->cursor_height,
+ x, y, crtc->base.cursor->state->crtc_w,
+ crtc->base.cursor->state->crtc_h,
crtc->cursor_addr, yesno(active));
}
@@ -2853,7 +2853,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@@ -2870,6 +2870,115 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
return 0;
}
+static void drrs_status_per_crtc(struct seq_file *m,
+ struct drm_device *dev, struct intel_crtc *intel_crtc)
+{
+ struct intel_encoder *intel_encoder;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_drrs *drrs = &dev_priv->drrs;
+ int vrefresh = 0;
+
+ for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
+ /* Encoder connected on this CRTC */
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ seq_puts(m, "eDP:\n");
+ break;
+ case INTEL_OUTPUT_DSI:
+ seq_puts(m, "DSI:\n");
+ break;
+ case INTEL_OUTPUT_HDMI:
+ seq_puts(m, "HDMI:\n");
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ seq_puts(m, "DP:\n");
+ break;
+ default:
+ seq_printf(m, "Other encoder (id=%d).\n",
+ intel_encoder->type);
+ return;
+ }
+ }
+
+ if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
+ seq_puts(m, "\tVBT: DRRS_type: Static");
+ else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
+ seq_puts(m, "\tVBT: DRRS_type: Seamless");
+ else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
+ seq_puts(m, "\tVBT: DRRS_type: None");
+ else
+ seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
+
+ seq_puts(m, "\n\n");
+
+ if (intel_crtc->config->has_drrs) {
+ struct intel_panel *panel;
+
+ mutex_lock(&drrs->mutex);
+ /* DRRS Supported */
+ seq_puts(m, "\tDRRS Supported: Yes\n");
+
+ /* disable_drrs() will make drrs->dp NULL */
+ if (!drrs->dp) {
+ seq_puts(m, "Idleness DRRS: Disabled");
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+
+ panel = &drrs->dp->attached_connector->panel;
+ seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+ drrs->busy_frontbuffer_bits);
+
+ seq_puts(m, "\n\t\t");
+ if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+ seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+ vrefresh = panel->fixed_mode->vrefresh;
+ } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+ seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+ vrefresh = panel->downclock_mode->vrefresh;
+ } else {
+ seq_printf(m, "DRRS_State: Unknown(%d)\n",
+ drrs->refresh_rate_type);
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+ seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+ seq_puts(m, "\n\t\t");
+ mutex_unlock(&drrs->mutex);
+ } else {
+ /* DRRS not supported. Print the VBT parameter */
+ seq_puts(m, "\tDRRS Supported : No");
+ }
+ seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_crtc *intel_crtc;
+ int active_crtc_cnt = 0;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ drm_modeset_lock(&intel_crtc->base.mutex, NULL);
+
+ if (intel_crtc->active) {
+ active_crtc_cnt++;
+ seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+
+ drrs_status_per_crtc(m, dev, intel_crtc);
+ }
+
+ drm_modeset_unlock(&intel_crtc->base.mutex);
+ }
+
+ if (!active_crtc_cnt)
+ seq_puts(m, "No active crtc found\n");
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -4362,7 +4471,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
- if (INTEL_INFO(dev)->gen < 9)
+ if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
@@ -4384,7 +4493,34 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
yesno(INTEL_INFO(dev)->has_eu_pg));
seq_puts(m, "SSEU Device Status\n");
- if (IS_SKYLAKE(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ const int ss_max = 2;
+ int ss;
+ u32 sig1[ss_max], sig2[ss_max];
+
+ sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
+ sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
+ sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
+ sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (sig1[ss] & CHV_SS_PG_ENABLE)
+ /* skip disabled subslice */
+ continue;
+
+ s_tot = 1;
+ ss_per++;
+ eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+ ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+ eu_tot += eu_cnt;
+ eu_per = max(eu_per, eu_cnt);
+ }
+ ss_tot = ss_per;
+ } else if (IS_SKYLAKE(dev)) {
const int s_max = 3, ss_max = 4;
int s, ss;
u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
@@ -4548,6 +4684,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_ddb_info", i915_ddb_info, 0},
{"i915_sseu_status", i915_sseu_status, 0},
+ {"i915_drrs_status", i915_drrs_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 053e1788f578..d49ed68f041e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -68,6 +68,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
+ case I915_PARAM_REVISION:
+ value = dev->pdev->revision;
+ break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
@@ -150,6 +153,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_MMAP_VERSION:
value = 1;
break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = INTEL_INFO(dev)->subslice_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = INTEL_INFO(dev)->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
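A rough userspace sketch of querying one of the new parameters through the existing GETPARAM ioctl (libdrm assumed; error handling trimmed, fd is an already-open DRM node):

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int query_eu_total(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_EU_TOTAL,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;	/* fails with ENODEV when the count is unknown */
	return value;
}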
@@ -608,14 +621,42 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
/* Initialize slice/subslice/EU info */
if (IS_CHERRYVIEW(dev)) {
- u32 fuse, mask_eu;
+ u32 fuse, eu_dis;
fuse = I915_READ(CHV_FUSE_GT);
- mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
- CHV_FGT_EU_DIS_SS0_R1_MASK |
- CHV_FGT_EU_DIS_SS1_R0_MASK |
- CHV_FGT_EU_DIS_SS1_R1_MASK);
- info->eu_total = 16 - hweight32(mask_eu);
+
+ info->slice_total = 1;
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ info->subslice_total = info->subslice_per_slice;
+ /*
+ * CHV expected to always have a uniform distribution of EU
+ * across subslices.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ info->eu_total / info->subslice_total :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = 0;
+ info->has_subslice_pg = (info->subslice_total > 1);
+ info->has_eu_pg = (info->eu_per_subslice > 2);
} else if (IS_SKYLAKE(dev)) {
const int s_max = 3, ss_max = 4, eu_max = 8;
int s, ss;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0001642c38b4..82f8be4b6745 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -346,7 +346,6 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
};
static const struct intel_device_info intel_cherryview_info = {
- .is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -882,12 +881,6 @@ int i915_reset(struct drm_device *dev)
}
/*
- * FIXME: This races pretty badly against concurrent holders of
- * ring interrupts. This is possible since we've started to drop
- * dev->struct_mutex in select places when waiting for the gpu.
- */
-
- /*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
* previous concerns that it doesn't respond well to some forms
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ee5bc43dfc0b..8ba7e1b7b733 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150227"
+#define DRIVER_DATE "20150313"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -70,6 +70,9 @@
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
@@ -223,9 +226,14 @@ enum hpd_pin {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_plane(pipe, p) \
- for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
-#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+#define for_each_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -238,6 +246,12 @@ enum hpd_pin {
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_connector(dev, intel_connector) \
+ list_for_each_entry(intel_connector, \
+ &dev->mode_config.connector_list, \
+ base.head)
+
+
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
@@ -783,10 +797,19 @@ struct intel_context {
struct list_head link;
};
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+};
+
struct i915_fbc {
unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
+ unsigned int possible_framebuffer_bits;
+ unsigned int busy_bits;
struct intel_crtc *crtc;
int y;
@@ -799,14 +822,6 @@ struct i915_fbc {
* possible. */
bool enabled;
- /* On gen8 some rings cannont perform fbc clean operation so for now
- * we are doing this on SW with mmio.
- * This variable works in the opposite information direction
- * of ring->fbc_dirty telling software on frontbuffer tracking
- * to perform the cache clean on sw side.
- */
- bool need_sw_cache_clean;
-
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
@@ -1053,9 +1068,6 @@ struct intel_ilk_power_mgmt {
int c_m;
int r_t;
-
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
};
struct drm_i915_private;
@@ -1398,6 +1410,25 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
+struct vlv_wm_values {
+ struct {
+ uint16_t primary;
+ uint16_t sprite[2];
+ uint8_t cursor;
+ } pipe[3];
+
+ struct {
+ uint16_t plane;
+ uint8_t cursor;
+ } sr;
+
+ struct {
+ uint8_t cursor;
+ uint8_t sprite[2];
+ uint8_t primary;
+ } ddl[3];
+};
+
struct skl_ddb_entry {
uint16_t start, end; /* in number of blocks, 'end' is exclusive */
};
@@ -1760,6 +1791,7 @@ struct drm_i915_private {
union {
struct ilk_wm_values hw;
struct skl_wm_values skl_hw;
+ struct vlv_wm_values vlv;
};
} wm;
@@ -2396,6 +2428,7 @@ struct drm_i915_cmd_table {
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
#include "i915_trace.h"
@@ -2433,7 +2466,7 @@ struct i915_params {
bool disable_display;
bool disable_vtd_wa;
int use_mmio_flip;
- bool mmio_debug;
+ int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0107c2ae77d0..0fe313d0f609 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -351,7 +351,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
- int ret;
+ int ret = 0;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
@@ -360,6 +360,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -370,13 +371,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
- if (unwritten)
- return -EFAULT;
+ if (unwritten) {
+ ret = -EFAULT;
+ goto out;
+ }
}
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
- return 0;
+
+out:
+ intel_fb_obj_flush(obj, false);
+ return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
@@ -810,6 +816,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+
while (remain > 0) {
/* Operation in this page
*
@@ -830,7 +838,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
- goto out_unpin;
+ goto out_flush;
}
remain -= page_length;
@@ -838,6 +846,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
offset += page_length;
}
+out_flush:
+ intel_fb_obj_flush(obj, false);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
@@ -952,6 +962,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
if (ret)
return ret;
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+
i915_gem_object_pin_pages(obj);
offset = args->offset;
@@ -1030,6 +1042,7 @@ out:
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
+ intel_fb_obj_flush(obj, false);
return ret;
}
@@ -2929,9 +2942,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
req = obj->last_read_req;
/* Do this after OLR check to make sure we make forward progress polling
- * on this IOCTL with a timeout <=0 (like busy ioctl)
+ * on this IOCTL with a timeout == 0 (like busy ioctl)
*/
- if (args->timeout_ns <= 0) {
+ if (args->timeout_ns == 0) {
ret = -ETIME;
goto out;
}
@@ -2941,7 +2954,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+ ret = __i915_wait_request(req, reset_counter, true,
+ args->timeout_ns > 0 ? &args->timeout_ns : NULL,
file->driver_priv);
mutex_lock(&dev->struct_mutex);
i915_gem_request_unreference(req);
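For reference, a hedged userspace-side sketch of the timeout semantics this hunk settles on: timeout_ns == 0 acts as a poll (-ETIME if the object is still busy), while a negative value now waits without a deadline. The wrapper is illustrative; libdrm's drmIoctl is assumed.

#include <stdint.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = timeout_ns,	/* 0: poll, <0: no deadline */
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}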
@@ -3756,7 +3770,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -4071,7 +4085,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -4781,6 +4795,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
+ /* Double layer security blanket, see i915_gem_init() */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
@@ -4813,7 +4830,7 @@ i915_gem_init_hw(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
ret = ring->init_hw(ring);
if (ret)
- return ret;
+ goto out;
}
for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4830,9 +4847,11 @@ i915_gem_init_hw(struct drm_device *dev)
DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
- return ret;
+ goto out;
}
+out:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
@@ -4866,6 +4885,14 @@ int i915_gem_init(struct drm_device *dev)
dev_priv->gt.stop_ring = intel_logical_ring_stop;
}
+ /* This is just a security blanket to placate dragons.
+ * On some systems, we very sporadically observe that the first TLBs
+ * used by the CS may be stale, despite us poking the TLB reset. If
+ * we hold the forcewake during initialisation these problems
+ * just magically go away.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
ret = i915_gem_init_userptr(dev);
if (ret)
goto out_unlock;
@@ -4892,6 +4919,7 @@ int i915_gem_init(struct drm_device *dev)
}
out_unlock:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 85a6adaba258..dc10bc43864e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -971,7 +971,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
- intel_fb_obj_invalidate(obj, ring);
+ intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1518,7 +1518,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* - The batch is already pinned into the relevant ppgtt, so we
* already have the backing storage fully allocated.
* - No other BO uses the global gtt (well contexts, but meh),
- * so we don't really have issues with mutliple objects not
+ * so we don't really have issues with multiple objects not
* fitting due to fragmentation.
* So this is actually safe.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 74df3d1581dd..2034f7cf238b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -716,15 +716,19 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
- /* 1. Do all our allocations for page directories and page tables. */
- ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+ /* 1. Do all our allocations for page directories and page tables.
+ * We allocate more than was asked so that we can point the unused parts
+ * to valid entries that point to scratch page. Dynamic page tables
+ * will fix this eventually.
+ */
+ ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES);
if (ret)
return ret;
/*
* 2. Create DMA mappings for the page directories and page tables.
*/
- for (i = 0; i < max_pdp; i++) {
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
if (ret)
goto bail;
@@ -744,7 +748,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again.
*/
- for (i = 0; i < max_pdp; i++) {
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
@@ -764,9 +768,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ /* This is the area that we advertise as usable for the caller */
+ ppgtt->base.total = max_pdp * GEN8_PDES_PER_PAGE * GEN8_PTES_PER_PAGE * PAGE_SIZE;
+
+ /* Set all ptes to a valid scratch page. Also above requested space */
+ ppgtt->base.clear_range(&ppgtt->base, 0,
+ ppgtt->num_pd_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE,
+ true);
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9baecb79de8c..49ad5fb82ace 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1696,11 +1696,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- /* TODO: RPS on GEN9+ is not supported yet. */
- if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
- "GEN9+: unexpected RPS IRQ\n"))
- return;
-
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
@@ -3169,15 +3164,24 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
spin_lock_irq(&dev_priv->irq_lock);
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
- ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
- ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+ if (pipe_mask & 1 << PIPE_A)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
+ dev_priv->de_irq_mask[PIPE_A],
+ ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
+ if (pipe_mask & 1 << PIPE_B)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
+ dev_priv->de_irq_mask[PIPE_B],
+ ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
+ if (pipe_mask & 1 << PIPE_C)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
+ dev_priv->de_irq_mask[PIPE_C],
+ ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 44f2262a5553..e2d20ffe6586 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -171,10 +171,10 @@ module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
-module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
MODULE_PARM_DESC(mmio_debug,
- "Enable the MMIO debug code (default: false). This may negatively "
- "affect performance.");
+ "Enable the MMIO debug code for the first N failures (default: off). "
+ "This may negatively affect performance.");
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
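As the updated description suggests, the knob is now a count rather than a plain switch: booting with, say, i915.mmio_debug=2 (value purely illustrative) enables the expensive unclaimed-register checking only for the first two failures and then drops back to the cheap path.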
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 55143cb36e74..cc8ebabc488d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -566,6 +566,9 @@
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
+#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */
+#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */
+#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */
#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
@@ -641,6 +644,11 @@ enum skl_disp_power_wells {
#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
+#define PUNIT_REG_DDR_SETUP2 0x139
+#define FORCE_DDR_FREQ_REQ_ACK (1 << 8)
+#define FORCE_DDR_LOW_FREQ (1 << 1)
+#define FORCE_DDR_HIGH_FREQ (1 << 0)
+
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
@@ -1029,6 +1037,7 @@ enum skl_disp_power_wells {
#define DPIO_CHV_FIRST_MOD (0 << 8)
#define DPIO_CHV_SECOND_MOD (1 << 8)
#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
+#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0)
#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
#define _CHV_PLL_DW6_CH0 0x8018
@@ -1040,11 +1049,14 @@ enum skl_disp_power_wells {
#define _CHV_PLL_DW8_CH0 0x8020
#define _CHV_PLL_DW8_CH1 0x81A0
+#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
+#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0)
#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
#define _CHV_PLL_DW9_CH0 0x8024
#define _CHV_PLL_DW9_CH1 0x81A4
#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
+#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1)
#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
@@ -1522,6 +1534,8 @@ enum skl_disp_power_wells {
/* Fuse readout registers for GT */
#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_DISABLE_SS0 (1 << 10)
+#define CHV_FGT_DISABLE_SS1 (1 << 11)
#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
@@ -2099,6 +2113,14 @@ enum skl_disp_power_wells {
#define CDCLK_FREQ_SHIFT 4
#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
#define CZCLK_FREQ_MASK 0xf
+
+#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C)
+#define PFI_CREDIT_63 (9 << 28) /* chv only */
+#define PFI_CREDIT_31 (8 << 28) /* chv only */
+#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
+#define PFI_CREDIT_RESEND (1 << 27)
+#define VGA_FAST_MODE_DISABLE (1 << 14)
+
#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
/*
@@ -2427,6 +2449,12 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
+#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
+#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+ INTERVAL_1_33_US(us) : \
+ INTERVAL_1_28_US(us))
+
/*
* Logical Context regs
*/
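A quick sanity check of the new conversion macros, assuming (as the names suggest) a 1.28 us tick before gen9 and a 1.33 us tick on gen9: GT_INTERVAL_FROM_US(dev_priv, 10000) evaluates to 10000 * 100 >> 7 = 7812 units on pre-gen9 parts and to 10000 * 3 >> 2 = 7500 units on gen9, i.e. roughly 10 ms either way.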
@@ -3019,7 +3047,7 @@ enum skl_disp_power_wells {
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
-/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
@@ -4065,7 +4093,7 @@ enum skl_disp_power_wells {
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
-#define DSPARB 0x70030
+#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
@@ -4073,6 +4101,9 @@ enum skl_disp_power_wells {
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
+#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
+
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
@@ -4096,8 +4127,8 @@ enum skl_disp_power_wells {
#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_CURSORA_MASK (0x3f<<8)
-#define DSPFW_PLANEC_SHIFT_OLD 0
-#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_PLANEC_OLD_SHIFT 0
+#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
@@ -4136,25 +4167,25 @@ enum skl_disp_power_wells {
#define DSPFW_SPRITED_WM1_SHIFT 24
#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK (0xff<<16)
+#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK (0xff<<0)
+#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK (0xff<<16)
+#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK (0xff<<0)
+#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK (0xff<<16)
+#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
#define DSPFW_CURSORC_WM1_SHIFT 8
#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
#define DSPFW_CURSORC_SHIFT 0
@@ -4163,7 +4194,7 @@ enum skl_disp_power_wells {
/* vlv/chv high order bits */
#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (1<<24)
+#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
#define DSPFW_SPRITEF_HI_MASK (1<<23)
#define DSPFW_SPRITEE_HI_SHIFT 22
@@ -4184,7 +4215,7 @@ enum skl_disp_power_wells {
#define DSPFW_PLANEA_HI_MASK (1<<0)
#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (1<<24)
+#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
@@ -4205,21 +4236,17 @@ enum skl_disp_power_wells {
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
/* drain latency register values*/
-#define DRAIN_LATENCY_PRECISION_16 16
-#define DRAIN_LATENCY_PRECISION_32 32
-#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_PRECISION_HIGH (1<<31)
-#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
-#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
-#define DDL_PLANE_PRECISION_HIGH (1<<7)
-#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
+#define DDL_PRECISION_HIGH (1<<7)
+#define DDL_PRECISION_LOW (0<<7)
#define DRAIN_LATENCY_MASK 0x7f
+#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
+#define CBR_PND_DEADLINE_DISABLE (1<<31)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
@@ -6080,6 +6107,7 @@ enum skl_disp_power_wells {
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
#define HSW_FREQUENCY(x) ((x)<<24)
+#define GEN9_FREQUENCY(x) ((x)<<23)
#define GEN6_OFFSET(x) ((x)<<19)
#define GEN6_AGGRESSIVE_TURBO (0<<15)
#define GEN6_RC_VIDEO_FREQ 0xA00C
@@ -6098,8 +6126,10 @@ enum skl_disp_power_wells {
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
+#define GEN9_CAGF_SHIFT 23
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
+#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
@@ -6225,6 +6255,17 @@ enum skl_disp_power_wells {
#define GEN6_RC6 3
#define GEN6_RC7 4
+#define CHV_POWER_SS0_SIG1 0xa720
+#define CHV_POWER_SS1_SIG1 0xa728
+#define CHV_SS_PG_ENABLE (1<<1)
+#define CHV_EU08_PG_ENABLE (1<<9)
+#define CHV_EU19_PG_ENABLE (1<<17)
+#define CHV_EU210_PG_ENABLE (1<<25)
+
+#define CHV_POWER_SS0_SIG2 0xa724
+#define CHV_POWER_SS1_SIG2 0xa72c
+#define CHV_EU311_PG_ENABLE (1<<1)
+
#define GEN9_SLICE0_PGCTL_ACK 0x804c
#define GEN9_SLICE1_PGCTL_ACK 0x8050
#define GEN9_SLICE2_PGCTL_ACK 0x8054
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 67bd07edcbb0..247626885f49 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -319,7 +319,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
u32 rpstat = I915_READ(GEN6_RPSTAT1);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_GEN9(dev_priv))
+ ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
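
On gen9 the current actual frequency (CAGF) field moved to bits 31:23 of GEN6_RPSTAT1 and grew to 9 bits, so reading it back is the same mask-and-shift as the HSW and GEN6 variants, just with the new constants. A tiny standalone sketch of the extraction, using the GEN9_CAGF_* values added above and a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define GEN9_CAGF_SHIFT 23
#define GEN9_CAGF_MASK  (0x1ffu << GEN9_CAGF_SHIFT)

int main(void)
{
	/* Hypothetical RPSTAT1 read-back with a CAGF ratio of 0x2a (42). */
	uint32_t rpstat = 0x2au << GEN9_CAGF_SHIFT;
	uint32_t cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;

	/* The driver then converts this ratio to MHz via intel_gpu_freq(). */
	printf("CAGF ratio: %u\n", (unsigned int)cagf);
	return 0;
}
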
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 0db9ccf32605..97a88b5f6a26 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -32,7 +32,7 @@
* The following structure pages are defined in GEN MMIO space
* for virtualization. (One page for now)
*/
-#define VGT_MAGIC 0x4776544776544776 /* 'vGTvGTvG' */
+#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 011b8960fd75..3903b90fb64e 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -214,12 +214,18 @@ struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *crtc_state;
if (WARN_ON(!intel_crtc->config))
- return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL);
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ else
+ crtc_state = kmemdup(intel_crtc->config,
+ sizeof(*intel_crtc->config), GFP_KERNEL);
- return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config),
- GFP_KERNEL);
+ if (crtc_state)
+ crtc_state->base.crtc = crtc;
+
+ return &crtc_state->base;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 985d531aaf9e..8aee7d77ce9d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -156,16 +156,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
/* Idx NT mV T mV db */
- { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
- { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
- { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
- { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
- { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
- { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
- { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
- { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
- { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
- { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
+ { 0x00004014, 0x00000087 }, /* 0: 800 1000 2 */
};
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
@@ -202,7 +193,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_800mV_0dB,
+ int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const struct ddi_buf_trans *ddi_translations_fdi;
@@ -223,9 +214,16 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
}
+ /*
+ * On SKL, the recommendation from the hw team is to always use
+ * a certain type of level shifter (and thus the corresponding
+ * 800mV+2dB entry). Given that's the only validated entry, we
+ * override what is in the VBT, at least until further notice.
+ */
+ hdmi_level = 0;
ddi_translations_hdmi = skl_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 0;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@@ -234,7 +232,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 7;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
@@ -242,7 +240,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 6;
+ hdmi_default_entry = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
@@ -252,7 +250,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_800mV_0dB = 7;
+ hdmi_default_entry = 7;
}
switch (port) {
@@ -295,7 +293,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
/* Choose a good default if VBT is badly populated */
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_800mV_0dB;
+ hdmi_level = hdmi_default_entry;
/* Entry 9 is for HDMI: */
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
@@ -786,9 +784,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
case DPLL_CRTL1_LINK_RATE_810:
link_clock = 81000;
break;
+ case DPLL_CRTL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
case DPLL_CRTL1_LINK_RATE_1350:
link_clock = 135000;
break;
+ case DPLL_CRTL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
case DPLL_CRTL1_LINK_RATE_2700:
link_clock = 270000;
break;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1aa1cbd16c19..90b460cf2b57 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,6 +37,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
@@ -390,7 +391,7 @@ static const intel_limit_t intel_limits_chv = {
* them would make no difference.
*/
.dot = { .min = 25000 * 5, .max = 540000 * 5},
- .vco = { .min = 4860000, .max = 6480000 },
+ .vco = { .min = 4800000, .max = 6480000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
.m2 = { .min = 24 << 22, .max = 175 << 22 },
@@ -896,8 +897,12 @@ bool intel_crtc_active(struct drm_crtc *crtc)
*
* We can ditch the crtc->primary->fb check as soon as we can
* properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
*/
- return intel_crtc->active && crtc->primary->fb &&
+ return intel_crtc->active && crtc->primary->state->fb &&
intel_crtc->config->base.adjusted_mode.crtc_clock;
}
@@ -1300,14 +1305,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
u32 val;
if (INTEL_INFO(dev)->gen >= 9) {
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
} else if (IS_VALLEYVIEW(dev)) {
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
I915_STATE_WARN(val & SP_ENABLE,
@@ -2533,7 +2538,6 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
break;
}
}
-
}
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2654,9 +2658,6 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
- fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane),
@@ -2758,9 +2759,6 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
- fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -2886,11 +2884,6 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
- DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
- i915_gem_obj_ggtt_offset(obj),
- x, y, fb->width, fb->height,
- fb->pitches[0]);
-
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
@@ -3148,38 +3141,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
-{
- return crtc->base.state->enable && crtc->active &&
- crtc->config->has_pch_encoder;
-}
-
-static void ivb_modeset_global_resources(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
- struct intel_crtc *pipe_C_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
- uint32_t temp;
-
- /*
- * When everything is off disable fdi C so that we could enable fdi B
- * with all lanes. Note that we don't care about enabled pipes without
- * an enabled pch encoder.
- */
- if (!pipe_has_enabled_pch(pipe_B_crtc) &&
- !pipe_has_enabled_pch(pipe_C_crtc)) {
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
- temp = I915_READ(SOUTH_CHICKEN1);
- temp &= ~FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("disabling fdi C rx\n");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- }
-}
-
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -3835,20 +3796,23 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
- if (temp & FDI_BC_BIFURCATION_SELECT)
+ if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
- temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("enabling fdi C rx\n");
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ if (enable)
+ temp |= FDI_BC_BIFURCATION_SELECT;
+
+ DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
I915_WRITE(SOUTH_CHICKEN1, temp);
POSTING_READ(SOUTH_CHICKEN1);
}
@@ -3856,20 +3820,19 @@ static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
switch (intel_crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (intel_crtc->config->fdi_lanes > 2)
- WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ cpt_set_fdi_bc_bifurcation(dev, false);
else
- cpt_enable_fdi_bc_bifurcation(dev);
+ cpt_set_fdi_bc_bifurcation(dev, true);
break;
case PIPE_C:
- cpt_enable_fdi_bc_bifurcation(dev);
+ cpt_set_fdi_bc_bifurcation(dev, true);
break;
default:
@@ -4204,6 +4167,24 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc)
}
}
+/*
+ * Disable a plane internally without actually modifying the plane's state.
+ * This will allow us to easily restore the plane later by just reprogramming
+ * its state.
+ */
+static void disable_plane_internal(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_plane_state *state =
+ plane->funcs->atomic_duplicate_state(plane);
+ struct intel_plane_state *intel_state = to_intel_plane_state(state);
+
+ intel_state->visible = false;
+ intel_plane->commit_plane(plane, intel_state);
+
+ intel_plane_destroy_state(plane, state);
+}
+
static void intel_disable_sprite_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4213,8 +4194,8 @@ static void intel_disable_sprite_planes(struct drm_crtc *crtc)
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe == pipe)
- plane->funcs->disable_plane(plane);
+ if (plane->fb && intel_plane->pipe == pipe)
+ disable_plane_internal(plane);
}
}
@@ -4983,24 +4964,23 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
switch (cdclk) {
- case 400000:
- cmd = 3;
- break;
case 333333:
case 320000:
- cmd = 2;
- break;
case 266667:
- cmd = 1;
- break;
case 200000:
- cmd = 0;
break;
default:
MISSING_CASE(cdclk);
return;
}
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+ * CCK divider into the Punit register.
+ */
+ cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
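
The per-cdclk command table is gone: as the new comment says, the Punit field is just the CCK divider for the desired cdclk, derived from twice the HPLL frequency. A small standalone sketch of that arithmetic, assuming a hypothetical 800 MHz HPLL (the driver reads the real value from hardware):

#include <stdio.h>

/* Simplified unsigned round-to-nearest division (kernel: DIV_ROUND_CLOSEST). */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	/* Hypothetical HPLL frequency in kHz. */
	unsigned int hpll_freq = 800000;
	unsigned int cdclks[] = { 200000, 266667, 320000, 333333 };

	for (unsigned int i = 0; i < sizeof(cdclks) / sizeof(cdclks[0]); i++) {
		unsigned int cmd = DIV_ROUND_CLOSEST(hpll_freq << 1, cdclks[i]) - 1;

		printf("cdclk %6u kHz -> CCK divider field %u\n", cdclks[i], cmd);
	}
	return 0;
}
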
@@ -5020,27 +5000,25 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
-
- /* FIXME: Punit isn't quite ready yet */
- if (IS_CHERRYVIEW(dev_priv->dev))
- return 400000;
+ int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
* 320/333MHz (depends on HPLL freq)
- * 400MHz
- * So we check to see whether we're above 90% of the lower bin and
- * adjust if needed.
+ * 400MHz (VLV only)
+ * So we check to see whether we're above 90% (VLV) or 95% (CHV)
+ * of the lower bin and adjust if needed.
*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
- if (max_pixclk > freq_320*9/10)
+ if (!IS_CHERRYVIEW(dev_priv) &&
+ max_pixclk > freq_320*limit/100)
return 400000;
- else if (max_pixclk > 266667*9/10)
+ else if (max_pixclk > 266667*limit/100)
return freq_320;
else if (max_pixclk > 0)
return 266667;
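
With the 400 MHz bin dropped on CHV, picking a cdclk is a guard-banded walk up the remaining bins: 90% headroom on VLV, 95% on CHV, and 320 vs. 333 MHz depends on whether twice the HPLL divides evenly by 320 MHz. A self-contained sketch of that selection (the HPLL value is hypothetical):

#include <stdbool.h>
#include <stdio.h>

static int calc_cdclk(int max_pixclk, int hpll_freq, bool is_chv)
{
	int freq_320 = (hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
	int limit = is_chv ? 95 : 90;

	if (!is_chv && max_pixclk > freq_320 * limit / 100)
		return 400000;
	else if (max_pixclk > 266667 * limit / 100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

int main(void)
{
	/* A few pixel clocks in kHz, with a hypothetical 800 MHz HPLL. */
	int pixclks[] = { 0, 148500, 241500, 297000 };

	for (unsigned int i = 0; i < sizeof(pixclks) / sizeof(pixclks[0]); i++)
		printf("pixclk %6d kHz -> cdclk %d (chv) / %d (vlv)\n",
		       pixclks[i],
		       calc_cdclk(pixclks[i], 800000, true),
		       calc_cdclk(pixclks[i], 800000, false));
	return 0;
}
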
@@ -5081,6 +5059,42 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
*prepare_pipes |= (1 << intel_crtc->pipe);
}
+static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+{
+ unsigned int credits, default_credits;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ default_credits = PFI_CREDIT(12);
+ else
+ default_credits = PFI_CREDIT(8);
+
+ if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
+ /* CHV suggested value is 31 or 63 */
+ if (IS_CHERRYVIEW(dev_priv))
+ credits = PFI_CREDIT_31;
+ else
+ credits = PFI_CREDIT(15);
+ } else {
+ credits = default_credits;
+ }
+
+ /*
+ * WA - write default credits before re-programming
+ * FIXME: should we also set the resend bit here?
+ */
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ default_credits);
+
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ credits | PFI_CREDIT_RESEND);
+
+ /*
+ * FIXME is this guaranteed to clear
+ * immediately or should we poll for it?
+ */
+ WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+}
+
static void valleyview_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5104,6 +5118,8 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
else
valleyview_set_cdclk(dev, req_cdclk);
+ vlv_program_pfi_credits(dev_priv);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
}
@@ -5517,13 +5533,21 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
return encoder->get_hw_state(encoder, &pipe);
}
+static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+
+ if (crtc->base.state->enable &&
+ crtc->config->has_pch_encoder)
+ return crtc->config->fdi_lanes;
+
+ return 0;
+}
+
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
-
DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
@@ -5550,22 +5574,20 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
case PIPE_A:
return true;
case PIPE_B:
- if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
- pipe_config->fdi_lanes > 2) {
+ if (pipe_config->fdi_lanes > 2 &&
+ pipe_required_fdi_lanes(dev, PIPE_C) > 0) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
}
return true;
case PIPE_C:
- if (!pipe_has_enabled_pch(pipe_B_crtc) ||
- pipe_B_crtc->config->fdi_lanes <= 2) {
- if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return false;
- }
- } else {
+ if (pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+ if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
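
The rewritten checks make the IVB FDI sharing rule explicit: pipe C can never use more than 2 lanes, and pipes B and C conflict whenever B needs more than 2 lanes while C needs any. A compact sketch of that rule as a standalone predicate, where lanes_other stands in for pipe_required_fdi_lanes() of the sibling pipe:

#include <stdbool.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

/*
 * Sketch of the shared-lane rule: lanes_other is what the other pipe of the
 * B/C pair needs (0 when it is off or has no PCH encoder).
 */
static bool fdi_lanes_ok(enum pipe pipe, int lanes, int lanes_other)
{
	if (lanes > 4)
		return false;
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		return !(lanes > 2 && lanes_other > 0);
	case PIPE_C:
		return lanes <= 2 && lanes_other <= 2;
	}
	return false;
}

int main(void)
{
	printf("B:4 with C off -> %d\n", fdi_lanes_ok(PIPE_B, 4, 0));
	printf("B:4 with C:2   -> %d\n", fdi_lanes_ok(PIPE_B, 4, 2));
	printf("C:2 with B:2   -> %d\n", fdi_lanes_ok(PIPE_C, 2, 2));
	printf("C:2 with B:3   -> %d\n", fdi_lanes_ok(PIPE_C, 2, 3));
	return 0;
}
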
@@ -5699,10 +5721,6 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
u32 val;
int divider;
- /* FIXME: Punit isn't quite ready yet */
- if (IS_CHERRYVIEW(dev))
- return 400000;
-
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
@@ -6144,9 +6162,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
int pipe = crtc->pipe;
int dpll_reg = DPLL(crtc->pipe);
enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 loopfilter, intcoeff;
+ u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
- int refclk;
+ u32 dpio_val;
+ int vco;
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
@@ -6154,6 +6173,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
bestm2 = pipe_config->dpll.m2 >> 22;
bestp1 = pipe_config->dpll.p1;
bestp2 = pipe_config->dpll.p2;
+ vco = pipe_config->dpll.vco;
+ dpio_val = 0;
+ loopfilter = 0;
/*
* Enable Refclk and SSC
@@ -6179,26 +6201,56 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+ if (bestm2_frac)
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
- DPIO_CHV_FRAC_DIV_EN |
- (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+ if (bestm2_frac)
+ dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+ /* Program digital lock detect threshold */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+ if (!bestm2_frac)
+ dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
/* Loop filter */
- refclk = i9xx_get_refclk(crtc, 0);
- loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
- 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
- if (refclk == 100000)
- intcoeff = 11;
- else if (refclk == 38400)
- intcoeff = 10;
- else
- intcoeff = 9;
- loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
+ if (vco == 5400000) {
+ loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6200000) {
+ loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6480000) {
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x8;
+ } else {
+ /* Not supported. Apply the same limits as in the max case */
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0;
+ }
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
/* AFC Recal */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
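
The loop filter coefficients and the TDC target count are now chosen from the VCO frequency instead of the refclk. A standalone sketch of that mapping, using the same bands and values as the code above (VCO in kHz; out-of-range VCOs fall back to the max-band coefficients with no tribuf count):

#include <stdio.h>

struct chv_loopfilter {
	unsigned int prop, integ, gain, tribuf;
};

static struct chv_loopfilter chv_pick_loopfilter(int vco)
{
	if (vco == 5400000)
		return (struct chv_loopfilter){ 0x3, 0x8, 0x1, 0x9 };
	else if (vco <= 6200000)
		return (struct chv_loopfilter){ 0x5, 0xB, 0x3, 0x9 };
	else if (vco <= 6480000)
		return (struct chv_loopfilter){ 0x4, 0x9, 0x3, 0x8 };
	/* Not supported: same coefficients as the max band, no tribuf count. */
	return (struct chv_loopfilter){ 0x4, 0x9, 0x3, 0x0 };
}

int main(void)
{
	int vcos[] = { 5400000, 6000000, 6480000 };

	for (unsigned int i = 0; i < sizeof(vcos) / sizeof(vcos[0]); i++) {
		struct chv_loopfilter lf = chv_pick_loopfilter(vcos[i]);

		printf("vco %d kHz -> prop %#x int %#x gain %#x tribuf %#x\n",
		       vcos[i], lf.prop, lf.integ, lf.gain, lf.tribuf);
	}
	return 0;
}
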
@@ -8409,8 +8461,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
uint32_t cntl = 0, size = 0;
if (base) {
- unsigned int width = intel_crtc->cursor_width;
- unsigned int height = intel_crtc->cursor_height;
+ unsigned int width = intel_crtc->base.cursor->state->crtc_w;
+ unsigned int height = intel_crtc->base.cursor->state->crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
switch (stride) {
@@ -8474,7 +8526,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl = 0;
if (base) {
cntl = MCURSOR_GAMMA_ENABLE;
- switch (intel_crtc->cursor_width) {
+ switch (intel_crtc->base.cursor->state->crtc_w) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
@@ -8485,7 +8537,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(intel_crtc->cursor_width);
+ MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
@@ -8532,7 +8584,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
base = 0;
if (x < 0) {
- if (x + intel_crtc->cursor_width <= 0)
+ if (x + intel_crtc->base.cursor->state->crtc_w <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -8541,7 +8593,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= x << CURSOR_X_SHIFT;
if (y < 0) {
- if (y + intel_crtc->cursor_height <= 0)
+ if (y + intel_crtc->base.cursor->state->crtc_h <= 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -8557,8 +8609,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
- base += (intel_crtc->cursor_height *
- intel_crtc->cursor_width - 1) * 4;
+ base += (intel_crtc->base.cursor->state->crtc_h *
+ intel_crtc->base.cursor->state->crtc_w - 1) * 4;
}
if (IS_845G(dev) || IS_I865G(dev))
@@ -9219,7 +9271,6 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_fb_obj(work->old_fb));
drm_gem_object_unreference(&work->pending_flip_obj->base);
- drm_framebuffer_unreference(work->old_fb);
intel_fbc_update(dev);
@@ -9228,6 +9279,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+ drm_framebuffer_unreference(work->old_fb);
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
@@ -9799,7 +9851,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- WARN_ON(!in_irq());
+ WARN_ON(!in_interrupt());
if (crtc == NULL)
return;
@@ -9891,10 +9943,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto cleanup;
-
/* Reference the objects for the scheduled work. */
drm_framebuffer_reference(work->old_fb);
drm_gem_object_reference(&obj->base);
@@ -9904,6 +9952,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->pending_flip_obj = obj;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
+
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -9968,13 +10020,14 @@ cleanup_unpin:
intel_unpin_fb_obj(obj);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+ mutex_unlock(&dev->struct_mutex);
+cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
+
+ drm_gem_object_unreference_unlocked(&obj->base);
drm_framebuffer_unreference(work->old_fb);
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&dev->struct_mutex);
-cleanup:
spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
spin_unlock_irq(&dev->event_lock);
@@ -10014,8 +10067,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
connector->new_encoder =
to_intel_encoder(connector->base.encoder);
}
@@ -10046,8 +10098,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
connector->base.encoder = &connector->new_encoder->base;
}
@@ -10135,8 +10186,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
pipe_config->pipe_bpp = bpp;
/* Clamp display bpp to EDID value */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (!connector->new_encoder ||
connector->new_encoder->new_crtc != crtc)
continue;
@@ -10263,8 +10313,7 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
struct intel_encoder *encoder = connector->new_encoder;
if (!encoder)
@@ -10437,8 +10486,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
* to be part of the prepare_pipes mask. We don't (yet) support global
* modeset across multiple crtcs, so modeset_pipes will only have one
* bit set at most. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.encoder == &connector->new_encoder->base)
continue;
@@ -10807,7 +10855,7 @@ static void check_wm_state(struct drm_device *dev)
continue;
/* planes */
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
hw_entry = &hw_ddb.plane[pipe][plane];
sw_entry = &sw_ddb->plane[pipe][plane];
@@ -10841,8 +10889,7 @@ check_connector_state(struct drm_device *dev)
{
struct intel_connector *connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
/* This also checks the encoder/connector hw state with the
* ->get_hw_state callbacks. */
intel_connector_check_state(connector);
@@ -10872,8 +10919,7 @@ check_encoder_state(struct drm_device *dev)
I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
"encoder's active_connectors set, but no crtc\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.encoder != &encoder->base)
continue;
enabled = true;
@@ -11394,7 +11440,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
connector->new_encoder =
to_intel_encoder(config->save_connector_encoders[count++]);
}
@@ -11486,8 +11532,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
/* Otherwise traverse passed in connector list and get encoders
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
@@ -11512,15 +11557,16 @@ intel_modeset_stage_output_state(struct drm_device *dev,
if (&connector->new_encoder->base != connector->base.encoder) {
- DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
+ connector->base.base.id,
+ connector->base.name);
config->mode_changed = true;
}
}
/* connector->new_encoder is now updated for all connectors. */
/* Update crtc of enabled connectors. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
struct drm_crtc *new_crtc;
if (!connector->new_encoder)
@@ -11549,9 +11595,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Check for any encoders that needs to be disabled. */
for_each_intel_encoder(dev, encoder) {
int num_connectors = 0;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
num_connectors++;
@@ -11566,13 +11610,14 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
- DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n",
+ encoder->base.base.id,
+ encoder->base.name);
config->mode_changed = true;
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder)
if (connector->new_encoder != connector->encoder)
connector->encoder = connector->new_encoder;
@@ -11588,7 +11633,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
if (crtc->new_enabled != crtc->base.state->enable) {
- DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+ DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n",
+ crtc->base.base.id,
crtc->new_enabled ? "en" : "dis");
config->mode_changed = true;
}
@@ -11611,7 +11657,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc)
DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
pipe_name(crtc->pipe));
- list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->new_encoder &&
connector->new_encoder->new_crtc == crtc)
connector->new_encoder = NULL;
@@ -12182,8 +12228,8 @@ void intel_plane_destroy(struct drm_plane *plane)
}
const struct drm_plane_funcs intel_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ .update_plane = drm_plane_helper_update,
+ .disable_plane = drm_plane_helper_disable,
.destroy = intel_plane_destroy,
.set_property = drm_atomic_helper_plane_set_property,
.atomic_get_property = intel_plane_atomic_get_property,
@@ -12302,7 +12348,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
finish:
if (intel_crtc->active) {
- if (intel_crtc->cursor_width != state->base.crtc_w)
+ if (plane->state->crtc_w != state->base.crtc_w)
intel_crtc->atomic.update_wm = true;
intel_crtc->atomic.fb_bits |=
@@ -12345,8 +12391,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
update:
- intel_crtc->cursor_width = state->base.crtc_w;
- intel_crtc->cursor_height = state->base.crtc_h;
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, state->visible);
@@ -12574,10 +12618,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (HAS_DDI(dev)) {
int found;
- /* Haswell uses DDI functions to detect digital outputs */
+ /*
+ * Haswell uses DDI functions to detect digital outputs.
+ * On SKL pre-D0 the strap isn't connected, so we assume
+ * it's there.
+ */
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
- /* DDI A only supports eDP */
- if (found)
+ /* WaIgnoreDDIAStrap: skl */
+ if (found ||
+ (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -13068,8 +13117,6 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.modeset_global_resources =
- ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
} else if (IS_VALLEYVIEW(dev)) {
@@ -13365,7 +13412,7 @@ void intel_modeset_init(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) {
intel_crtc_init(dev, pipe);
- for_each_sprite(pipe, sprite) {
+ for_each_sprite(dev_priv, pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
@@ -13421,9 +13468,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
* by enabling the load detect pipe once. */
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
crt = &connector->base;
break;
@@ -13494,8 +13539,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
crtc->plane = plane;
/* ... and break all links. */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder->base.crtc != &crtc->base)
continue;
@@ -13504,8 +13548,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
}
/* multiple connectors may have the same encoder:
* handle them and break crtc link separately */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head)
+ for_each_intel_connector(dev, connector)
if (connector->encoder->base.crtc == &crtc->base) {
connector->encoder->base.crtc = NULL;
connector->encoder->connectors_active = false;
@@ -13609,9 +13652,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* a bug in one of the get_hw_state functions. Or someplace else
* in our code, like the register restore mess on resume. Clamp
* things to off as a safer default. */
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->encoder != encoder)
continue;
connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -13726,8 +13767,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe_name(pipe));
}
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- base.head) {
+ for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
connector->encoder->connectors_active = true;
@@ -13907,8 +13947,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_fbc_disable(dev);
- ironlake_teardown_rc6(dev);
-
mutex_unlock(&dev->struct_mutex);
/* flush any delayed tasks or pending work */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d1141d37e205..ca60060710d2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -84,6 +84,11 @@ static const struct dp_link_dpll chv_dpll[] = {
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
+/* Skylake supports the following rates */
+static const uint32_t gen9_rates[] = { 162000, 216000, 270000, 324000,
+ 432000, 540000 };
+
+static const uint32_t default_rates[] = { 162000, 270000, 540000 };
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
@@ -129,7 +134,10 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
case DP_LINK_BW_2_7:
break;
case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
- if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ /* WaDisableHBR2:skl */
+ max_link_bw = DP_LINK_BW_2_7;
+ else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
INTEL_INFO(dev)->gen >= 8) &&
intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
max_link_bw = DP_LINK_BW_5_4;
@@ -1075,7 +1083,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
u32 ctrl1;
@@ -1084,19 +1092,35 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw)
pipe_config->dpll_hw_state.cfgcr2 = 0;
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- switch (link_bw) {
- case DP_LINK_BW_1_62:
+ switch (link_clock / 2) {
+ case 81000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
SKL_DPLL0);
break;
- case DP_LINK_BW_2_7:
+ case 135000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
SKL_DPLL0);
break;
- case DP_LINK_BW_5_4:
+ case 270000:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
SKL_DPLL0);
break;
+ case 162000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
+ SKL_DPLL0);
+ break;
+ /*
+ * TBD: For DP link rates 2.16 GHz and 4.32 GHz the VCO is 8640 MHz, which
+ * results in a CDCLK change. Need to handle the CDCLK change by
+ * disabling pipes and re-enabling them.
+ */
+ case 108000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
+ SKL_DPLL0);
+ break;
+ case 216000:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
+ SKL_DPLL0);
+ break;
+
}
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
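
The switch works on link_clock / 2 because the SKL DPLL link-rate identifiers are named after half the DP link rate in MHz (810 for a 162000 kHz link, and so on), presumably since the PLL output clocks data on both edges. A small sketch of the mapping, covering the new intermediate rates as well:

#include <stdio.h>

/* Map a DP port clock (kHz) to the SKL DPLL link-rate number (half the
 * link rate, in MHz), mirroring the switch on link_clock / 2 above. */
static int skl_dpll_link_rate(int port_clock_khz)
{
	switch (port_clock_khz / 2) {
	case 81000:  return 810;
	case 108000: return 1080;
	case 135000: return 1350;
	case 162000: return 1620;
	case 216000: return 2160;
	case 270000: return 2700;
	default:     return -1;	/* not handled */
	}
}

int main(void)
{
	int clocks[] = { 162000, 216000, 270000, 324000, 432000, 540000 };

	for (unsigned int i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("port clock %6d kHz -> DPLL link rate %d\n",
		       clocks[i], skl_dpll_link_rate(clocks[i]));
	return 0;
}
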
@@ -1117,6 +1141,52 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
}
}
+static int
+intel_read_sink_rates(struct intel_dp *intel_dp, uint32_t *sink_rates)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ int i = 0;
+ uint16_t val;
+
+ if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
+ /*
+ * The receiver supports main-link rate selection only via the
+ * link rate table method, so read the link rates from
+ * supported_link_rates.
+ */
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) {
+ val = le16_to_cpu(intel_dp->supported_rates[i]);
+ if (val == 0)
+ break;
+
+ sink_rates[i] = val * 200;
+ }
+
+ if (i <= 0)
+ DRM_ERROR("No rates in SUPPORTED_LINK_RATES");
+ }
+ return i;
+}
+
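
DP_SUPPORTED_LINK_RATES holds up to eight little-endian 16-bit entries in units of 200 kHz, terminated by a zero entry, which is why the loop above multiplies by 200 and stops at the first zero. A minimal sketch of the same decoding on a hypothetical raw table:

#include <stdint.h>
#include <stdio.h>

#define DP_MAX_SUPPORTED_RATES 8

static int parse_sink_rates(const uint8_t *raw, uint32_t *rates_khz)
{
	int i;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
		/* Each entry is little-endian, in units of 200 kHz. */
		uint16_t val = (uint16_t)(raw[2 * i] | (raw[2 * i + 1] << 8));

		if (val == 0)
			break;
		rates_khz[i] = val * 200u;
	}
	return i;
}

int main(void)
{
	/* Hypothetical table advertising 162, 270 and 540 MHz link rates. */
	uint8_t raw[DP_MAX_SUPPORTED_RATES * 2] = {
		0x2a, 0x03,	/* 810  * 200 kHz = 162000 kHz */
		0x46, 0x05,	/* 1350 * 200 kHz = 270000 kHz */
		0x8c, 0x0a,	/* 2700 * 200 kHz = 540000 kHz */
	};
	uint32_t rates[DP_MAX_SUPPORTED_RATES];
	int n = parse_sink_rates(raw, rates);

	for (int i = 0; i < n; i++)
		printf("sink rate %d: %u kHz\n", i, (unsigned int)rates[i]);
	return 0;
}
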
+static int
+intel_read_source_rates(struct intel_dp *intel_dp, uint32_t *source_rates)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ int i;
+ int max_default_rate;
+
+ if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
+ for (i = 0; i < ARRAY_SIZE(gen9_rates); ++i)
+ source_rates[i] = gen9_rates[i];
+ } else {
+ /* Index of the max_link_bw supported + 1 */
+ max_default_rate = (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+ for (i = 0; i < max_default_rate; ++i)
+ source_rates[i] = default_rates[i];
+ }
+ return i;
+}
+
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config, int link_bw)
@@ -1150,6 +1220,45 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
+static int intel_supported_rates(const uint32_t *source_rates, int source_len,
+                                 const uint32_t *sink_rates, int sink_len,
+                                 uint32_t *supported_rates)
+{
+ int i = 0, j = 0, k = 0;
+
+ /* For panels with an eDP version lower than 1.4 */
+ if (sink_len == 0) {
+ for (i = 0; i < source_len; ++i)
+ supported_rates[i] = source_rates[i];
+ return source_len;
+ }
+
+ /* For eDP 1.4 panels, find the common rates between source and sink */
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ supported_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
+
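
Both rate lists are kept ascending, so intel_supported_rates() above is a single merge-style pass that keeps only the rates present on both sides (the sink_len == 0 fallback, which simply copies the source list for pre-1.4 panels, is omitted here). A self-contained sketch with the default source rates and a hypothetical sink table:

#include <stdint.h>
#include <stdio.h>

/* Merge-intersect two ascending rate lists; returns the number of matches. */
static int intersect_rates(const uint32_t *src, int src_len,
			   const uint32_t *sink, int sink_len, uint32_t *out)
{
	int i = 0, j = 0, k = 0;

	while (i < src_len && j < sink_len) {
		if (src[i] == sink[j]) {
			out[k++] = src[i];
			i++;
			j++;
		} else if (src[i] < sink[j]) {
			i++;
		} else {
			j++;
		}
	}
	return k;
}

int main(void)
{
	uint32_t source[] = { 162000, 270000, 540000 };
	/* Hypothetical sink table decoded from DP_SUPPORTED_LINK_RATES. */
	uint32_t sink[] = { 162000, 216000, 270000, 324000, 432000 };
	uint32_t common[8];
	int n = intersect_rates(source, 3, sink, 5, common);

	for (int i = 0; i < n; i++)
		printf("common rate %d: %u kHz\n", i, (unsigned int)common[i]);
	return 0;
}
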
+static int rate_to_index(uint32_t find, const uint32_t *rates)
+{
+ int i = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
+ if (find == rates[i])
+ break;
+
+ return i;
+}
+
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -1166,10 +1275,25 @@ intel_dp_compute_config(struct intel_encoder *encoder,
int max_lane_count = intel_dp_max_lane_count(intel_dp);
/* Conveniently, the link BW constants become indices with a shift...*/
int min_clock = 0;
- int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
+ int max_clock;
int bpp, mode_rate;
- static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
+ uint32_t sink_rates[8];
+ uint32_t supported_rates[8] = {0};
+ uint32_t source_rates[8];
+ int source_len, sink_len, supported_len;
+
+ sink_len = intel_read_sink_rates(intel_dp, sink_rates);
+
+ source_len = intel_read_source_rates(intel_dp, source_rates);
+
+ supported_len = intel_supported_rates(source_rates, source_len,
+ sink_rates, sink_len, supported_rates);
+
+ /* No common link rates between source and sink */
+ WARN_ON(supported_len <= 0);
+
+ max_clock = supported_len - 1;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -1193,8 +1317,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return false;
DRM_DEBUG_KMS("DP link computation with max lane count %i "
- "max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock],
+ "max bw %d pixel clock %iKHz\n",
+ max_lane_count, supported_rates[max_clock],
adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
@@ -1223,8 +1347,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
bpp);
for (clock = min_clock; clock <= max_clock; clock++) {
- for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
- link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+ for (lane_count = min_lane_count;
+ lane_count <= max_lane_count;
+ lane_count <<= 1) {
+
+ link_clock = supported_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -1253,10 +1380,19 @@ found:
if (intel_dp->color_range)
pipe_config->limited_color_range = true;
- intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
+
+ intel_dp->link_bw =
+ drm_dp_link_rate_to_bw_code(supported_rates[clock]);
+
+ if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
+ intel_dp->rate_select =
+ rate_to_index(supported_rates[clock], sink_rates);
+ intel_dp->link_bw = 0;
+ }
+
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->port_clock = supported_rates[clock];
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
@@ -1279,7 +1415,7 @@ found:
}
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
- skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
+ skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
@@ -3366,6 +3502,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+ if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+ &intel_dp->rate_select, 1);
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
@@ -3578,6 +3717,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint8_t rev;
if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
@@ -3609,6 +3749,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
} else
intel_dp->use_tps3 = false;
+ /* Intermediate frequency support */
+ if (is_edp(intel_dp) &&
+ (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+ (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+ (rev >= 0x03)) { /* eDP v1.4 or higher */
+ intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SUPPORTED_LINK_RATES,
+ intel_dp->supported_rates,
+ sizeof(intel_dp->supported_rates));
+ }
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -4966,12 +5116,13 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
if (!dev_priv->drrs.dp)
return;
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+
mutex_lock(&dev_priv->drrs.mutex);
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
- cancel_delayed_work_sync(&dev_priv->drrs.work);
intel_dp_set_drrs_state(dev_priv->dev,
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
@@ -5004,13 +5155,13 @@ void intel_edp_drrs_flush(struct drm_device *dev,
if (!dev_priv->drrs.dp)
return;
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+
mutex_lock(&dev_priv->drrs.mutex);
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
- cancel_delayed_work_sync(&dev_priv->drrs.work);
-
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
!dev_priv->drrs.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->drrs.work,
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9f67a379a9a5..be124928ca14 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -58,7 +58,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = 24;
pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
- list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, intel_connector) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
@@ -140,7 +140,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ for_each_intel_connector(dev, intel_connector) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f4aa849b243e..c77128c67cf8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -464,7 +464,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
- int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_size;
uint32_t cursor_base;
@@ -623,10 +622,12 @@ struct intel_dp {
uint32_t color_range;
bool color_range_auto;
uint8_t link_bw;
+ uint8_t rate_select;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -839,7 +840,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -874,7 +876,8 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
@@ -1115,7 +1118,11 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_fbc_update(struct drm_device *dev);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_disable(struct drm_device *dev);
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1231,7 +1238,6 @@ void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
-void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 618f7bdab0ba..9fcf446e95f5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -174,29 +174,10 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void snb_fbc_blit_update(struct drm_device *dev)
+static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
-
- /* Blitter is part of Media powerwell on VLV. No impact of
- * his param in other platforms for now */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
-
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+ I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ POSTING_READ(MSG_FBC_REND_STATE);
}
static void ilk_fbc_enable(struct drm_crtc *crtc)
@@ -239,9 +220,10 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- snb_fbc_blit_update(dev);
}
+ intel_fbc_nuke(dev_priv);
+
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -320,7 +302,7 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- snb_fbc_blit_update(dev);
+ intel_fbc_nuke(dev_priv);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -340,19 +322,6 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->fbc.enabled;
}
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_GEN8(dev))
- return;
-
- if (!intel_fbc_enabled(dev))
- return;
-
- I915_WRITE(MSG_FBC_REND_STATE, value);
-}
-
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
@@ -685,6 +654,44 @@ out_disable:
i915_gem_stolen_cleanup_compression(dev);
}
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ struct drm_device *dev = dev_priv->dev;
+ unsigned int fbc_bits;
+
+ if (origin == ORIGIN_GTT)
+ return;
+
+ if (dev_priv->fbc.enabled)
+ fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
+ else if (dev_priv->fbc.fbc_work)
+ fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
+ to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+ else
+ fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
+
+ dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
+
+ if (dev_priv->fbc.busy_bits)
+ intel_fbc_disable(dev);
+}
+
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!dev_priv->fbc.busy_bits)
+ return;
+
+ dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
+
+ if (!dev_priv->fbc.busy_bits)
+ intel_fbc_update(dev);
+}
+
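
FBC now rides on frontbuffer tracking: an invalidate that touches the plane FBC compresses disables FBC and records the bit in busy_bits, and the flush clears those bits and only re-enables FBC once nothing relevant is still busy. A toy model of that bookkeeping (the ORIGIN_GTT early return and the pending fbc_work case are omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fbc_state {
	bool enabled;
	uint32_t fbc_bits;	/* frontbuffer bits FBC currently cares about */
	uint32_t busy_bits;
};

static void fbc_invalidate(struct fbc_state *fbc, uint32_t frontbuffer_bits)
{
	fbc->busy_bits |= fbc->fbc_bits & frontbuffer_bits;
	if (fbc->busy_bits)
		fbc->enabled = false;	/* stands in for intel_fbc_disable() */
}

static void fbc_flush(struct fbc_state *fbc, uint32_t frontbuffer_bits)
{
	if (!fbc->busy_bits)
		return;
	fbc->busy_bits &= ~frontbuffer_bits;
	if (!fbc->busy_bits)
		fbc->enabled = true;	/* stands in for intel_fbc_update() */
}

int main(void)
{
	/* Hypothetical: FBC tracks the primary plane of pipe A (bit 0). */
	struct fbc_state fbc = { .enabled = true, .fbc_bits = 1u << 0 };

	fbc_invalidate(&fbc, 1u << 0);	/* CPU rendering starts on pipe A */
	printf("after invalidate: enabled=%d busy=%#x\n",
	       fbc.enabled, (unsigned int)fbc.busy_bits);
	fbc_flush(&fbc, 1u << 0);	/* rendering flushed */
	printf("after flush:      enabled=%d busy=%#x\n",
	       fbc.enabled, (unsigned int)fbc.busy_bits);
	return 0;
}
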
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
@@ -693,12 +700,22 @@ out_disable:
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
+ enum pipe pipe;
+
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
return;
}
+ for_each_pipe(dev_priv, pipe) {
+ dev_priv->fbc.possible_framebuffer_bits |=
+ INTEL_FRONTBUFFER_PRIMARY(pipe);
+
+ if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+ break;
+ }
+
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = gen7_fbc_enable;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 234a699b8219..757c0d216f80 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -71,6 +71,31 @@ static int intel_fbdev_set_par(struct fb_info *info)
return ret;
}
+static int intel_fbdev_blank(int blank, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_blank(blank, info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -79,7 +104,7 @@ static struct fb_ops intelfb_ops = {
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
+ .fb_blank = intel_fbdev_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 73cb6e036445..0a1bac8ac72b 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -118,8 +118,6 @@ static void intel_mark_fb_busy(struct drm_device *dev,
continue;
intel_increase_pllclock(dev, pipe);
- if (ring && intel_fbc_enabled(dev))
- ring->fbc_dirty = true;
}
}
@@ -127,6 +125,7 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
+ * @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
@@ -135,7 +134,8 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -158,6 +158,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
+ intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
}
/**
@@ -185,16 +186,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
-
- /*
- * FIXME: Unconditional fbc flushing here is a rather gross hack and
- * needs to be reworked into a proper frontbuffer tracking scheme like
- * psr employs.
- */
- if (dev_priv->fbc.need_sw_cache_clean) {
- dev_priv->fbc.need_sw_cache_clean = false;
- bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
- }
+ intel_fbc_flush(dev_priv, frontbuffer_bits);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 542cf6844dc3..288c9d24098e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -263,6 +263,47 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
return NULL;
}
+static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if (enable)
+ val &= ~FORCE_DDR_HIGH_FREQ;
+ else
+ val |= FORCE_DDR_HIGH_FREQ;
+ val &= ~FORCE_DDR_LOW_FREQ;
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ if (enable)
+ val |= DSP_MAXFIFO_PM5_ENABLE;
+ else
+ val &= ~DSP_MAXFIFO_PM5_ENABLE;
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
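chv_set_memory_dvfs() above follows the usual Punit request/ack handshake: flip the frequency-force bits, raise the request/ack bit, write the register, then poll until the Punit clears the ack (or give up after 3 ms). A toy model of that handshake, with a plain variable standing in for vlv_punit_read()/vlv_punit_write() and invented bit positions:

    #include <stdio.h>

    #define FORCE_DDR_HIGH_FREQ    (1u << 0)    /* bit positions invented */
    #define FORCE_DDR_LOW_FREQ     (1u << 1)
    #define FORCE_DDR_FREQ_REQ_ACK (1u << 2)

    static unsigned int ddr_setup2;             /* stands in for the Punit reg */

    static void punit_write(unsigned int val)
    {
            /* pretend the Punit services the request immediately */
            ddr_setup2 = val & ~FORCE_DDR_FREQ_REQ_ACK;
    }

    static void set_memory_dvfs(int enable)
    {
            unsigned int val = ddr_setup2;

            if (enable)
                    val &= ~FORCE_DDR_HIGH_FREQ;
            else
                    val |= FORCE_DDR_HIGH_FREQ;
            val &= ~FORCE_DDR_LOW_FREQ;
            val |= FORCE_DDR_FREQ_REQ_ACK;      /* raise the request */
            punit_write(val);

            /* the driver polls this with wait_for(..., 3); one read here */
            if (ddr_setup2 & FORCE_DDR_FREQ_REQ_ACK)
                    printf("timed out waiting for Punit DDR DVFS request\n");
    }

    int main(void)
    {
            set_memory_dvfs(0);
            printf("DDR_SETUP2 = 0x%x\n", ddr_setup2);  /* 0x1 */
            return 0;
    }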
+
+#define FW_WM(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
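As a quick illustration of what FW_WM() produces when a watermark is inserted into its DSPFW field; the shift and mask values below are invented for the demo, the real ones live in i915_reg.h:

    #include <stdio.h>

    #define DSPFW_SR_SHIFT 23
    #define DSPFW_SR_MASK  (0x1ffu << 23)

    #define FW_WM(value, plane) \
            (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

    int main(void)
    {
            unsigned int reg = 0;

            reg &= ~DSPFW_SR_MASK;              /* clear the old field ... */
            reg |= FW_WM(0x40, SR);             /* ... then insert the new one */
            printf("DSPFW1 = 0x%08x\n", reg);   /* 0x20000000 */
            return 0;
    }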
+
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
struct drm_device *dev = dev_priv->dev;
@@ -270,6 +311,8 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
if (IS_VALLEYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ if (IS_CHERRYVIEW(dev))
+ chv_set_memory_pm5(dev_priv, enable);
} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
} else if (IS_PINEVIEW(dev)) {
@@ -292,6 +335,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
enable ? "enabled" : "disabled");
}
+
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -308,6 +352,61 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
*/
static const int pessimal_latency_ns = 5000;
+#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
+ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
+
+static int vlv_get_fifo_size(struct drm_device *dev,
+ enum pipe pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int sprite0_start, sprite1_start, size;
+
+ switch (pipe) {
+ uint32_t dsparb, dsparb2, dsparb3;
+ case PIPE_A:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
+ break;
+ case PIPE_B:
+ dsparb = I915_READ(DSPARB);
+ dsparb2 = I915_READ(DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
+ break;
+ case PIPE_C:
+ dsparb2 = I915_READ(DSPARB2);
+ dsparb3 = I915_READ(DSPARB3);
+ sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
+ sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
+ break;
+ default:
+ return 0;
+ }
+
+ switch (plane) {
+ case 0:
+ size = sprite0_start;
+ break;
+ case 1:
+ size = sprite1_start - sprite0_start;
+ break;
+ case 2:
+ size = 512 - 1 - sprite1_start;
+ break;
+ default:
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
+ pipe_name(pipe), plane == 0 ? "primary" : "sprite",
+ plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
+ size);
+
+ return size;
+}
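A worked example of the 9-bit FIFO split points vlv_get_fifo_size() decodes, with invented DSPARB/DSPARB2 contents for pipe A; the primary, sprite0 and sprite1 shares always sum to the 512-entry FIFO minus one:

    #include <stdio.h>

    #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
            ((((dsparb) >> (lo_shift)) & 0xff) | \
             ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

    int main(void)
    {
            unsigned int dsparb = 0x0000c040, dsparb2 = 0x00000011;
            int sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);  /* 320 */
            int sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);  /* 448 */

            printf("primary=%d sprite0=%d sprite1=%d\n",
                   sprite0_start,                   /* 320 */
                   sprite1_start - sprite0_start,   /* 128 */
                   512 - 1 - sprite1_start);        /*  63 */
            return 0;
    }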
+
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -553,7 +652,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode;
- int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int clock;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
@@ -565,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
- reg |= wm << DSPFW_SR_SHIFT;
+ reg |= FW_WM(wm, SR);
I915_WRITE(DSPFW1, reg);
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
@@ -575,7 +674,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ reg |= FW_WM(wm, CURSOR_SR);
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
@@ -584,7 +683,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= wm & DSPFW_HPLL_SR_MASK;
+ reg |= FW_WM(wm, HPLL_SR);
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
@@ -593,7 +692,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ reg |= FW_WM(wm, HPLL_CURSOR);
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
@@ -629,7 +728,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -644,7 +743,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
+ entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -716,7 +815,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -730,7 +829,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
+ entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
@@ -739,232 +838,234 @@ static bool g4x_compute_srwm(struct drm_device *dev,
display, cursor);
}
-static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
- int pixel_size,
- int *prec_mult,
- int *drain_latency)
-{
- struct drm_device *dev = crtc->dev;
- int entries;
- int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
+#define FW_WM_VLV(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
- if (WARN(clock == 0, "Pixel clock is zero!\n"))
- return false;
+static void vlv_write_wm_values(struct intel_crtc *crtc,
+ const struct vlv_wm_values *wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
- return false;
+ I915_WRITE(VLV_DDL(pipe),
+ (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
- entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- if (IS_CHERRYVIEW(dev))
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
- DRAIN_LATENCY_PRECISION_16;
- else
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
- DRAIN_LATENCY_PRECISION_32;
- *drain_latency = (64 * (*prec_mult) * 4) / entries;
+ I915_WRITE(DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
+ I915_WRITE(DSPFW2,
+ FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
+ I915_WRITE(DSPFW3,
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ I915_WRITE(DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ I915_WRITE(DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
+ I915_WRITE(DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
+ I915_WRITE(DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ } else {
+ I915_WRITE(DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ I915_WRITE(DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ }
- if (*drain_latency > DRAIN_LATENCY_MASK)
- *drain_latency = DRAIN_LATENCY_MASK;
+ POSTING_READ(DSPFW1);
- return true;
+ dev_priv->wm.vlv = *wm;
}
-/*
- * Update drain latency registers of memory arbiter
- *
- * Valleyview SoC has a new memory arbiter and needs drain latency registers
- * to be programmed. Each plane has a drain latency multiplier and a drain
- * latency value.
- */
+#undef FW_WM_VLV
-static void vlv_update_drain_latency(struct drm_crtc *crtc)
+static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
+ struct drm_plane *plane)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pixel_size;
- int drain_latency;
- enum pipe pipe = intel_crtc->pipe;
- int plane_prec, prec_mult, plane_dl;
- const int high_precision = IS_CHERRYVIEW(dev) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
+ int entries, prec_mult, drain_latency, pixel_size;
+ int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
+ const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
- plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
- DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
- (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
+ /*
+ * FIXME the plane might have an fb
+ * but be invisible (eg. due to clipping)
+ */
+ if (!intel_crtc->active || !plane->state->fb)
+ return 0;
- if (!intel_crtc_active(crtc)) {
- I915_WRITE(VLV_DDL(pipe), plane_dl);
- return;
- }
+ if (WARN(clock == 0, "Pixel clock is zero!\n"))
+ return 0;
- /* Primary plane Drain Latency */
- pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
- if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_PLANE_PRECISION_HIGH :
- DDL_PLANE_PRECISION_LOW;
- plane_dl |= plane_prec | drain_latency;
- }
+ pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
- /* Cursor Drain Latency
- * BPP is always 4 for cursor
- */
- pixel_size = 4;
+ if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
+ return 0;
+
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+
+ prec_mult = high_precision;
+ drain_latency = 64 * prec_mult * 4 / entries;
- /* Program cursor DL only if it is enabled */
- if (intel_crtc->cursor_base &&
- vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_CURSOR_PRECISION_HIGH :
- DDL_CURSOR_PRECISION_LOW;
- plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
+ if (drain_latency > DRAIN_LATENCY_MASK) {
+ prec_mult /= 2;
+ drain_latency = 64 * prec_mult * 4 / entries;
}
- I915_WRITE(VLV_DDL(pipe), plane_dl);
-}
+ if (drain_latency > DRAIN_LATENCY_MASK)
+ drain_latency = DRAIN_LATENCY_MASK;
-#define single_plane_enabled(mask) is_power_of_2(mask)
+ return drain_latency | (prec_mult == high_precision ?
+ DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
+}
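Plugging numbers into the formula above: a 148.5 MHz pixel clock at 32 bpp drains roughly 596 bytes per microsecond, which gives a drain latency of 27 with the VLV high-precision multiplier (64) and 6 with the CHV one (16). A self-contained check; DRAIN_LATENCY_MASK is assumed to be 0x7f here, and the precision flag that the real function ORs into its return value is left out:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define DRAIN_LATENCY_MASK 0x7f             /* assumed field width */

    static int drain_latency(int clock_khz, int cpp, int high_precision)
    {
            int entries = DIV_ROUND_UP(clock_khz, 1000) * cpp;
            int prec = high_precision;
            int dl = 64 * prec * 4 / entries;

            if (dl > DRAIN_LATENCY_MASK) {      /* fall back to low precision */
                    prec /= 2;
                    dl = 64 * prec * 4 / entries;
            }
            return dl > DRAIN_LATENCY_MASK ? DRAIN_LATENCY_MASK : dl;
    }

    int main(void)
    {
            printf("vlv=%d chv=%d\n",
                   drain_latency(148500, 4, 64),    /* 27 */
                   drain_latency(148500, 4, 16));   /*  6 */
            return 0;
    }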
-static void valleyview_update_wm(struct drm_crtc *crtc)
+static int vlv_compute_wm(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ int fifo_size)
{
- struct drm_device *dev = crtc->dev;
- static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- int ignore_plane_sr, ignore_cursor_sr;
- unsigned int enabled = 0;
- bool cxsr_enabled;
+ int clock, entries, pixel_size;
- vlv_update_drain_latency(crtc);
+ /*
+ * FIXME the plane might have an fb
+ * but be invisible (eg. due to clipping)
+ */
+ if (!crtc->active || !plane->base.state->fb)
+ return 0;
- if (g4x_compute_wm0(dev, PIPE_A,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
+ pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+ clock = crtc->config->base.adjusted_mode.crtc_clock;
- if (g4x_compute_wm0(dev, PIPE_B,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
+ entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &plane_sr, &ignore_cursor_sr) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- 2*sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &ignore_plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
+ /*
+ * Set up the watermark such that we don't start issuing memory
+ * requests until we are within PND's max deadline value (256us).
+ * Idea being to be idle as long as possible while still taking
+ * advantage of PND's deadline scheduling. The limit of 8
+ * cachelines (used when the FIFO will anyway drain in less time
+ * than 256us) should match what we would have done if trickle
+ * feed were enabled.
+ */
+ return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
+}
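And the resulting PND-deadline watermark: with the same ~596 bytes/us, 256 us worth of data needs more FIFO than exists, so the clamp leaves only the 8-cacheline floor, while a slow mode gets a much deeper watermark. A small check of the arithmetic (the 1023-entry maxfifo size is illustrative):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int clamp_int(int v, int lo, int hi)
    {
            return v < lo ? lo : v > hi ? hi : v;
    }

    static int vlv_wm(int clock_khz, int cpp, int fifo_size)
    {
            int entries = DIV_ROUND_UP(clock_khz, 1000) * cpp; /* ~bytes/us */

            /* don't start fetching until within 256us of the deadline,
             * but always keep at least 8 cachelines of headroom */
            return fifo_size - clamp_int(DIV_ROUND_UP(256 * entries, 64),
                                         0, fifo_size - 8);
    }

    int main(void)
    {
            printf("25.175 MHz, 32bpp: wm=%d\n", vlv_wm(25175, 4, 1023));  /* 607 */
            printf("148.5 MHz, 32bpp: wm=%d\n", vlv_wm(148500, 4, 1023)); /*   8 */
            return 0;
    }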
+
+static bool vlv_compute_sr_wm(struct drm_device *dev,
+ struct vlv_wm_values *wm)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_crtc *crtc;
+ enum pipe pipe = INVALID_PIPE;
+ int num_planes = 0;
+ int fifo_size = 0;
+ struct intel_plane *plane;
+
+ wm->sr.cursor = wm->sr.plane = 0;
+
+ crtc = single_enabled_crtc(dev);
+ /* maxfifo not supported on pipe C */
+ if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
+ pipe = to_intel_crtc(crtc)->pipe;
+ num_planes = !!wm->pipe[pipe].primary +
+ !!wm->pipe[pipe].sprite[0] +
+ !!wm->pipe[pipe].sprite[1];
+ fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
+ if (fifo_size == 0 || num_planes > 1)
+ return false;
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
+ to_intel_plane(crtc->cursor), 0x3f);
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
+ if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (plane->pipe != pipe)
+ continue;
+
+ wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
+ plane, fifo_size);
+ if (wm->sr.plane != 0)
+ break;
+ }
+
+ return true;
}
-static void cherryview_update_wm(struct drm_crtc *crtc)
+static void valleyview_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, planec_wm;
- int cursora_wm, cursorb_wm, cursorc_wm;
- int plane_sr, cursor_sr;
- int ignore_plane_sr, ignore_cursor_sr;
- unsigned int enabled = 0;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
bool cxsr_enabled;
+ struct vlv_wm_values wm = dev_priv->wm.vlv;
- vlv_update_drain_latency(crtc);
+ wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
+ wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
+ to_intel_plane(crtc->primary),
+ vlv_get_fifo_size(dev, pipe, 0));
- if (g4x_compute_wm0(dev, PIPE_A,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
+ wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
+ wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
+ to_intel_plane(crtc->cursor),
+ 0x3f);
- if (g4x_compute_wm0(dev, PIPE_B,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
+ cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
- if (g4x_compute_wm0(dev, PIPE_C,
- &valleyview_wm_info, pessimal_latency_ns,
- &valleyview_cursor_wm_info, pessimal_latency_ns,
- &planec_wm, &cursorc_wm))
- enabled |= 1 << PIPE_C;
+ if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+ return;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &plane_sr, &ignore_cursor_sr) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- 2*sr_latency_ns,
- &valleyview_wm_info,
- &valleyview_cursor_wm_info,
- &ignore_plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
- }
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+ "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
+ wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+ wm.sr.plane, wm.sr.cursor);
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
- "SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- planec_wm, cursorc_wm,
- plane_sr, cursor_sr);
+ /*
+ * FIXME DDR DVFS introduces massive memory latencies which
+ * are not known to system agent so any deadline specified
+ * by the display may not be respected. To support DDR DVFS
+ * the watermark code needs to be rewritten to essentially
+ * bypass deadline mechanism and rely solely on the
+ * watermarks. For now disable DDR DVFS.
+ */
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_set_memory_dvfs(dev_priv, false);
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
- I915_WRITE(DSPFW9_CHV,
- (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
- DSPFW_CURSORC_MASK)) |
- (planec_wm << DSPFW_PLANEC_SHIFT) |
- (cursorc_wm << DSPFW_CURSORC_SHIFT));
+ if (!cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(intel_crtc, &wm);
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -979,30 +1080,47 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = to_intel_plane(plane)->pipe;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
int sprite = to_intel_plane(plane)->plane;
- int drain_latency;
- int plane_prec;
- int sprite_dl;
- int prec_mult;
- const int high_precision = IS_CHERRYVIEW(dev) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
+ bool cxsr_enabled;
+ struct vlv_wm_values wm = dev_priv->wm.vlv;
- sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
- (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
+ if (enabled) {
+ wm.ddl[pipe].sprite[sprite] =
+ vlv_compute_drain_latency(crtc, plane);
- if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
- &drain_latency)) {
- plane_prec = (prec_mult == high_precision) ?
- DDL_SPRITE_PRECISION_HIGH(sprite) :
- DDL_SPRITE_PRECISION_LOW(sprite);
- sprite_dl |= plane_prec |
- (drain_latency << DDL_SPRITE_SHIFT(sprite));
+ wm.pipe[pipe].sprite[sprite] =
+ vlv_compute_wm(intel_crtc,
+ to_intel_plane(plane),
+ vlv_get_fifo_size(dev, pipe, sprite+1));
+ } else {
+ wm.ddl[pipe].sprite[sprite] = 0;
+ wm.pipe[pipe].sprite[sprite] = 0;
}
- I915_WRITE(VLV_DDL(pipe), sprite_dl);
+ cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
+
+ if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+ return;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
+ "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
+ sprite_name(pipe, sprite),
+ wm.pipe[pipe].sprite[sprite],
+ wm.sr.plane, wm.sr.cursor);
+
+ if (!cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(intel_crtc, &wm);
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1045,17 +1163,17 @@ static void g4x_update_wm(struct drm_crtc *crtc)
plane_sr, cursor_sr);
I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- (planea_wm << DSPFW_PLANEA_SHIFT));
+ FW_WM(plane_sr, SR) |
+ FW_WM(cursorb_wm, CURSORB) |
+ FW_WM(planeb_wm, PLANEB) |
+ FW_WM(planea_wm, PLANEA));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
+ FW_WM(cursora_wm, CURSORA));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -1080,7 +1198,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
+ int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1098,7 +1216,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * to_intel_crtc(crtc)->cursor_width;
+ pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1121,19 +1239,21 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << DSPFW_CURSORB_SHIFT) |
- (8 << DSPFW_PLANEB_SHIFT) |
- (8 << DSPFW_PLANEA_SHIFT));
- I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
- (8 << DSPFW_PLANEC_SHIFT_OLD));
+ I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+ I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
+#undef FW_WM
+
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
@@ -1157,7 +1277,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1179,7 +1299,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->fb->bits_per_pixel / 8;
+ int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1202,7 +1322,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I915GM(dev) && enabled) {
struct drm_i915_gem_object *obj;
- obj = intel_fb_obj(enabled->primary->fb);
+ obj = intel_fb_obj(enabled->primary->state->fb);
/* self-refresh seems busted with untiled */
if (obj->tiling_mode == I915_TILING_NONE)
@@ -1226,7 +1346,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
+ int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1663,7 +1783,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return 0;
/* The WM are computed with base on how long it takes to fill a single
@@ -1918,19 +2038,31 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return;
p->active = true;
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
- p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
- p->cur.bytes_per_pixel = 4;
+
+ if (crtc->primary->state->fb) {
+ p->pri.enabled = true;
+ p->pri.bytes_per_pixel =
+ crtc->primary->state->fb->bits_per_pixel / 8;
+ } else {
+ p->pri.enabled = false;
+ p->pri.bytes_per_pixel = 0;
+ }
+
+ if (crtc->cursor->state->fb) {
+ p->cur.enabled = true;
+ p->cur.bytes_per_pixel = 4;
+ } else {
+ p->cur.enabled = false;
+ p->cur.bytes_per_pixel = 0;
+ }
p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
- p->cur.horiz_pixels = intel_crtc->cursor_width;
- /* TODO: for now, assume primary and cursor planes are always enabled. */
- p->pri.enabled = true;
- p->cur.enabled = true;
+ p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2430,7 +2562,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
nth_active_pipe = 0;
for_each_crtc(dev, crtc) {
- if (!intel_crtc_active(crtc))
+ if (!to_intel_crtc(crtc)->active)
continue;
if (crtc == for_crtc)
@@ -2463,13 +2595,12 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_device *dev = dev_priv->dev;
enum pipe pipe;
int plane;
u32 val;
for_each_pipe(dev_priv, pipe) {
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
@@ -2518,6 +2649,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
@@ -2542,7 +2674,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
alloc->end -= cursor_blocks;
/* 1. Allocate the mininum required blocks for each active plane */
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
const struct intel_plane_wm_parameters *p;
p = &params->plane[plane];
@@ -2670,7 +2802,7 @@ static void skl_compute_wm_global_parameters(struct drm_device *dev,
struct drm_plane *plane;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- config->num_pipes_active += intel_crtc_active(crtc);
+ config->num_pipes_active += to_intel_crtc(crtc)->active;
/* FIXME: I don't think we need those two global parameters on SKL */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2691,32 +2823,36 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
struct drm_framebuffer *fb;
int i = 1; /* Index for sprite planes start */
- p->active = intel_crtc_active(crtc);
+ p->active = intel_crtc->active;
if (p->active) {
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
- /*
- * For now, assume primary and cursor planes are always enabled.
- */
- p->plane[0].enabled = true;
- p->plane[0].bytes_per_pixel =
- crtc->primary->fb->bits_per_pixel / 8;
- p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
- p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
- p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
fb = crtc->primary->state->fb;
- /*
- * Framebuffer can be NULL on plane disable, but it does not
- * matter for watermarks if we assume no tiling in that case.
- */
- if (fb)
+ if (fb) {
+ p->plane[0].enabled = true;
+ p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8;
p->plane[0].tiling = fb->modifier[0];
+ } else {
+ p->plane[0].enabled = false;
+ p->plane[0].bytes_per_pixel = 0;
+ p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
+ }
+ p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
+ p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
- p->cursor.enabled = true;
- p->cursor.bytes_per_pixel = 4;
- p->cursor.horiz_pixels = intel_crtc->cursor_width ?
- intel_crtc->cursor_width : 64;
+ fb = crtc->cursor->state->fb;
+ if (fb) {
+ p->cursor.enabled = true;
+ p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
+ p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
+ p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
+ } else {
+ p->cursor.enabled = false;
+ p->cursor.bytes_per_pixel = 0;
+ p->cursor.horiz_pixels = 64;
+ p->cursor.vert_pixels = 64;
+ }
}
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2822,7 +2958,7 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
- if (!intel_crtc_active(crtc))
+ if (!to_intel_crtc(crtc)->active)
return 0;
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
@@ -2996,12 +3132,11 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
- struct drm_device *dev = dev_priv->dev;
int plane;
DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
- for_each_plane(pipe, plane) {
+ for_each_plane(dev_priv, pipe, plane) {
I915_WRITE(PLANE_SURF(pipe, plane),
I915_READ(PLANE_SURF(pipe, plane)));
}
@@ -3370,7 +3505,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
- if (!intel_crtc_active(crtc))
+ if (!intel_crtc->active)
return;
hw->dirty[pipe] = true;
@@ -3425,7 +3560,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
- active->pipe_enabled = intel_crtc_active(crtc);
+ active->pipe_enabled = intel_crtc->active;
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
@@ -3539,41 +3674,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
pixel_size, enabled, scaled);
}
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
- struct drm_i915_gem_object *ctx;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
-
- ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
-
- return ctx;
-
-err_unpin:
- i915_gem_object_ggtt_unpin(ctx);
-err_unref:
- drm_gem_object_unreference(&ctx->base);
- return NULL;
-}
-
/**
* Lock protecting IPS related data structures
*/
@@ -3706,7 +3806,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at atm).
*/
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
u32 limits;
@@ -3716,9 +3816,15 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- limits = dev_priv->rps.max_freq_softlimit << 24;
- if (val <= dev_priv->rps.min_freq_softlimit)
- limits |= dev_priv->rps.min_freq_softlimit << 16;
+ if (IS_GEN9(dev_priv->dev)) {
+ limits = (dev_priv->rps.max_freq_softlimit) << 23;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+ } else {
+ limits = dev_priv->rps.max_freq_softlimit << 24;
+ if (val <= dev_priv->rps.min_freq_softlimit)
+ limits |= dev_priv->rps.min_freq_softlimit << 16;
+ }
return limits;
}
@@ -3726,6 +3832,8 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
int new_power;
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
@@ -3758,59 +3866,53 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
- I915_WRITE(GEN6_RP_UP_EI, 12500);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+ ei_up = 16000;
+ threshold_up = 95;
/* Downclock if less than 85% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 85;
break;
case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
- I915_WRITE(GEN6_RP_UP_EI, 10250);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+ ei_up = 13000;
+ threshold_up = 90;
/* Downclock if less than 75% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 75;
break;
case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
- I915_WRITE(GEN6_RP_UP_EI, 8000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+ ei_up = 10000;
+ threshold_up = 85;
/* Downclock if less than 60% busy over 32ms */
- I915_WRITE(GEN6_RP_DOWN_EI, 25000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ ei_down = 32000;
+ threshold_down = 60;
break;
}
+ I915_WRITE(GEN6_RP_UP_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_up));
+ I915_WRITE(GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+
+ I915_WRITE(GEN6_RP_DOWN_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_down));
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
dev_priv->rps.power = new_power;
dev_priv->rps.last_adj = 0;
}
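The percentage-based programming above is meant to reproduce the old hard-coded values. Assuming GT_INTERVAL_FROM_US() counts in the customary 1.28 us units on pre-gen9 parts (an assumption, not something this hunk shows), the LOW_POWER case works out to 12500 counts for the 16 ms evaluation interval and 11875 for its 95% threshold, right next to the old 12500/11800 magic numbers:

    #include <stdio.h>

    static unsigned int interval_from_us(unsigned int us)
    {
            return us * 100 / 128;              /* 1.28 us per count (assumed) */
    }

    int main(void)
    {
            unsigned int ei_up = 16000, threshold_up = 95;  /* LOW_POWER case */

            printf("UP_EI        = %u (was 12500)\n", interval_from_us(ei_up));
            printf("UP_THRESHOLD = %u (was 11800)\n",
                   interval_from_us(ei_up * threshold_up / 100));
            return 0;
    }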
@@ -3847,7 +3949,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN9_FREQUENCY(val));
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
@@ -3860,7 +3965,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
POSTING_READ(GEN6_RPNSWREQ);
@@ -4081,6 +4186,13 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ if (IS_SKYLAKE(dev)) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ the natural hardware unit for SKL */
+ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+ dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+ }
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
@@ -4121,23 +4233,21 @@ static void gen9_enable_rps(struct drm_device *dev)
gen6_init_rps_frequencies(dev);
- I915_WRITE(GEN6_RPNSWREQ, 0xc800000);
- I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000);
+ /* Program defaults and thresholds for RPS */
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+
+ /* 1 second timeout */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
+ GT_INTERVAL_FROM_US(dev_priv, 1000000));
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08);
- I915_WRITE(GEN6_RP_UP_EI, 0x101d0);
- I915_WRITE(GEN6_RP_DOWN_EI, 0x55730);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
- I915_WRITE(GEN6_PMINTRMSK, 0x6);
- I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
- gen6_enable_rps_interrupts(dev);
+ /* Rely on the gen6_set_rps() call below to program/set up the
+ * Up/Down EI & threshold registers, as well as the RP_CONTROL,
+ * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4990,124 +5100,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-void ironlake_teardown_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->ips.renderctx) {
- i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
- drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
- dev_priv->ips.renderctx = NULL;
- }
-
- if (dev_priv->ips.pwrctx) {
- i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
- drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
- dev_priv->ips.pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
-
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
-
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- }
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->ips.renderctx == NULL)
- dev_priv->ips.renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.renderctx)
- return -ENOMEM;
-
- if (dev_priv->ips.pwrctx == NULL)
- dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void ironlake_enable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
- bool was_interruptible;
- int ret;
-
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!intel_enable_rc6(dev))
- return;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ret = ironlake_setup_rc6(dev);
- if (ret)
- return;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- ret = intel_ring_begin(ring, 6);
- if (ret) {
- ironlake_teardown_rc6(dev);
- dev_priv->mm.interruptible = was_interruptible;
- return;
- }
-
- intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- intel_ring_emit(ring, MI_SUSPEND_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_advance(ring);
-
- /*
- * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
- * does an implicit flush, combined with MI_FLUSH above, it should be
- * safe to assume that renderctx is valid
- */
- ret = intel_ring_idle(ring);
- dev_priv->mm.interruptible = was_interruptible;
- if (ret) {
- DRM_ERROR("failed to enable ironlake power savings\n");
- ironlake_teardown_rc6(dev);
- return;
- }
-
- I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-
- intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
-}
-
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
@@ -5620,12 +5612,7 @@ static void gen6_suspend_rps(struct drm_device *dev)
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- /*
- * TODO: disable RPS interrupts on GEN9+ too once RPS support
- * is added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_disable_rps_interrupts(dev);
+ gen6_disable_rps_interrupts(dev);
}
/**
@@ -5655,7 +5642,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
- ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6) {
intel_suspend_gt_powersave(dev);
@@ -5683,12 +5669,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- /*
- * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
- * added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_reset_rps_interrupts(dev);
+ gen6_reset_rps_interrupts(dev);
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
@@ -5707,8 +5688,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
}
dev_priv->rps.enabled = true;
- if (INTEL_INFO(dev)->gen < 9)
- gen6_enable_rps_interrupts(dev);
+ gen6_enable_rps_interrupts(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5726,7 +5706,6 @@ void intel_enable_gt_powersave(struct drm_device *dev)
if (IS_IRONLAKE_M(dev)) {
mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
- ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
@@ -6259,11 +6238,22 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
gen6_check_mch_setup(dev);
}
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ I915_WRITE(CBR1_VLV, 0);
+}
+
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+ vlv_init_display_clock_gating(dev_priv);
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@@ -6311,8 +6301,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
-
/*
* BSpec says this must be set, even though
* WaDisable4x2SubspanOptimization isn't listed for VLV.
@@ -6349,9 +6337,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ vlv_init_display_clock_gating(dev_priv);
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@@ -6541,7 +6527,7 @@ void intel_init_pm(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
- dev_priv->display.update_wm = cherryview_update_wm;
+ dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
@@ -6709,7 +6695,9 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- if (IS_CHERRYVIEW(dev_priv->dev))
+ if (IS_GEN9(dev_priv->dev))
+ return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+ else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_gpu_freq(dev_priv, val);
@@ -6719,7 +6707,9 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- if (IS_CHERRYVIEW(dev_priv->dev))
+ if (IS_GEN9(dev_priv->dev))
+ return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+ else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_freq_opcode(dev_priv, val);
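For the new gen9 branches in intel_gpu_freq() and intel_freq_opcode(): the 16.66 MHz unit mentioned at gen6_init_rps_frequencies() corresponds to GT_FREQUENCY_MULTIPLIER = 50 divided by GEN9_FREQ_SCALER = 3 (both constants assumed here, the diff only shows the ratio in use), and the two conversions round-trip up to integer truncation:

    #include <stdio.h>

    #define GT_FREQUENCY_MULTIPLIER 50          /* assumed */
    #define GEN9_FREQ_SCALER        3           /* assumed: 50/3 = 16.66 MHz */

    int main(void)
    {
            int opcode = 42;    /* a hardware frequency request value */
            int mhz = opcode * GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER;

            printf("opcode %d -> %d MHz -> opcode %d\n",
                   opcode, mhz,
                   mhz * GEN9_FREQ_SCALER / GT_FREQUENCY_MULTIPLIER);
            return 0;
    }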
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cd79c3843452..441e2502b889 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
return 0;
}
-static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
-{
- int ret;
-
- if (!ring->fbc_dirty)
- return 0;
-
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
- /* WaFbcNukeOn3DBlt:ivb/hsw */
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, MSG_FBC_REND_STATE);
- intel_ring_emit(ring, value);
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit(ring, MSG_FBC_REND_STATE);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
- intel_ring_advance(ring);
-
- ring->fbc_dirty = false;
- return 0;
-}
-
static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
@@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- if (!invalidate_domains && flush_domains)
- return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
return 0;
}
@@ -458,14 +432,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
return ret;
}
- ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
- if (ret)
- return ret;
-
- if (!invalidate_domains && flush_domains)
- return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
- return 0;
+ return gen8_emit_pipe_control(ring, flags, scratch_addr);
}
static void ring_write_tail(struct intel_engine_cs *ring,
@@ -2477,7 +2444,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@@ -2486,7 +2452,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(ring->dev)->gen >= 8)
+ if (INTEL_INFO(dev)->gen >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2506,7 +2472,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(ring->dev)->gen >= 8) {
+ if (INTEL_INFO(dev)->gen >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
@@ -2515,13 +2481,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
}
intel_ring_advance(ring);
- if (!invalidate && flush) {
- if (IS_GEN7(dev))
- return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
- else if (IS_BROADWELL(dev))
- dev_priv->fbc.need_sw_cache_clean = true;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8f3b49a23ccf..c761fe05ad6f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -267,7 +267,6 @@ struct intel_engine_cs {
*/
struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
- bool fbc_dirty;
wait_queue_head_t irq_queue;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6d8e29abbc33..ce00e6994eeb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -194,8 +194,39 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
- gen8_irq_power_well_post_enable(dev_priv);
+ if (IS_BROADWELL(dev))
+ gen8_irq_power_well_post_enable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+}
+
+static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ if (power_well->data == SKL_DISP_PW_2) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ gen8_irq_power_well_post_enable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+ }
+
+ if (power_well->data == SKL_DISP_PW_1) {
+ intel_prepare_ddi(dev);
+ gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
+ }
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -293,7 +324,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
{
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
- bool check_fuse_status = false;
+ bool is_enabled, enable_requested, check_fuse_status = false;
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
fuse_status = I915_READ(SKL_FUSE_STATUS);
@@ -324,15 +355,17 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
}
req_mask = SKL_POWER_WELL_REQ(power_well->data);
+ enable_requested = tmp & req_mask;
state_mask = SKL_POWER_WELL_STATE(power_well->data);
+ is_enabled = tmp & state_mask;
if (enable) {
- if (!(tmp & req_mask)) {
+ if (!enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
- DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
}
- if (!(tmp & state_mask)) {
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
state_mask), 1))
DRM_ERROR("%s enable timeout\n",
@@ -340,7 +373,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
check_fuse_status = true;
}
} else {
- if (tmp & req_mask) {
+ if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
@@ -358,6 +391,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
DRM_ERROR("PG2 distributing status timeout\n");
}
}
+
+ if (enable && !is_enabled)
+ skl_power_well_post_enable(dev_priv, power_well);
}
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1420,7 +1456,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_get - grab an auxilliary power domain reference
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
* @dev_priv: i915 device instance
*
* This function grabs a power domain reference for the auxiliary power domain
@@ -1437,10 +1473,10 @@ void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
}
/**
- * intel_aux_display_runtime_put - release an auxilliary power domain reference
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
* @dev_priv: i915 device instance
*
- * This function drops the auxilliary power domain reference obtained by
+ * This function drops the auxiliary power domain reference obtained by
* intel_aux_display_runtime_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
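
The skl_set_power_well() hunk above caches the per-well request and state bits in two booleans before branching on them. A minimal stand-alone sketch of that masking pattern; the PW_REQ()/PW_STATE() macros and the register value are illustrative stand-ins, not the real i915 definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the per-power-well request/state bits. */
    #define PW_REQ(id)    (1u << (((id) * 2) + 1))
    #define PW_STATE(id)  (1u << ((id) * 2))

    int main(void)
    {
        uint32_t reg = 0x00000002;   /* pretend readout: well 0 requested, not yet up */
        unsigned int well = 0;

        bool enable_requested = reg & PW_REQ(well);
        bool is_enabled       = reg & PW_STATE(well);

        if (enable_requested && !is_enabled)
            printf("well %u: requested, waiting for state bit\n", well);
        else if (enable_requested && is_enabled)
            printf("well %u: up\n", well);
        else
            printf("well %u: disabled\n", well);

        return 0;
    }
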
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 64ad2b40179f..9e554c2cfbb4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1247,7 +1247,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
switch (crtc->config->pixel_multiplier) {
default:
- WARN(1, "unknown pixel mutlipler specified\n");
+ WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7051da7015d3..a82873631851 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1361,10 +1361,10 @@ out_unlock:
int intel_plane_restore(struct drm_plane *plane)
{
- if (!plane->crtc || !plane->fb)
+ if (!plane->crtc || !plane->state->fb)
return 0;
- return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
+ return plane->funcs->update_plane(plane, plane->crtc, plane->state->fb,
plane->state->crtc_x, plane->state->crtc_y,
plane->state->crtc_w, plane->state->crtc_h,
plane->state->src_x, plane->state->src_y,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8879f17770aa..ab5cc94588e1 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -557,18 +557,24 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ i915.mmio_debug--; /* Only report the first N failures */
}
}
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
- if (i915.mmio_debug)
+ static bool mmio_debug_once = true;
+
+ if (i915.mmio_debug || !mmio_debug_once)
return;
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
+ DRM_DEBUG("Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ i915.mmio_debug = mmio_debug_once--;
}
}
@@ -1082,8 +1088,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case of MT access is
- * not working
+ * not working. At this stage we don't know which flavour this
+ * ivb is, so it is better to also reset the gen6 fw registers
+ * before the ecobus check.
*/
+
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ __raw_posting_read(dev_priv, ECOBUS);
+
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
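
The hsw_unclaimed_reg_detect() change arms a one-shot reporting mode: the first unclaimed access seeds i915.mmio_debug and clears a static flag, and each reported failure then decrements the counter. A small user-space sketch of that counter logic, with plain variables standing in for the module parameter:

    #include <stdbool.h>
    #include <stdio.h>

    static int mmio_debug;                 /* stands in for i915.mmio_debug */
    static bool mmio_debug_once = true;    /* one-shot arming flag */

    static void detect_unclaimed(void)
    {
        if (mmio_debug || !mmio_debug_once)
            return;                        /* already armed, or one-shot spent */
        printf("unclaimed access seen: arming oneshot reporting\n");
        mmio_debug = 1;                    /* the driver does both in one step: */
        mmio_debug_once = false;           /* i915.mmio_debug = mmio_debug_once-- */
    }

    static void report_failure(void)
    {
        if (mmio_debug <= 0)
            return;                        /* reporting exhausted or never armed */
        printf("unclaimed register report (%d left)\n", mmio_debug);
        mmio_debug--;                      /* only report the first N failures */
    }

    int main(void)
    {
        detect_unclaimed();                /* arms reporting */
        report_failure();                  /* reported; counter drops to 0 */
        report_failure();                  /* silent */
        detect_unclaimed();                /* one-shot already spent: stays silent */
        return 0;
    }
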
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d13d1b5a859f..df09ca7c4889 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence)
return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
+struct radeon_wait_cb {
+ struct fence_cb base;
+ struct task_struct *task;
+};
+
+static void
+radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+ struct radeon_wait_cb *wait =
+ container_of(cb, struct radeon_wait_cb, base);
+
+ wake_up_process(wait->task);
+}
+
static signed long radeon_fence_default_wait(struct fence *f, bool intr,
signed long t)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
- bool signaled;
+ struct radeon_wait_cb cb;
- fence_enable_sw_signaling(&fence->base);
+ cb.task = current;
- /*
- * This function has to return -EDEADLK, but cannot hold
- * exclusive_lock during the wait because some callers
- * may already hold it. This means checking needs_reset without
- * lock, and not fiddling with any gpu internals.
- *
- * The callback installed with fence_enable_sw_signaling will
- * run before our wait_event_*timeout call, so we will see
- * both the signaled fence and the changes to needs_reset.
- */
+ if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+ return t;
+
+ while (t > 0) {
+ if (intr)
+ set_current_state(TASK_INTERRUPTIBLE);
+ else
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ /*
+ * radeon_test_signaled must be called after
+ * set_current_state to prevent a race with wake_up_process
+ */
+ if (radeon_test_signaled(fence))
+ break;
+
+ if (rdev->needs_reset) {
+ t = -EDEADLK;
+ break;
+ }
+
+ t = schedule_timeout(t);
+
+ if (t > 0 && intr && signal_pending(current))
+ t = -ERESTARTSYS;
+ }
+
+ __set_current_state(TASK_RUNNING);
+ fence_remove_callback(f, &cb.base);
- if (intr)
- t = wait_event_interruptible_timeout(rdev->fence_queue,
- ((signaled = radeon_test_signaled(fence)) ||
- rdev->needs_reset), t);
- else
- t = wait_event_timeout(rdev->fence_queue,
- ((signaled = radeon_test_signaled(fence)) ||
- rdev->needs_reset), t);
-
- if (t > 0 && !signaled)
- return -EDEADLK;
return t;
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 86e75798320f..b1d74bc375d8 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7236,8 +7236,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
if (!vclk || !dclk) {
- /* keep the Bypass mode, put PLL to sleep */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ /* keep the Bypass mode */
return 0;
}
@@ -7253,8 +7252,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
/* set VCO_MODE to 1 */
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
- /* toggle UPLL_SLEEP to 1 then back to 0 */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ /* disable sleep mode */
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
/* deassert UPLL_RESET */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6c6b655defcf..e13b9cbc304e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err1;
}
- ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
- (dev_priv->vram_size >> PAGE_SHIFT));
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed initializing memory manager for VRAM.\n");
- goto out_err2;
- }
-
- dev_priv->has_gmr = true;
- if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
- refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
- VMW_PL_GMR) != 0) {
- DRM_INFO("No GMR memory available. "
- "Graphics memory resources are very limited.\n");
- dev_priv->has_gmr = false;
- }
-
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
- dev_priv->has_mob = true;
- if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
- VMW_PL_MOB) != 0) {
- DRM_INFO("No MOB memory available. "
- "3D will be disabled.\n");
- dev_priv->has_mob = false;
- }
- }
-
dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
dev_priv->mmio_size);
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_fman;
}
+
+ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+ (dev_priv->vram_size >> PAGE_SHIFT));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+ goto out_no_vram;
+ }
+
+ dev_priv->has_gmr = true;
+ if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+ refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+ VMW_PL_GMR) != 0) {
+ DRM_INFO("No GMR memory available. "
+ "Graphics memory resources are very limited.\n");
+ dev_priv->has_gmr = false;
+ }
+
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ dev_priv->has_mob = true;
+ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+ VMW_PL_MOB) != 0) {
+ DRM_INFO("No MOB memory available. "
+ "3D will be disabled.\n");
+ dev_priv->has_mob = false;
+ }
+ }
+
vmw_kms_save_vga(dev_priv);
/* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@ out_no_fifo:
vmw_kms_close(dev_priv);
out_no_kms:
vmw_kms_restore_vga(dev_priv);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+ if (dev_priv->has_gmr)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_no_vram:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@ out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
arch_phys_wc_del(dev_priv->mmio_mtrr);
- if (dev_priv->has_mob)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
- if (dev_priv->has_gmr)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
-out_err2:
(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
}
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
+
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+ if (dev_priv->has_gmr)
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
- if (dev_priv->has_mob)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
- if (dev_priv->has_gmr)
- (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
vmw_ttm_global_release(dev_priv);
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ pci_disable_device(pdev);
drm_put_dev(dev);
}
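
The vmw_driver_load() rework moves the TTM memory-manager setup later in the probe sequence and mirrors the move in the error path, keeping the usual pattern of unwinding in reverse order of initialization. A stand-alone sketch of that goto-unwind shape; the three init/teardown steps are invented for the demo:

    #include <stdio.h>

    static int init_fence(void)  { puts("init fence");  return 0; }
    static int init_vram(void)   { puts("init vram");   return 0; }
    static int init_kms(void)    { puts("init kms");    return -1; /* pretend failure */ }

    static void fini_vram(void)  { puts("undo vram");  }
    static void fini_fence(void) { puts("undo fence"); }

    static int driver_load(void)
    {
        int ret;

        if ((ret = init_fence()))
            goto out;
        if ((ret = init_vram()))      /* moved after the fence manager ... */
            goto out_no_vram;
        if ((ret = init_kms()))       /* ... so a KMS failure must undo it */
            goto out_no_kms;
        return 0;

    out_no_kms:
        fini_vram();                  /* unwind in reverse order of setup */
    out_no_vram:
        fini_fence();
    out:
        return ret;
    }

    int main(void)
    {
        return driver_load() ? 1 : 0;
    }
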
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33176d05db35..654c8daeb5ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_no_reloc;
}
bo = &vmw_bo->base;
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
- vmw_bo_p = NULL;
+ *vmw_bo_p = NULL;
return ret;
}
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_no_reloc;
}
bo = &vmw_bo->base;
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
- vmw_bo_p = NULL;
+ *vmw_bo_p = NULL;
return ret;
}
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
NULL, arg->command_size, arg->throttle_us,
(void __user *)(unsigned long)arg->fence_rep,
NULL);
-
+ ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
- goto out_unlock;
+ return ret;
vmw_kms_cursor_post_execbuf(dev_priv);
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
+ return 0;
}
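
The vmw_translate_mob_ptr()/vmw_translate_guest_ptr() fix replaces "vmw_bo_p = NULL" with "*vmw_bo_p = NULL": assigning to the parameter only changes the local copy, while dereferencing it clears the pointer the caller sees. A minimal demonstration of the difference, with invented names:

    #include <stdio.h>

    struct buf { int id; };

    /* Buggy variant: nulls only the local copy of the argument. */
    static int lookup_buggy(struct buf **out)
    {
        out = NULL;                     /* the caller's pointer is untouched */
        (void)out;
        return -1;
    }

    /* Fixed variant: clears what the caller actually sees. */
    static int lookup_fixed(struct buf **out)
    {
        *out = NULL;                    /* the caller now knows there is no buffer */
        return -1;
    }

    int main(void)
    {
        struct buf stale = { 42 };
        struct buf *a = &stale, *b = &stale;

        lookup_buggy(&a);               /* a still points at the stale object */
        lookup_fixed(&b);               /* b is cleanly NULLed */
        printf("buggy: a->id == %d;  fixed: b == %p\n", a->id, (void *)b);
        return 0;
    }
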
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8725b79e7847..07cda8cbbddb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
int i;
struct drm_mode_config *mode_config = &dev->mode_config;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
vmw_du_update_layout(dev_priv, 1, &def_rect);
- goto out_unlock;
+ return 0;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
GFP_KERNEL);
- if (unlikely(!rects)) {
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (unlikely(!rects))
+ return -ENOMEM;
user_rects = (void __user *)(unsigned long)arg->rects;
ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
out_free:
kfree(rects);
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 210cf4874cb7..edf274cabe81 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev)
status = driver->remove(client);
}
- if (dev->of_node)
- irq_dispose_mapping(client->irq);
-
dev_pm_domain_detach(&client->dev, true);
return status;
}
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 8ff612d160b0..563932500ff1 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
input_set_drvdata(input, keypad);
- error = request_threaded_irq(irq, NULL,
- tc3589x_keypad_irq, plat->irqtype,
- "tc3589x-keypad", keypad);
+ error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
+ plat->irqtype | IRQF_ONESHOT,
+ "tc3589x-keypad", keypad);
if (error < 0) {
dev_err(&pdev->dev,
"Could not allocate irq %d,error %d\n",
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 59d4dcddf6de..98228773a111 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c,
idev->private = m;
idev->input->name = MMA8450_DRV_NAME;
idev->input->id.bustype = BUS_I2C;
+ idev->input->dev.parent = &c->dev;
idev->poll = mma8450_poll;
idev->poll_interval = POLL_INTERVAL;
idev->poll_interval_max = POLL_INTERVAL_MAX;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d28726a0ef85..1bd15ebc01f2 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
return -ENOMEM;
error = alps_identify(psmouse, priv);
- if (error)
+ if (error) {
+ kfree(priv);
return error;
+ }
if (set_properties) {
psmouse->vendor = "ALPS";
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 77e9d70a986b..1e2291c378fe 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -20,7 +20,7 @@
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include "cyapa.h"
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index ddf5393a1180..5b611dd71e79 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -17,7 +17,7 @@
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/slab.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include <linux/crc-itu-t.h>
#include "cyapa.h"
@@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
electrodes_tx = cyapa->electrodes_x;
max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) &
~7u) * electrodes_tx;
- } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) {
+ } else {
offset = 2;
max_element_cnt = cyapa->electrodes_x +
cyapa->electrodes_y;
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 757f78a94aec..23d259416f2f 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse)
#define FOC_MAX_FINGERS 5
-#define FOC_MAX_X 2431
-#define FOC_MAX_Y 1663
-
/*
* Current state of a single finger on the touchpad.
*/
@@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse)
input_mt_slot(dev, i);
input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
if (active) {
- input_report_abs(dev, ABS_MT_POSITION_X, finger->x);
+ unsigned int clamped_x, clamped_y;
+ /*
+ * The touchpad might report invalid data, so we clamp
+ * the resulting values so that we do not confuse
+ * userspace.
+ */
+ clamped_x = clamp(finger->x, 0U, priv->x_max);
+ clamped_y = clamp(finger->y, 0U, priv->y_max);
+ input_report_abs(dev, ABS_MT_POSITION_X, clamped_x);
input_report_abs(dev, ABS_MT_POSITION_Y,
- FOC_MAX_Y - finger->y);
+ priv->y_max - clamped_y);
}
}
input_mt_report_pointer_emulation(dev, true);
@@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse,
state->pressed = (packet[0] >> 4) & 1;
- /*
- * packet[5] contains some kind of tool size in the most
- * significant nibble. 0xff is a special value (latching) that
- * signals a large contact area.
- */
- if (packet[5] == 0xff) {
- state->fingers[finger].valid = false;
- return;
- }
-
state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2];
state->fingers[finger].y = (packet[3] << 8) | packet[4];
state->fingers[finger].valid = true;
@@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse)
return 0;
}
+
+static void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution)
+{
+ /* not supported yet */
+}
+
+static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+ /* not supported yet */
+}
+
+static void focaltech_set_scale(struct psmouse *psmouse,
+ enum psmouse_scale scale)
+{
+ /* not supported yet */
+}
+
int focaltech_init(struct psmouse *psmouse)
{
struct focaltech_data *priv;
@@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse)
psmouse->cleanup = focaltech_reset;
/* resync is not supported yet */
psmouse->resync_time = 0;
+ /*
+ * rate/resolution/scale changes are not supported yet, and
+ * the generic implementations of these functions seem to
+ * confuse some touchpads
+ */
+ psmouse->set_resolution = focaltech_set_resolution;
+ psmouse->set_rate = focaltech_set_rate;
+ psmouse->set_scale = focaltech_set_scale;
return 0;
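
The focaltech_report_state() change clamps possibly bogus hardware coordinates into [0, max] before reporting them and then flips Y. A stand-alone version of that clamping; clamp_uint() is a local helper standing in for the kernel's clamp() macro, and the limits are only example values:

    #include <stdio.h>

    /* Local stand-in for the kernel's clamp() macro. */
    static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
    {
        if (v < lo)
            return lo;
        if (v > hi)
            return hi;
        return v;
    }

    int main(void)
    {
        unsigned int x_max = 2431, y_max = 1663;   /* illustrative touchpad limits */
        unsigned int raw_x = 4095, raw_y = 900;    /* bogus X, sane Y */

        unsigned int clamped_x = clamp_uint(raw_x, 0, x_max);
        unsigned int clamped_y = clamp_uint(raw_y, 0, y_max);

        /* The driver also flips Y so that 0 ends up at the top of the pad. */
        printf("report X=%u Y=%u\n", clamped_x, y_max - clamped_y);
        return 0;
    }
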
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 4ccd01d7a48d..8bc61237bc1b 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
}
/*
+ * Here we set the mouse scaling.
+ */
+
+static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale)
+{
+ ps2_command(&psmouse->ps2dev, NULL,
+ scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 :
+ PSMOUSE_CMD_SETSCALE11);
+}
+
+/*
* psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
*/
@@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
psmouse->set_rate = psmouse_set_rate;
psmouse->set_resolution = psmouse_set_resolution;
+ psmouse->set_scale = psmouse_set_scale;
psmouse->poll = psmouse_poll;
psmouse->protocol_handler = psmouse_process_byte;
psmouse->pktsize = 3;
@@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse)
if (psmouse_max_proto != PSMOUSE_PS2) {
psmouse->set_rate(psmouse, psmouse->rate);
psmouse->set_resolution(psmouse, psmouse->resolution);
- ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
+ psmouse->set_scale(psmouse, PSMOUSE_SCALE11);
}
}
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index c2ff137ecbdb..d02e1bdc9ae4 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -36,6 +36,11 @@ typedef enum {
PSMOUSE_FULL_PACKET
} psmouse_ret_t;
+enum psmouse_scale {
+ PSMOUSE_SCALE11,
+ PSMOUSE_SCALE21
+};
+
struct psmouse {
void *private;
struct input_dev *dev;
@@ -67,6 +72,7 @@ struct psmouse {
psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse);
void (*set_rate)(struct psmouse *psmouse, unsigned int rate);
void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution);
+ void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale);
int (*reconnect)(struct psmouse *psmouse);
void (*disconnect)(struct psmouse *psmouse);
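
The psmouse change turns the SETSCALE command into a per-protocol hook: a generic default is installed by psmouse_apply_defaults() and hardware that mishandles the command, such as focaltech, overrides it with a no-op. A compact sketch of that function-pointer dispatch, with invented names:

    #include <stdio.h>

    enum scale { SCALE11, SCALE21 };

    struct mouse {
        const char *name;
        void (*set_scale)(struct mouse *m, enum scale s);
    };

    /* Generic implementation: would issue the PS/2 SETSCALE command. */
    static void generic_set_scale(struct mouse *m, enum scale s)
    {
        printf("%s: sending SETSCALE%s\n", m->name, s == SCALE21 ? "21" : "11");
    }

    /* Quirky hardware: silently ignore the request. */
    static void noop_set_scale(struct mouse *m, enum scale s)
    {
        (void)m; (void)s;
    }

    int main(void)
    {
        struct mouse generic = { "generic", generic_set_scale };
        struct mouse quirky  = { "quirky",  noop_set_scale };  /* protocol override */

        generic.set_scale(&generic, SCALE11);   /* prints the command */
        quirky.set_scale(&quirky, SCALE11);     /* does nothing */
        return 0;
    }
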
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 58917525126e..6261fd6d7c3c 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I
tristate "Allwinner sun4i resistive touchscreen controller support"
depends on ARCH_SUNXI || COMPILE_TEST
depends on HWMON
+ depends on THERMAL || !THERMAL_OF
help
This selects support for the resistive touchscreen controller
found on Allwinner sunxi SoCs.
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index baa0d9786f50..1ae4e547b419 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE
config IOMMU_IO_PGTABLE_LPAE
bool "ARMv7/v8 Long Descriptor Format"
select IOMMU_IO_PGTABLE
+ depends on ARM || ARM64 || COMPILE_TEST
help
Enable support for the ARM long descriptor pagetable format.
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -63,6 +64,7 @@ config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
+ depends on BROKEN
select IOMMU_API
help
Support for the IOMMUs found on certain Qualcomm SOCs.
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 7ce52737c7a1..dc14fec4ede1 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = {
static int __init exynos_iommu_init(void)
{
+ struct device_node *np;
int ret;
+ np = of_find_matching_node(NULL, sysmmu_of_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
if (!lv2table_kmem_cache) {
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 5a500edf00cc..b610a8dee238 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -56,7 +56,8 @@
((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
* (d)->bits_per_level) + (d)->pg_shift)
-#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
+#define ARM_LPAE_PAGES_PER_PGD(d) \
+ DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
/*
* Calculate the index at level l used to map virtual address a using the
@@ -66,7 +67,7 @@
((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
#define ARM_LPAE_LVL_IDX(a,l,d) \
- (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
+ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
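
The ARM_LPAE_LVL_IDX change casts the IOVA to u64 before shifting, since on configurations where the address argument is only 32 bits wide the shift would truncate the index (and a shift of 32 or more would even be undefined). The sketch below shows only the truncation half of that problem, with made-up level parameters:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A 39-bit VA whose level-1 index is 0x23 and level-2 index is 0x1ff. */
        uint64_t iova = (0x23ULL << 30) | (0x1ffULL << 21);
        int shift = 30, bits_per_level = 9;
        uint32_t mask = (1u << bits_per_level) - 1;

        /* Truncating the address to 32 bits first (what a 32-bit unsigned
         * long operand would do) loses the upper part of the index. */
        uint32_t idx_narrow = ((uint32_t)iova >> shift) & mask;

        /* Widening to 64 bits before the shift keeps the full index. */
        uint32_t idx_wide = (uint32_t)((iova >> shift) & mask);

        printf("narrow idx %" PRIu32 ", wide idx %" PRIu32 "\n",
               idx_narrow, idx_wide);
        return 0;
    }
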
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index f59f857b702e..a4ba851825c2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void)
struct kmem_cache *p;
const unsigned long flags = SLAB_HWCACHE_ALIGN;
size_t align = 1 << 10; /* L2 pagetable alignement */
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, omap_iommu_of_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
iopte_cachep_ctor);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 6a8b1ec4a48a..9f74fddcd304 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = {
static int __init rk_iommu_init(void)
{
+ struct device_node *np;
int ret;
+ np = of_find_matching_node(NULL, rk_iommu_dt_ids);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
if (ret)
return ret;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 463c235acbdc..4387dae14e45 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
static u32 doorbell_mask_reg;
+static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
{
if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
armada_xp_mpic_smp_cpu_init();
+
return NOTIFY_OK;
}
@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
.priority = 100,
};
+static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mpic_cascaded_cpu_notifier = {
+ .notifier_call = mpic_cascaded_secondary_init,
+ .priority = 100,
+};
+
#endif /* CONFIG_SMP */
static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
struct device_node *parent)
{
struct resource main_int_res, per_cpu_int_res;
- int parent_irq, nr_irqs, i;
+ int nr_irqs, i;
u32 control;
BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
#endif
} else {
+#ifdef CONFIG_SMP
+ register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+#endif
irq_set_chained_handler(parent_irq,
armada_370_xp_mpic_handle_cascade_irq);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d8996bdf0f61..596b0a9eee99 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its,
{
struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
struct its_collection *sync_col;
+ unsigned long flags;
- raw_spin_lock(&its->lock);
+ raw_spin_lock_irqsave(&its->lock, flags);
cmd = its_allocate_entry(its);
if (!cmd) { /* We're soooooo screewed... */
pr_err_ratelimited("ITS can't allocate, dropping command\n");
- raw_spin_unlock(&its->lock);
+ raw_spin_unlock_irqrestore(&its->lock, flags);
return;
}
sync_col = builder(cmd, desc);
@@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its,
post:
next_cmd = its_post_commands(its);
- raw_spin_unlock(&its->lock);
+ raw_spin_unlock_irqrestore(&its->lock, flags);
its_wait_for_range_completion(its, cmd, next_cmd);
}
@@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its)
{
int err;
int i;
- int psz = PAGE_SIZE;
+ int psz = SZ_64K;
u64 shr = GITS_BASER_InnerShareable;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
u64 type = GITS_BASER_TYPE(val);
u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
+ int order = get_order(psz);
+ int alloc_size;
u64 tmp;
void *base;
if (type == GITS_BASER_TYPE_NONE)
continue;
- /* We're lazy and only allocate a single page for now */
- base = (void *)get_zeroed_page(GFP_KERNEL);
+ /*
+ * Allocate as many entries as required to fit the
+ * range of device IDs that the ITS can grok... The ID
+ * space being incredibly sparse, this results in a
+ * massive waste of memory.
+ *
+ * For other tables, only allocate a single page.
+ */
+ if (type == GITS_BASER_TYPE_DEVICE) {
+ u64 typer = readq_relaxed(its->base + GITS_TYPER);
+ u32 ids = GITS_TYPER_DEVBITS(typer);
+
+ order = get_order((1UL << ids) * entry_size);
+ if (order >= MAX_ORDER) {
+ order = MAX_ORDER - 1;
+ pr_warn("%s: Device Table too large, reduce its page order to %u\n",
+ its->msi_chip.of_node->full_name, order);
+ }
+ }
+
+ alloc_size = (1 << order) * PAGE_SIZE;
+ base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!base) {
err = -ENOMEM;
goto out_free;
@@ -841,7 +864,7 @@ retry_baser:
break;
}
- val |= (PAGE_SIZE / psz) - 1;
+ val |= (alloc_size / psz) - 1;
writeq_relaxed(val, its->base + GITS_BASER + i * 8);
tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -882,7 +905,7 @@ retry_baser:
}
pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
- (int)(PAGE_SIZE / entry_size),
+ (int)(alloc_size / entry_size),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
@@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void)
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
struct its_device *its_dev = NULL, *tmp;
+ unsigned long flags;
- raw_spin_lock(&its->lock);
+ raw_spin_lock_irqsave(&its->lock, flags);
list_for_each_entry(tmp, &its->its_device_list, entry) {
if (tmp->device_id == dev_id) {
@@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
}
}
- raw_spin_unlock(&its->lock);
+ raw_spin_unlock_irqrestore(&its->lock, flags);
return its_dev;
}
@@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
{
struct its_device *dev;
unsigned long *lpi_map;
+ unsigned long flags;
void *itt;
int lpi_base;
int nr_lpis;
@@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2UL, roundup_pow_of_two(nvecs));
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kmalloc(sz, GFP_KERNEL);
+ itt = kzalloc(sz, GFP_KERNEL);
lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
if (!dev || !itt || !lpi_map) {
@@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry);
- raw_spin_lock(&its->lock);
+ raw_spin_lock_irqsave(&its->lock, flags);
list_add(&dev->entry, &its->its_device_list);
- raw_spin_unlock(&its->lock);
+ raw_spin_unlock_irqrestore(&its->lock, flags);
/* Bind the device to the first possible CPU */
cpu = cpumask_first(cpu_online_mask);
@@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
static void its_free_device(struct its_device *its_dev)
{
- raw_spin_lock(&its_dev->its->lock);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&its_dev->its->lock, flags);
list_del(&its_dev->entry);
- raw_spin_unlock(&its_dev->its->lock);
+ raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
kfree(its_dev->itt);
kfree(its_dev);
}
@@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
return 0;
}
+struct its_pci_alias {
+ struct pci_dev *pdev;
+ u32 dev_id;
+ u32 count;
+};
+
+static int its_pci_msi_vec_count(struct pci_dev *pdev)
+{
+ int msi, msix;
+
+ msi = max(pci_msi_vec_count(pdev), 0);
+ msix = max(pci_msix_vec_count(pdev), 0);
+
+ return max(msi, msix);
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct its_pci_alias *dev_alias = data;
+
+ dev_alias->dev_id = alias;
+ if (pdev != dev_alias->pdev)
+ dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+
+ return 0;
+}
+
static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
struct pci_dev *pdev;
struct its_node *its;
- u32 dev_id;
struct its_device *its_dev;
+ struct its_pci_alias dev_alias;
if (!dev_is_pci(dev))
return -EINVAL;
pdev = to_pci_dev(dev);
- dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ dev_alias.pdev = pdev;
+ dev_alias.count = nvec;
+
+ pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
its = domain->parent->host_data;
- its_dev = its_find_device(its, dev_id);
- if (WARN_ON(its_dev))
- return -EINVAL;
+ its_dev = its_find_device(its, dev_alias.dev_id);
+ if (its_dev) {
+ /*
+ * We already have seen this ID, probably through
+ * another alias (PCI bridge of some sort). No need to
+ * create the device.
+ */
+ dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
+ goto out;
+ }
- its_dev = its_create_device(its, dev_id, nvec);
+ its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
if (!its_dev)
return -ENOMEM;
- dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec));
-
+ dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
+ dev_alias.count, ilog2(dev_alias.count));
+out:
info->scratchpad[0].ptr = its_dev;
info->scratchpad[1].ptr = dev;
return 0;
@@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = {
.deactivate = its_irq_domain_deactivate,
};
+static int its_force_quiescent(void __iomem *base)
+{
+ u32 count = 1000000; /* 1s */
+ u32 val;
+
+ val = readl_relaxed(base + GITS_CTLR);
+ if (val & GITS_CTLR_QUIESCENT)
+ return 0;
+
+ /* Disable the generation of all interrupts to this ITS */
+ val &= ~GITS_CTLR_ENABLE;
+ writel_relaxed(val, base + GITS_CTLR);
+
+ /* Poll GITS_CTLR and wait until ITS becomes quiescent */
+ while (1) {
+ val = readl_relaxed(base + GITS_CTLR);
+ if (val & GITS_CTLR_QUIESCENT)
+ return 0;
+
+ count--;
+ if (!count)
+ return -EBUSY;
+
+ cpu_relax();
+ udelay(1);
+ }
+}
+
static int its_probe(struct device_node *node, struct irq_domain *parent)
{
struct resource res;
@@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
goto out_unmap;
}
+ err = its_force_quiescent(its_base);
+ if (err) {
+ pr_warn("%s: failed to quiesce, giving up\n",
+ node->full_name);
+ goto out_unmap;
+ }
+
pr_info("ITS: %s\n", node->full_name);
its = kzalloc(sizeof(*its), GFP_KERNEL);
@@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
writeq_relaxed(baser, its->base + GITS_CBASER);
tmp = readq_relaxed(its->base + GITS_CBASER);
writeq_relaxed(0, its->base + GITS_CWRITER);
- writel_relaxed(1, its->base + GITS_CTLR);
+ writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
pr_info("ITS: using cache flushing for cmd queue\n");
@@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void)
int its_cpu_init(void)
{
- if (!gic_rdists_supports_plpis()) {
- pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
- return -ENXIO;
- }
-
if (!list_empty(&its_nodes)) {
+ if (!gic_rdists_supports_plpis()) {
+ pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
+ return -ENXIO;
+ }
its_cpu_init_lpis();
its_cpu_init_collection();
}
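
The its_alloc_tables() rework sizes the device table from the DevID width reported by GITS_TYPER instead of always taking a single page, and caps the allocation order at MAX_ORDER - 1. A small arithmetic sketch of that sizing; get_order() and the constants are local stand-ins for the kernel versions:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define MAX_ORDER  11                      /* typical kernel default */

    /* Local stand-in for get_order(): pages rounded up to a power of two. */
    static int get_order(unsigned long size)
    {
        int order = 0;
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long entry_size = 8;          /* bytes per device-table entry */
        unsigned int ids = 16;                 /* DevID width from GITS_TYPER */

        int order = get_order((1UL << ids) * entry_size);
        if (order >= MAX_ORDER) {
            order = MAX_ORDER - 1;             /* cap huge ID spaces */
            printf("device table too large, capping order\n");
        }

        unsigned long alloc_size = (1UL << order) * PAGE_SIZE;
        printf("ids=%u -> order %d, %lu KiB, %lu entries\n",
               ids, order, alloc_size / 1024, alloc_size / entry_size);
        return 0;
    }
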
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1c6dea2fbc34..fd8850def1b8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
tlist |= 1 << (mpidr & 0xf);
cpu = cpumask_next(cpu, mask);
- if (cpu == nr_cpu_ids)
+ if (cpu >= nr_cpu_ids)
goto out;
mpidr = cpu_logical_map(cpu);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4634cf7d0ec3..471e1cdc1933 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d)
static void gic_mask_irq(struct irq_data *d)
{
u32 mask = 1 << (gic_irq(d) % 32);
+ unsigned long flags;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
if (gic_arch_extn.irq_mask)
gic_arch_extn.irq_mask(d);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_unmask_irq(struct irq_data *d)
{
u32 mask = 1 << (gic_irq(d) % 32);
+ unsigned long flags;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
if (gic_arch_extn.irq_unmask)
gic_arch_extn.irq_unmask(d);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_eoi_irq(struct irq_data *d)
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
unsigned int gicirq = gic_irq(d);
+ unsigned long flags;
int ret;
/* Interrupt configuration for SGIs can't be changed */
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
ret = gic_configure_irq(gicirq, type, base, NULL);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
return ret;
}
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
u32 val, mask, bit;
+ unsigned long flags;
if (!force)
cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
mask = 0xff << shift;
bit = gic_cpu_map[cpu] << shift;
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
return IRQ_SET_MASK_OK;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b76a173cd95..5897d8d8fa5a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI
config MTD_NAND_HISI504
tristate "Support for NAND controller on Hisilicon SoC Hip04"
+ depends on HAS_DMA
help
Enables support for NAND controller on Hisilicon SoC Hip04.
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 96b0b1d27df1..10b1f7a4fe50 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
nand_writel(info, NDCR, ndcr | int_mask);
}
+static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
+{
+ if (info->ecc_bch) {
+ int timeout;
+
+ /*
+ * According to the datasheet, when reading from NDDB
+ * with BCH enabled, after each 32 bytes reads, we
+ * have to make sure that the NDSR.RDDREQ bit is set.
+ *
+ * Drain the FIFO in bursts of eight 32-bit reads, and skip
+ * the polling on the last read.
+ */
+ while (len > 8) {
+ __raw_readsl(info->mmio_base + NDDB, data, 8);
+
+ for (timeout = 0;
+ !(nand_readl(info, NDSR) & NDSR_RDDREQ);
+ timeout++) {
+ if (timeout >= 5) {
+ dev_err(&info->pdev->dev,
+ "Timeout on RDDREQ while draining the FIFO\n");
+ return;
+ }
+
+ mdelay(1);
+ }
+
+ data += 32;
+ len -= 8;
+ }
+ }
+
+ __raw_readsl(info->mmio_base + NDDB, data, len);
+}
+
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
unsigned int do_bytes = min(info->data_size, info->chunk_size);
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
DIV_ROUND_UP(info->oob_size, 4));
break;
case STATE_PIO_READING:
- __raw_readsl(info->mmio_base + NDDB,
- info->data_buff + info->data_buff_pos,
- DIV_ROUND_UP(do_bytes, 4));
+ drain_fifo(info,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
if (info->oob_size > 0)
- __raw_readsl(info->mmio_base + NDDB,
- info->oob_buff + info->oob_buff_pos,
- DIV_ROUND_UP(info->oob_size, 4));
+ drain_fifo(info,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
default:
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev)
int ret, irq, cs;
pdata = dev_get_platdata(&pdev->dev);
+ if (pdata->num_cs <= 0)
+ return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
if (!info)
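
The drain_fifo() helper added above reads the BCH-protected FIFO in bursts of eight 32-bit words and re-polls NDSR.RDDREQ between bursts; note that the buffer pointer advances by 32 bytes while the remaining length is counted in words. A stand-alone model of that bookkeeping, with a plain array simulating the FIFO:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simulated FIFO: hands out 32-bit words from a backing array. */
    static uint32_t fifo[64];
    static int fifo_pos;

    static void read_words(void *data, int words)
    {
        memcpy(data, &fifo[fifo_pos], (size_t)words * 4);
        fifo_pos += words;
    }

    static void drain_fifo(void *data, int len /* in 32-bit words */)
    {
        while (len > 8) {
            read_words(data, 8);           /* one burst of eight words... */
            /* ...here the driver polls NDSR.RDDREQ before continuing */
            data = (uint8_t *)data + 32;   /* 8 words == 32 bytes */
            len -= 8;
        }
        read_words(data, len);             /* final, unpolled tail */
    }

    int main(void)
    {
        uint32_t buf[20];

        for (int i = 0; i < 64; i++)
            fifo[i] = (uint32_t)i;

        drain_fifo(buf, 20);
        printf("first %u ... last %u\n", (unsigned)buf[0], (unsigned)buf[19]);
        return 0;
    }
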
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c82e02e3dae..b0f69248cb71 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 2928f7003041..a316fa4b91ab 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,7 @@
* Copyright (C) 2015 Valeo S.A.
*/
+#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -584,8 +585,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
while (pos <= actual_len - MSG_HEADER_LEN) {
tmp = buf + pos;
- if (!tmp->len)
- break;
+ /* Handle messages crossing the USB endpoint max packet
+ * size boundary. Check kvaser_usb_read_bulk_callback()
+ * for further details.
+ */
+ if (tmp->len == 0) {
+ pos = round_up(pos,
+ dev->bulk_in->wMaxPacketSize);
+ continue;
+ }
if (pos + tmp->len > actual_len) {
dev_err(dev->udev->dev.parent,
@@ -787,7 +795,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
netdev_err(netdev, "Error transmitting URB\n");
usb_unanchor_urb(urb);
usb_free_urb(urb);
- kfree(buf);
return err;
}
@@ -1317,8 +1324,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
while (pos <= urb->actual_length - MSG_HEADER_LEN) {
msg = urb->transfer_buffer + pos;
- if (!msg->len)
- break;
+ /* The Kvaser firmware can only read and write messages that
+ * do not cross the USB endpoint's wMaxPacketSize boundary.
+ * If a follow-up command crosses such a boundary, the firmware puts
+ * a placeholder zero-length command in its place and then aligns
+ * the real command to the next max packet size.
+ *
+ * Handle such cases or we're going to miss a significant
+ * number of events in case of a heavy rx load on the bus.
+ */
+ if (msg->len == 0) {
+ pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+ continue;
+ }
if (pos + msg->len > urb->actual_length) {
dev_err(dev->udev->dev.parent, "Format error\n");
@@ -1326,7 +1344,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
}
kvaser_usb_handle_message(dev, msg);
-
pos += msg->len;
}
@@ -1615,8 +1632,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
struct urb *urb;
void *buf;
struct kvaser_msg *msg;
- int i, err;
- int ret = NETDEV_TX_OK;
+ int i, err, ret = NETDEV_TX_OK;
u8 *msg_tx_can_flags = NULL; /* GCC */
if (can_dropped_invalid_skb(netdev, skb))
@@ -1634,7 +1650,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (!buf) {
stats->tx_dropped++;
dev_kfree_skb(skb);
- goto nobufmem;
+ goto freeurb;
}
msg = buf;
@@ -1681,8 +1697,10 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
/* This should never happen; it implies a flow control bug */
if (!context) {
netdev_warn(netdev, "cannot find free context\n");
+
+ kfree(buf);
ret = NETDEV_TX_BUSY;
- goto releasebuf;
+ goto freeurb;
}
context->priv = priv;
@@ -1719,16 +1737,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
else
netdev_warn(netdev, "Failed tx_urb %d\n", err);
- goto releasebuf;
+ goto freeurb;
}
- usb_free_urb(urb);
-
- return NETDEV_TX_OK;
+ ret = NETDEV_TX_OK;
-releasebuf:
- kfree(buf);
-nobufmem:
+freeurb:
usb_free_urb(urb);
return ret;
}
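
The kvaser_usb fix stops treating a zero-length message as end-of-buffer; the parser instead rounds the read position up to the next wMaxPacketSize boundary and keeps scanning. A user-space model of that loop; the message layout and sizes are invented for the demo:

    #include <stdio.h>

    #define MAX_PACKET 64                     /* stands in for bulk_in->wMaxPacketSize */

    static unsigned int round_up_to(unsigned int x, unsigned int a)
    {
        return ((x + a - 1) / a) * a;         /* same result as the kernel's round_up() */
    }

    int main(void)
    {
        /* Each entry is the message length at that position; 0 marks the
         * padding placeholder the firmware inserts before a packet boundary. */
        unsigned char len_at[160] = {0};
        len_at[0]  = 24;
        len_at[24] = 30;                      /* 24 + 30 = 54, next real msg at 64 */
        len_at[54] = 0;
        len_at[64] = 40;

        unsigned int pos = 0, actual_len = 104, handled = 0;

        while (pos < actual_len) {
            unsigned int len = len_at[pos];

            if (len == 0) {                   /* placeholder: skip to packet boundary */
                pos = round_up_to(pos, MAX_PACKET);
                continue;
            }
            handled++;
            pos += len;
        }
        printf("handled %u messages\n", handled);
        return 0;
    }
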
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 962c3f027383..0bac0f14edc3 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
pdev->usb_if = ppdev->usb_if;
pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
+
+ /* do a copy of the ctrlmode[_supported] too */
+ dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
+ dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 869d97fcf781..b927021c6c40 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- if (!efi_enabled(EFI_BOOT)) {
+ if (pdata->clk) {
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4de62b210c85..635a83be7e5e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
{ "APMC0D05", },
+ { "APMC0D30", },
+ { "APMC0D31", },
{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#ifdef CONFIG_OF
static struct of_device_id xgene_enet_of_match[] = {
{.compatible = "apm,xgene-enet",},
+ {.compatible = "apm,xgene1-sgenet",},
+ {.compatible = "apm,xgene1-xgenet",},
{},
};
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 21206d33b638..a7f2cc3e485e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
- int tx_work_done, rx_work_done;
+ int rx_work_done;
priv = container_of(napi, struct bcm_enet_priv, napi);
dev = priv->net_dev;
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
ENETDMAC_IR, priv->tx_chan);
/* reclaim sent skb */
- tx_work_done = bcm_enet_tx_reclaim(dev, 0);
+ bcm_enet_tx_reclaim(dev, 0);
spin_lock(&priv->rx_lock);
rx_work_done = bcm_enet_receive_queue(dev, budget);
spin_unlock(&priv->rx_lock);
- if (rx_work_done >= budget || tx_work_done > 0) {
- /* rx/tx queue is not yet empty/clean */
+ if (rx_work_done >= budget) {
+ /* rx queue is not yet empty/clean */
return rx_work_done;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 676ffe093180..0469f72c6e7e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
slot->skb = skb;
slot->dma_addr = dma_addr;
- if (slot->dma_addr & 0xC0000000)
- bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
-
return 0;
}
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring->mmio_base);
goto err_dma_free;
}
- if (ring->dma_base & 0xC0000000)
- bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
BGMAC_DMA_RING_TX);
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
err = -ENOMEM;
goto err_dma_free;
}
- if (ring->dma_base & 0xC0000000)
- bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
BGMAC_DMA_RING_RX);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7155e1d2c208..bef750a09027 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
+ /* Set PCIe reset type to fundamental for EEH recovery */
+ pdev->needs_freset = 1;
+
/* AER (Advanced Error reporting) configuration */
rc = pci_enable_pcie_error_reporting(pdev);
if (!rc)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 149a0d70c108..b97122926d3a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
return -EINVAL;
+ reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (wol->wolopts & WAKE_MAGICSECURE) {
bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
UMAC_MPD_PW_MS);
bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
UMAC_MPD_PW_LS);
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_PW_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ } else {
+ reg &= ~MPD_PW_EN;
}
+ bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ad76b8e35a00..81d41539fcba 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = {
};
#if defined(CONFIG_OF)
-static struct macb_config pc302gem_config = {
+static const struct macb_config pc302gem_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
};
-static struct macb_config sama5d3_config = {
+static const struct macb_config sama5d3_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
};
-static struct macb_config sama5d4_config = {
+static const struct macb_config sama5d4_config = {
.caps = 0,
.dma_burst_length = 4,
};
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp)
if (bp->pdev->dev.of_node) {
match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
if (match && match->data) {
- config = (const struct macb_config *)match->data;
+ config = match->data;
bp->caps = config->caps;
/*
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 31dc080f2437..ff85619a9732 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -351,7 +351,7 @@
/* Bitfields in MID */
#define MACB_IDNUM_OFFSET 16
-#define MACB_IDNUM_SIZE 16
+#define MACB_IDNUM_SIZE 12
#define MACB_REV_OFFSET 0
#define MACB_REV_SIZE 16
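
The MACB_IDNUM_SIZE change matters because the driver extracts fields from the MID register with offset/size pairs, so the size directly sets the mask width. A tiny sketch of that extraction style; FIELD_GET() mirrors the shape of the driver's bitfield macros and the register value is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_GET(val, offset, size) \
        (((val) >> (offset)) & ((1u << (size)) - 1))

    int main(void)
    {
        uint32_t mid = 0x00020119;   /* made-up MID readout */

        /* A 12-bit IDNUM mask keeps the bits above the ID field (which encode
         * something else) out of the comparison; a 16-bit mask would not. */
        printf("id (12-bit field) = 0x%03x, rev = 0x%04x\n",
               (unsigned)FIELD_GET(mid, 16, 12),
               (unsigned)FIELD_GET(mid, 0, 16));
        return 0;
    }
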
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9bb6220663b2..99492b7e3713 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1597,7 +1597,7 @@ fec_enet_interrupt(int irq, void *dev_id)
writel(int_events, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
- if (fep->work_tx || fep->work_rx) {
+ if ((fep->work_tx || fep->work_rx) && fep->link) {
ret = IRQ_HANDLED;
if (napi_schedule_prep(&fep->napi)) {
@@ -3383,7 +3383,6 @@ fec_drv_remove(struct platform_device *pdev)
regulator_disable(fep->reg_phy);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
- fec_enet_clk_enable(ndev, false);
of_node_put(fep->phy_node);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 178e54028d10..7bf3682cdf47 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
+static int gfar_of_group_count(struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ if (!of_node_cmp(child->name, "queue-group"))
+ num++;
+
+ return num;
+}
+
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
const char *model;
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_rx_qs = 1;
} else { /* MQ_MG_MODE */
/* get the actual number of supported groups */
- unsigned int num_grps = of_get_available_child_count(np);
+ unsigned int num_grps = gfar_of_group_count(np);
if (num_grps == 0 || num_grps > MAXGROUPS) {
dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
/* Parse and initialize group specific information */
if (priv->mode == MQ_MG_MODE) {
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
+ if (of_node_cmp(child->name, "queue-group"))
+ continue;
+
err = gfar_parse_group(child, priv, model);
if (err)
goto err_grp_init;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 209ee1b27f8d..5d093dc0f5f5 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -92,6 +92,7 @@ static const char version[] =
#include "smc91x.h"
#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/assabet.h>
#include <mach/neponset.h>
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index fb846ebba1d9..f9b42f11950f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat = NULL;
const char *mac = NULL;
+ int irq, wol_irq, lpi_irq;
+
+ /* Get IRQ information early so that we can ask for deferred
+ * probe if needed, before we go too far with resource allocation.
+ */
+ irq = platform_get_irq_byname(pdev, "macirq");
+ if (irq < 0) {
+ if (irq != -EPROBE_DEFER) {
+ dev_err(dev,
+ "MAC IRQ configuration information not found\n");
+ }
+ return irq;
+ }
+
+ /* On some platforms, e.g. SPEAr, the wake up irq differs from the mac irq.
+ * The external wake up irq can be passed through the platform code,
+ * named "eth_wake_irq".
+ *
+ * If the wake up interrupt is not passed from the platform,
+ * the driver will continue to use the mac irq (ndev->irq).
+ */
+ wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (wol_irq < 0) {
+ if (wol_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ wol_irq = irq;
+ }
+
+ lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (lpi_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(dev, res);
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
return PTR_ERR(priv);
}
+ /* Copy IRQ values to the priv structure, which is now available */
+ priv->dev->irq = irq;
+ priv->wol_irq = wol_irq;
+ priv->lpi_irq = lpi_irq;
+
/* Get MAC address if available (DT) */
if (mac)
memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
- /* Get the MAC information */
- priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
- if (priv->dev->irq < 0) {
- if (priv->dev->irq != -EPROBE_DEFER) {
- netdev_err(priv->dev,
- "MAC IRQ configuration information not found\n");
- }
- return priv->dev->irq;
- }
-
- /*
- * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
- * The external wake up irq can be passed through the platform code
- * named as "eth_wake_irq"
- *
- * In case the wake up interrupt is not passed from the platform
- * so the driver will continue to use the mac irq (ndev->irq)
- */
- priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq < 0) {
- if (priv->wol_irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- priv->wol_irq = priv->dev->irq;
- }
-
- priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
- if (priv->lpi_irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
platform_set_drvdata(pdev, priv->dev);
pr_debug("STMMAC platform driver registration completed");
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f1ee71e22241..7d394846afc2 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1730,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list)
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
team->ops.port_change_dev_addr(team, port);
- rcu_read_unlock();
+ mutex_unlock(&team->lock);
return 0;
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f38227afe099..3aa8648080c8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
unsigned int num_queues = vif->num_queues;
int i;
unsigned int queue_index;
- struct xenvif_stats *vif_stats;
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
unsigned long accum = 0;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
- vif_stats = &vif->queues[queue_index].stats;
+ void *vif_stats = &vif->queues[queue_index].stats;
accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
}
data[i] = accum;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c4d68d768408..cab9f5257f57 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1349,7 +1349,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
{
unsigned int offset = skb_headlen(skb);
skb_frag_t frags[MAX_SKB_FRAGS];
- int i;
+ int i, f;
struct ubuf_info *uarg;
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
@@ -1389,23 +1389,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
frags[i].page_offset = 0;
skb_frag_size_set(&frags[i], len);
}
- /* swap out with old one */
- memcpy(skb_shinfo(skb)->frags,
- frags,
- i * sizeof(skb_frag_t));
- skb_shinfo(skb)->nr_frags = i;
- skb->truesize += i * PAGE_SIZE;
- /* remove traces of mapped pages and frag_list */
+ /* Copied all the bits from the frag list -- free it. */
skb_frag_list_init(skb);
+ xenvif_skb_zerocopy_prepare(queue, nskb);
+ kfree_skb(nskb);
+
+ /* Release all the original (foreign) frags. */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_frag_unref(skb, f);
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
uarg->callback(uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
- xenvif_skb_zerocopy_prepare(queue, nskb);
- kfree_skb(nskb);
+ /* Fill the skb with the new (local) frags. */
+ memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
+ skb_shinfo(skb)->nr_frags = i;
+ skb->truesize += i * PAGE_SIZE;
return 0;
}
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 38d1c51f58b1..7bcaeec876c0 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -84,8 +84,7 @@ config OF_RESOLVE
bool
config OF_OVERLAY
- bool
- depends on OF
+ bool "Device Tree overlays"
select OF_DYNAMIC
select OF_RESOLVE
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 0a8aeb8523fe..adb8764861c0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -714,16 +714,17 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
const char *path)
{
struct device_node *child;
- int len = strchrnul(path, '/') - path;
- int term;
+ int len;
+ const char *end;
+ end = strchr(path, ':');
+ if (!end)
+ end = strchrnul(path, '/');
+
+ len = end - path;
if (!len)
return NULL;
- term = strchrnul(path, ':') - path;
- if (term < len)
- len = term;
-
__for_each_child_of_node(parent, child) {
const char *name = strrchr(child->full_name, '/');
if (WARN(!name, "malformed device_node %s\n", child->full_name))
@@ -768,8 +769,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
/* The path could begin with an alias */
if (*path != '/') {
- char *p = strchrnul(path, '/');
- int len = separator ? separator - path : p - path;
+ int len;
+ const char *p = separator;
+
+ if (!p)
+ p = strchrnul(path, '/');
+ len = p - path;
/* of_aliases must not be NULL */
if (!of_aliases)
@@ -794,6 +799,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
path++; /* Increment past '/' delimiter */
np = __of_find_node_by_path(np, path);
path = strchrnul(path, '/');
+ if (separator && separator < path)
+ break;
}
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
@@ -1886,8 +1893,10 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
name = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (IS_ENABLED(CONFIG_PPC) && !name)
name = of_get_property(of_aliases, "stdout", NULL);
- if (name)
+ if (name) {
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
+ add_preferred_console("stdout-path", 0, NULL);
+ }
}
if (!of_aliases)
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 352b4f28f82c..dee9270ba547 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/idr.h>
#include "of_private.h"
@@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
struct device_node *target, struct device_node *child)
{
const char *cname;
- struct device_node *tchild, *grandchild;
+ struct device_node *tchild;
int ret = 0;
cname = kbasename(child->full_name);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 0cf9a236d438..aba8946cac46 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -92,6 +92,11 @@ static void __init of_selftest_find_node_by_name(void)
"option path test failed\n");
of_node_put(np);
+ np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
+ selftest(np && !strcmp("test/option", options),
+ "option path test, subcase #1 failed\n");
+ of_node_put(np);
+
np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
selftest(np, "NULL option path test failed\n");
of_node_put(np);
@@ -102,6 +107,12 @@ static void __init of_selftest_find_node_by_name(void)
"option alias path test failed\n");
of_node_put(np);
+ np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
+ &options);
+ selftest(np && !strcmp("test/alias/option", options),
+ "option alias path test, subcase #1 failed\n");
+ of_node_put(np);
+
np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
selftest(np, "NULL option alias path test failed\n");
of_node_put(np);
@@ -378,9 +389,9 @@ static void __init of_selftest_property_string(void)
rc = of_property_match_string(np, "phandle-list-names", "first");
selftest(rc == 0, "first expected:0 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "second");
- selftest(rc == 1, "second expected:0 got:%i\n", rc);
+ selftest(rc == 1, "second expected:1 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "third");
- selftest(rc == 2, "third expected:0 got:%i\n", rc);
+ selftest(rc == 2, "third expected:2 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "fourth");
selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
rc = of_property_match_string(np, "missing-property", "blah");
@@ -478,7 +489,6 @@ static void __init of_selftest_changeset(void)
struct device_node *n1, *n2, *n21, *nremove, *parent, *np;
struct of_changeset chgset;
- of_changeset_init(&chgset);
n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
selftest(n1, "testcase setup failure\n");
n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
@@ -979,7 +989,7 @@ static int of_path_platform_device_exists(const char *path)
return pdev != NULL;
}
-#if IS_ENABLED(CONFIG_I2C)
+#if IS_BUILTIN(CONFIG_I2C)
/* get the i2c client device instantiated at the path */
static struct i2c_client *of_path_to_i2c_client(const char *path)
@@ -1445,7 +1455,7 @@ static void of_selftest_overlay_11(void)
return;
}
-#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
+#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
struct selftest_i2c_bus_data {
struct platform_device *pdev;
@@ -1584,7 +1594,7 @@ static struct i2c_driver selftest_i2c_dev_driver = {
.id_table = selftest_i2c_dev_id,
};
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
struct selftest_i2c_mux_data {
int nchans;
@@ -1695,7 +1705,7 @@ static int of_selftest_overlay_i2c_init(void)
"could not register selftest i2c bus driver\n"))
return ret;
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
ret = i2c_add_driver(&selftest_i2c_mux_driver);
if (selftest(ret == 0,
"could not register selftest i2c mux driver\n"))
@@ -1707,7 +1717,7 @@ static int of_selftest_overlay_i2c_init(void)
static void of_selftest_overlay_i2c_cleanup(void)
{
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
i2c_del_driver(&selftest_i2c_mux_driver);
#endif
platform_driver_unregister(&selftest_i2c_bus_driver);
@@ -1814,7 +1824,7 @@ static void __init of_selftest_overlay(void)
of_selftest_overlay_10();
of_selftest_overlay_11();
-#if IS_ENABLED(CONFIG_I2C)
+#if IS_BUILTIN(CONFIG_I2C)
if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n"))
goto out;
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index aab55474dd0d..ee082c0366ec 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
return false;
}
-static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
int offset)
{
struct xgene_pcie_port *port = bus->sysdata;
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
return NULL;
xgene_pcie_set_rtdid_reg(bus, devfn);
- return xgene_pcie_get_cfg_base(bus);
+ return xgene_pcie_get_cfg_base(bus) + offset;
}
static struct pci_ops xgene_pcie_ops = {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index aa012fb3834b..312f23a8429c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev,
struct pci_dev *pdev = to_pci_dev(dev);
char *driver_override, *old = pdev->driver_override, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%s\n", pdev->driver_override);
+ return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b899947d839d..1245dca79009 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3444,13 +3444,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_requested_microamps.attr)
return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
- /* all the other attributes exist to support constraints;
- * don't show them if there are no constraints, or if the
- * relevant supporting methods are missing.
- */
- if (!rdev->constraints)
- return 0;
-
/* constraints need specific supporting methods */
if (attr == &dev_attr_min_microvolts.attr ||
attr == &dev_attr_max_microvolts.attr)
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index bc6100103f7f..f0489cb9018b 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
config.regmap = chip->regmap;
config.of_node = dev->of_node;
+ /* Mask all interrupt sources to deassert interrupt line */
+ error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0);
+ if (!error)
+ error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0);
+ if (error) {
+ dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error);
+ return error;
+ }
+
rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config);
if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 1f93b752a81c..3fd44353cc80 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_LDO_VSEL_MASK,
.enable_reg = RK808_LDO_EN_REG,
.enable_mask = BIT(0),
+ .enable_time = 400,
.owner = THIS_MODULE,
}, {
.name = "LDO_REG2",
@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_LDO_VSEL_MASK,
.enable_reg = RK808_LDO_EN_REG,
.enable_mask = BIT(1),
+ .enable_time = 400,
.owner = THIS_MODULE,
}, {
.name = "LDO_REG3",
@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_BUCK4_VSEL_MASK,
.enable_reg = RK808_LDO_EN_REG,
.enable_mask = BIT(2),
+ .enable_time = 400,
.owner = THIS_MODULE,
}, {
.name = "LDO_REG4",
@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_LDO_VSEL_MASK,
.enable_reg = RK808_LDO_EN_REG,
.enable_mask = BIT(3),
+ .enable_time = 400,
.owner = THIS_MODULE,
}, {
.name = "LDO_REG5",
@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_LDO_VSEL_MASK,
.enable_reg = RK808_LDO_EN_REG,
.enable_mask = BIT(4),
+ .enable_time = 400,
.owner = THIS_MODULE,
}, {
.name = "LDO_REG6",
@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_LDO_VSEL_MASK,
.enable_reg = RK808_LDO_EN_REG,
.enable_mask = BIT(5),
+ .enable_time = 400,
.owner = THIS_MODULE,
}, {
.name = "LDO_REG7",
@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_LDO_VSEL_MASK,
.enable_reg = RK808_LDO_EN_REG,
.enable_mask = BIT(6),
+ .enable_time = 400,
.owner = THIS_MODULE,
}, {
.name = "LDO_REG8",
@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_LDO_VSEL_MASK,
.enable_reg = RK808_LDO_EN_REG,
.enable_mask = BIT(7),
+ .enable_time = 400,
.owner = THIS_MODULE,
}, {
.name = "SWITCH_REG1",
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4241eeab3386..f4cf6851fae9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = {
static struct s3c_rtc_data const s3c6410_rtc_data = {
.max_user_freq = 32768,
+ .needs_src_clk = true,
.irq_handler = s3c6410_rtc_irq,
.set_freq = s3c6410_rtc_setfreq,
.enable_tick = s3c6410_rtc_enable_tick,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 96128cb009f3..da212813f2d5 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
* parse input
*/
num_of_segments = 0;
- for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
+ for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
for (j = i; (buf[j] != ':') &&
(buf[j] != '\0') &&
(buf[j] != '\n') &&
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 09db45296eed..7497ddde2dd6 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq)
add = 0;
continue;
}
- for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+ for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
if (clusters_intersect(req, iter->request[pos]) &&
(rq_data_dir(req) == WRITE ||
rq_data_dir(iter->request[pos]) == WRITE)) {
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 62b58d38ce2e..60de66252fa2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct sas_ha_struct *ha = port->ha;
+ struct domain_device *ddev = port->port_dev;
/* prevent revalidation from finding sata links in recovery */
mutex_lock(&ha->disco_mutex);
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
task_pid_nr(current));
- if (port->port_dev)
- res = sas_ex_revalidate_domain(port->port_dev);
+ if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+ ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
+ res = sas_ex_revalidate_domain(ddev);
SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
port->id, task_pid_nr(current), res);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 9af7841f2e8c..06de34001c66 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
(unsigned long long)xfer->rx_dma);
}
- /* REVISIT: We're waiting for ENDRX before we start the next
+ /* REVISIT: We're waiting for RXBUFF before we start the next
* transfer because we need to handle some difficult timing
- * issues otherwise. If we wait for ENDTX in one transfer and
- * then starts waiting for ENDRX in the next, it's difficult
- * to tell the difference between the ENDRX interrupt we're
- * actually waiting for and the ENDRX interrupt of the
+ * issues otherwise. If we wait for TXBUFE in one transfer and
+ * then start waiting for RXBUFF in the next, it's difficult
+ * to tell the difference between the RXBUFF interrupt we're
+ * actually waiting for and the RXBUFF interrupt of the
* previous transfer.
*
* It should be doable, though. Just not now...
*/
- spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
+ spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index a0197fd4e95c..3ce39d10fafb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
1,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ return NULL;
+
txdesc->callback = dw_spi_dma_tx_done;
txdesc->callback_param = dws;
@@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
1,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ return NULL;
+
rxdesc->callback = dw_spi_dma_rx_done;
rxdesc->callback_param = dws;
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 5ba331047cbe..6d331e0db331 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -36,13 +36,13 @@ struct spi_pci_desc {
static struct spi_pci_desc spi_pci_mid_desc_1 = {
.setup = dw_spi_mid_init,
- .num_cs = 32,
+ .num_cs = 5,
.bus_num = 0,
};
static struct spi_pci_desc spi_pci_mid_desc_2 = {
.setup = dw_spi_mid_init,
- .num_cs = 4,
+ .num_cs = 2,
.bus_num = 1,
};
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 5a97a62b298a..4847afba89f4 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
if (!dws->fifo_len) {
u32 fifo;
- for (fifo = 2; fifo <= 256; fifo++) {
+ for (fifo = 1; fifo < 256; fifo++) {
dw_writew(dws, DW_SPI_TXFLTR, fifo);
if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
break;
}
dw_writew(dws, DW_SPI_TXFLTR, 0);
- dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
+ dws->fifo_len = (fifo == 1) ? 0 : fifo;
dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
}
}
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index c01567d53581..e649bc7d4c08 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master,
unsigned long flags;
int ret;
+ if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
+ dev_err(spfi->dev,
+ "Transfer length (%d) is greater than the max supported (%d)",
+ xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
+ return -EINVAL;
+ }
+
/*
* Stop all DMA and reset the controller if the previous transaction
* timed out and never completed its DMA.
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 89ca162801da..ee513a85296b 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022)
pl022->cur_msg = NULL;
pl022->cur_transfer = NULL;
pl022->cur_chip = NULL;
- spi_finalize_current_message(pl022->master);
/* disable the SPI/SSP operation */
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+ spi_finalize_current_message(pl022->master);
}
/**
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 884a716e50cb..5c0616870358 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -101,6 +101,7 @@ struct ti_qspi {
#define QSPI_FLEN(n) ((n - 1) << 0)
/* STATUS REGISTER */
+#define BUSY 0x01
#define WC 0x02
/* INTERRUPT REGISTER */
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
}
+static inline u32 qspi_is_busy(struct ti_qspi *qspi)
+{
+ u32 stat;
+ unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ while ((stat & BUSY) && time_after(timeout, jiffies)) {
+ cpu_relax();
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ }
+
+ WARN(stat & BUSY, "qspi busy\n");
+ return stat & BUSY;
+}
+
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
{
int wlen, count;
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
wlen = t->bits_per_word >> 3; /* in bytes */
while (count) {
+ if (qspi_is_busy(qspi))
+ return -EBUSY;
+
switch (wlen) {
case 1:
dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
+ if (qspi_is_busy(qspi))
+ return -EBUSY;
+
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (!wait_for_completion_timeout(&qspi->transfer_complete,
QSPI_COMPLETION_TIMEOUT)) {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index af98b096af2f..175c9956cbe3 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -144,10 +144,9 @@ struct ffs_io_data {
bool read;
struct kiocb *kiocb;
- const struct iovec *iovec;
- unsigned long nr_segs;
- char __user *buf;
- size_t len;
+ struct iov_iter data;
+ const void *to_free;
+ char *buf;
struct mm_struct *mm;
struct work_struct work;
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->req->actual;
if (io_data->read && ret > 0) {
- int i;
- size_t pos = 0;
-
- /*
- * Since req->length may be bigger than io_data->len (after
- * being rounded up to maxpacketsize), we may end up with more
- * data then user space has space for.
- */
- ret = min_t(int, ret, io_data->len);
-
use_mm(io_data->mm);
- for (i = 0; i < io_data->nr_segs; i++) {
- size_t len = min_t(size_t, ret - pos,
- io_data->iovec[i].iov_len);
- if (!len)
- break;
- if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
- &io_data->buf[pos], len))) {
- ret = -EFAULT;
- break;
- }
- pos += len;
- }
+ ret = copy_to_iter(io_data->buf, ret, &io_data->data);
+ if (iov_iter_count(&io_data->data))
+ ret = -EFAULT;
unuse_mm(io_data->mm);
}
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->kiocb->private = NULL;
if (io_data->read)
- kfree(io_data->iovec);
+ kfree(io_data->to_free);
kfree(io_data->buf);
kfree(io_data);
}
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* before the waiting completes, so do not assign to 'gadget' earlier
*/
struct usb_gadget *gadget = epfile->ffs->gadget;
+ size_t copied;
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
return -ESHUTDOWN;
}
+ data_len = iov_iter_count(&io_data->data);
/*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
- data_len = io_data->read ?
- usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
- io_data->len;
+ if (io_data->read)
+ data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
spin_unlock_irq(&epfile->ffs->eps_lock);
data = kmalloc(data_len, GFP_KERNEL);
if (unlikely(!data))
return -ENOMEM;
- if (io_data->aio && !io_data->read) {
- int i;
- size_t pos = 0;
- for (i = 0; i < io_data->nr_segs; i++) {
- if (unlikely(copy_from_user(&data[pos],
- io_data->iovec[i].iov_base,
- io_data->iovec[i].iov_len))) {
- ret = -EFAULT;
- goto error;
- }
- pos += io_data->iovec[i].iov_len;
- }
- } else {
- if (!io_data->read &&
- unlikely(__copy_from_user(data, io_data->buf,
- io_data->len))) {
+ if (!io_data->read) {
+ copied = copy_from_iter(data, data_len, &io_data->data);
+ if (copied != data_len) {
ret = -EFAULT;
goto error;
}
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
*/
ret = ep->status;
if (io_data->read && ret > 0) {
- ret = min_t(size_t, ret, io_data->len);
-
- if (unlikely(copy_to_user(io_data->buf,
- data, ret)))
+ ret = copy_to_iter(data, ret, &io_data->data);
+ if (unlikely(iov_iter_count(&io_data->data)))
ret = -EFAULT;
}
}
@@ -898,37 +864,6 @@ error:
return ret;
}
-static ssize_t
-ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
- loff_t *ptr)
-{
- struct ffs_io_data io_data;
-
- ENTER();
-
- io_data.aio = false;
- io_data.read = false;
- io_data.buf = (char * __user)buf;
- io_data.len = len;
-
- return ffs_epfile_io(file, &io_data);
-}
-
-static ssize_t
-ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
-{
- struct ffs_io_data io_data;
-
- ENTER();
-
- io_data.aio = false;
- io_data.read = true;
- io_data.buf = buf;
- io_data.len = len;
-
- return ffs_epfile_io(file, &io_data);
-}
-
static int
ffs_epfile_open(struct inode *inode, struct file *file)
{
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
return value;
}
-static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
- const struct iovec *iovec,
- unsigned long nr_segs, loff_t loff)
+static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
- struct ffs_io_data *io_data;
+ struct ffs_io_data io_data, *p = &io_data;
+ ssize_t res;
ENTER();
- io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
- if (unlikely(!io_data))
- return -ENOMEM;
+ if (!is_sync_kiocb(kiocb)) {
+ p = kmalloc(sizeof(io_data), GFP_KERNEL);
+ if (unlikely(!p))
+ return -ENOMEM;
+ p->aio = true;
+ } else {
+ p->aio = false;
+ }
- io_data->aio = true;
- io_data->read = false;
- io_data->kiocb = kiocb;
- io_data->iovec = iovec;
- io_data->nr_segs = nr_segs;
- io_data->len = kiocb->ki_nbytes;
- io_data->mm = current->mm;
+ p->read = false;
+ p->kiocb = kiocb;
+ p->data = *from;
+ p->mm = current->mm;
- kiocb->private = io_data;
+ kiocb->private = p;
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
- return ffs_epfile_io(kiocb->ki_filp, io_data);
+ res = ffs_epfile_io(kiocb->ki_filp, p);
+ if (res == -EIOCBQUEUED)
+ return res;
+ if (p->aio)
+ kfree(p);
+ else
+ *from = p->data;
+ return res;
}
-static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
- const struct iovec *iovec,
- unsigned long nr_segs, loff_t loff)
+static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
- struct ffs_io_data *io_data;
- struct iovec *iovec_copy;
+ struct ffs_io_data io_data, *p = &io_data;
+ ssize_t res;
ENTER();
- iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
- if (unlikely(!iovec_copy))
- return -ENOMEM;
-
- memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
-
- io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
- if (unlikely(!io_data)) {
- kfree(iovec_copy);
- return -ENOMEM;
+ if (!is_sync_kiocb(kiocb)) {
+ p = kmalloc(sizeof(io_data), GFP_KERNEL);
+ if (unlikely(!p))
+ return -ENOMEM;
+ p->aio = true;
+ } else {
+ p->aio = false;
}
- io_data->aio = true;
- io_data->read = true;
- io_data->kiocb = kiocb;
- io_data->iovec = iovec_copy;
- io_data->nr_segs = nr_segs;
- io_data->len = kiocb->ki_nbytes;
- io_data->mm = current->mm;
+ p->read = true;
+ p->kiocb = kiocb;
+ if (p->aio) {
+ p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
+ if (!p->to_free) {
+ kfree(p);
+ return -ENOMEM;
+ }
+ } else {
+ p->data = *to;
+ p->to_free = NULL;
+ }
+ p->mm = current->mm;
- kiocb->private = io_data;
+ kiocb->private = p;
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
- return ffs_epfile_io(kiocb->ki_filp, io_data);
+ res = ffs_epfile_io(kiocb->ki_filp, p);
+ if (res == -EIOCBQUEUED)
+ return res;
+
+ if (p->aio) {
+ kfree(p->to_free);
+ kfree(p);
+ } else {
+ *to = p->data;
+ }
+ return res;
}
static int
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = {
.llseek = no_llseek,
.open = ffs_epfile_open,
- .write = ffs_epfile_write,
- .read = ffs_epfile_read,
- .aio_write = ffs_epfile_aio_write,
- .aio_read = ffs_epfile_aio_read,
+ .write = new_sync_write,
+ .read = new_sync_read,
+ .write_iter = ffs_epfile_write_iter,
+ .read_iter = ffs_epfile_read_iter,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
};
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index db49ec4c748e..200f9a584064 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");
+static int ep_open(struct inode *, struct file *);
+
/*----------------------------------------------------------------------*/
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req)
* still need dev->lock to use epdata->ep.
*/
static int
-get_ready_ep (unsigned f_flags, struct ep_data *epdata)
+get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
{
int val;
if (f_flags & O_NONBLOCK) {
if (!mutex_trylock(&epdata->lock))
goto nonblock;
- if (epdata->state != STATE_EP_ENABLED) {
+ if (epdata->state != STATE_EP_ENABLED &&
+ (!is_write || epdata->state != STATE_EP_READY)) {
mutex_unlock(&epdata->lock);
nonblock:
val = -EAGAIN;
@@ -305,18 +308,20 @@ nonblock:
switch (epdata->state) {
case STATE_EP_ENABLED:
+ return 0;
+ case STATE_EP_READY: /* not configured yet */
+ if (is_write)
+ return 0;
+ // FALLTHRU
+ case STATE_EP_UNBOUND: /* clean disconnect */
break;
// case STATE_EP_DISABLED: /* "can't happen" */
- // case STATE_EP_READY: /* "can't happen" */
default: /* error! */
pr_debug ("%s: ep %p not available, state %d\n",
shortname, epdata, epdata->state);
- // FALLTHROUGH
- case STATE_EP_UNBOUND: /* clean disconnect */
- val = -ENODEV;
- mutex_unlock(&epdata->lock);
}
- return val;
+ mutex_unlock(&epdata->lock);
+ return -ENODEV;
}
static ssize_t
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
return value;
}
-
-/* handle a synchronous OUT bulk/intr/iso transfer */
-static ssize_t
-ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
-{
- struct ep_data *data = fd->private_data;
- void *kbuf;
- ssize_t value;
-
- if ((value = get_ready_ep (fd->f_flags, data)) < 0)
- return value;
-
- /* halt any endpoint by doing a "wrong direction" i/o call */
- if (usb_endpoint_dir_in(&data->desc)) {
- if (usb_endpoint_xfer_isoc(&data->desc)) {
- mutex_unlock(&data->lock);
- return -EINVAL;
- }
- DBG (data->dev, "%s halt\n", data->name);
- spin_lock_irq (&data->dev->lock);
- if (likely (data->ep != NULL))
- usb_ep_set_halt (data->ep);
- spin_unlock_irq (&data->dev->lock);
- mutex_unlock(&data->lock);
- return -EBADMSG;
- }
-
- /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
-
- value = -ENOMEM;
- kbuf = kmalloc (len, GFP_KERNEL);
- if (unlikely (!kbuf))
- goto free1;
-
- value = ep_io (data, kbuf, len);
- VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
- data->name, len, (int) value);
- if (value >= 0 && copy_to_user (buf, kbuf, value))
- value = -EFAULT;
-
-free1:
- mutex_unlock(&data->lock);
- kfree (kbuf);
- return value;
-}
-
-/* handle a synchronous IN bulk/intr/iso transfer */
-static ssize_t
-ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
-{
- struct ep_data *data = fd->private_data;
- void *kbuf;
- ssize_t value;
-
- if ((value = get_ready_ep (fd->f_flags, data)) < 0)
- return value;
-
- /* halt any endpoint by doing a "wrong direction" i/o call */
- if (!usb_endpoint_dir_in(&data->desc)) {
- if (usb_endpoint_xfer_isoc(&data->desc)) {
- mutex_unlock(&data->lock);
- return -EINVAL;
- }
- DBG (data->dev, "%s halt\n", data->name);
- spin_lock_irq (&data->dev->lock);
- if (likely (data->ep != NULL))
- usb_ep_set_halt (data->ep);
- spin_unlock_irq (&data->dev->lock);
- mutex_unlock(&data->lock);
- return -EBADMSG;
- }
-
- /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
-
- value = -ENOMEM;
- kbuf = memdup_user(buf, len);
- if (IS_ERR(kbuf)) {
- value = PTR_ERR(kbuf);
- kbuf = NULL;
- goto free1;
- }
-
- value = ep_io (data, kbuf, len);
- VDEBUG (data->dev, "%s write %zu IN, status %d\n",
- data->name, len, (int) value);
-free1:
- mutex_unlock(&data->lock);
- kfree (kbuf);
- return value;
-}
-
static int
ep_release (struct inode *inode, struct file *fd)
{
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
struct ep_data *data = fd->private_data;
int status;
- if ((status = get_ready_ep (fd->f_flags, data)) < 0)
+ if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
return status;
spin_lock_irq (&data->dev->lock);
@@ -517,8 +431,8 @@ struct kiocb_priv {
struct mm_struct *mm;
struct work_struct work;
void *buf;
- const struct iovec *iv;
- unsigned long nr_segs;
+ struct iov_iter to;
+ const void *to_free;
unsigned actual;
};
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb)
return value;
}
-static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
-{
- ssize_t len, total;
- void *to_copy;
- int i;
-
- /* copy stuff into user buffers */
- total = priv->actual;
- len = 0;
- to_copy = priv->buf;
- for (i=0; i < priv->nr_segs; i++) {
- ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
-
- if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
- if (len == 0)
- len = -EFAULT;
- break;
- }
-
- total -= this;
- len += this;
- to_copy += this;
- if (total == 0)
- break;
- }
-
- return len;
-}
-
static void ep_user_copy_worker(struct work_struct *work)
{
struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work)
size_t ret;
use_mm(mm);
- ret = ep_copy_to_user(priv);
+ ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
unuse_mm(mm);
+ if (!ret)
+ ret = -EFAULT;
/* completing the iocb can drop the ctx and mm, don't touch mm after */
aio_complete(iocb, ret, ret);
kfree(priv->buf);
+ kfree(priv->to_free);
kfree(priv);
}
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
* don't need to copy anything to userspace, so we can
* complete the aio request immediately.
*/
- if (priv->iv == NULL || unlikely(req->actual == 0)) {
+ if (priv->to_free == NULL || unlikely(req->actual == 0)) {
kfree(req->buf);
+ kfree(priv->to_free);
kfree(priv);
iocb->private = NULL;
/* aio_complete() reports bytes-transferred _and_ faults */
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
priv->buf = req->buf;
priv->actual = req->actual;
+ INIT_WORK(&priv->work, ep_user_copy_worker);
schedule_work(&priv->work);
}
spin_unlock(&epdata->dev->lock);
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
put_ep(epdata);
}
-static ssize_t
-ep_aio_rwtail(
- struct kiocb *iocb,
- char *buf,
- size_t len,
- struct ep_data *epdata,
- const struct iovec *iv,
- unsigned long nr_segs
-)
+static ssize_t ep_aio(struct kiocb *iocb,
+ struct kiocb_priv *priv,
+ struct ep_data *epdata,
+ char *buf,
+ size_t len)
{
- struct kiocb_priv *priv;
- struct usb_request *req;
- ssize_t value;
+ struct usb_request *req;
+ ssize_t value;
- priv = kmalloc(sizeof *priv, GFP_KERNEL);
- if (!priv) {
- value = -ENOMEM;
-fail:
- kfree(buf);
- return value;
- }
iocb->private = priv;
priv->iocb = iocb;
- priv->iv = iv;
- priv->nr_segs = nr_segs;
- INIT_WORK(&priv->work, ep_user_copy_worker);
-
- value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
- if (unlikely(value < 0)) {
- kfree(priv);
- goto fail;
- }
kiocb_set_cancel_fn(iocb, ep_aio_cancel);
get_ep(epdata);
@@ -669,75 +538,154 @@ fail:
* allocate or submit those if the host disconnected.
*/
spin_lock_irq(&epdata->dev->lock);
- if (likely(epdata->ep)) {
- req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
- if (likely(req)) {
- priv->req = req;
- req->buf = buf;
- req->length = len;
- req->complete = ep_aio_complete;
- req->context = iocb;
- value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
- if (unlikely(0 != value))
- usb_ep_free_request(epdata->ep, req);
- } else
- value = -EAGAIN;
- } else
- value = -ENODEV;
- spin_unlock_irq(&epdata->dev->lock);
+ value = -ENODEV;
+ if (unlikely(epdata->ep))
+ goto fail;
- mutex_unlock(&epdata->lock);
+ req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
+ value = -ENOMEM;
+ if (unlikely(!req))
+ goto fail;
- if (unlikely(value)) {
- kfree(priv);
- put_ep(epdata);
- } else
- value = -EIOCBQUEUED;
+ priv->req = req;
+ req->buf = buf;
+ req->length = len;
+ req->complete = ep_aio_complete;
+ req->context = iocb;
+ value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
+ if (unlikely(0 != value)) {
+ usb_ep_free_request(epdata->ep, req);
+ goto fail;
+ }
+ spin_unlock_irq(&epdata->dev->lock);
+ return -EIOCBQUEUED;
+
+fail:
+ spin_unlock_irq(&epdata->dev->lock);
+ kfree(priv->to_free);
+ kfree(priv);
+ put_ep(epdata);
return value;
}
static ssize_t
-ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t o)
+ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct ep_data *epdata = iocb->ki_filp->private_data;
- char *buf;
+ struct file *file = iocb->ki_filp;
+ struct ep_data *epdata = file->private_data;
+ size_t len = iov_iter_count(to);
+ ssize_t value;
+ char *buf;
- if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
- return -EINVAL;
+ if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
+ return value;
- buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
- if (unlikely(!buf))
- return -ENOMEM;
+ /* halt any endpoint by doing a "wrong direction" i/o call */
+ if (usb_endpoint_dir_in(&epdata->desc)) {
+ if (usb_endpoint_xfer_isoc(&epdata->desc) ||
+ !is_sync_kiocb(iocb)) {
+ mutex_unlock(&epdata->lock);
+ return -EINVAL;
+ }
+ DBG (epdata->dev, "%s halt\n", epdata->name);
+ spin_lock_irq(&epdata->dev->lock);
+ if (likely(epdata->ep != NULL))
+ usb_ep_set_halt(epdata->ep);
+ spin_unlock_irq(&epdata->dev->lock);
+ mutex_unlock(&epdata->lock);
+ return -EBADMSG;
+ }
- return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
+ buf = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ mutex_unlock(&epdata->lock);
+ return -ENOMEM;
+ }
+ if (is_sync_kiocb(iocb)) {
+ value = ep_io(epdata, buf, len);
+ if (value >= 0 && copy_to_iter(buf, value, to))
+ value = -EFAULT;
+ } else {
+ struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ value = -ENOMEM;
+ if (!priv)
+ goto fail;
+ priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
+ if (!priv->to_free) {
+ kfree(priv);
+ goto fail;
+ }
+ value = ep_aio(iocb, priv, epdata, buf, len);
+ if (value == -EIOCBQUEUED)
+ buf = NULL;
+ }
+fail:
+ kfree(buf);
+ mutex_unlock(&epdata->lock);
+ return value;
}
+static ssize_t ep_config(struct ep_data *, const char *, size_t);
+
static ssize_t
-ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t o)
+ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct ep_data *epdata = iocb->ki_filp->private_data;
- char *buf;
- size_t len = 0;
- int i = 0;
+ struct file *file = iocb->ki_filp;
+ struct ep_data *epdata = file->private_data;
+ size_t len = iov_iter_count(from);
+ bool configured;
+ ssize_t value;
+ char *buf;
+
+ if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
+ return value;
- if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
- return -EINVAL;
+ configured = epdata->state == STATE_EP_ENABLED;
- buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
- if (unlikely(!buf))
+ /* halt any endpoint by doing a "wrong direction" i/o call */
+ if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
+ if (usb_endpoint_xfer_isoc(&epdata->desc) ||
+ !is_sync_kiocb(iocb)) {
+ mutex_unlock(&epdata->lock);
+ return -EINVAL;
+ }
+ DBG (epdata->dev, "%s halt\n", epdata->name);
+ spin_lock_irq(&epdata->dev->lock);
+ if (likely(epdata->ep != NULL))
+ usb_ep_set_halt(epdata->ep);
+ spin_unlock_irq(&epdata->dev->lock);
+ mutex_unlock(&epdata->lock);
+ return -EBADMSG;
+ }
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ mutex_unlock(&epdata->lock);
return -ENOMEM;
+ }
- for (i=0; i < nr_segs; i++) {
- if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
- iov[i].iov_len) != 0)) {
- kfree(buf);
- return -EFAULT;
+ if (unlikely(copy_from_iter(buf, len, from) != len)) {
+ value = -EFAULT;
+ goto out;
+ }
+
+ if (unlikely(!configured)) {
+ value = ep_config(epdata, buf, len);
+ } else if (is_sync_kiocb(iocb)) {
+ value = ep_io(epdata, buf, len);
+ } else {
+ struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ value = -ENOMEM;
+ if (priv) {
+ value = ep_aio(iocb, priv, epdata, buf, len);
+ if (value == -EIOCBQUEUED)
+ buf = NULL;
}
- len += iov[i].iov_len;
}
- return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
+out:
+ kfree(buf);
+ mutex_unlock(&epdata->lock);
+ return value;
}
/*----------------------------------------------------------------------*/
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
/* used after endpoint configuration */
static const struct file_operations ep_io_operations = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = ep_read,
- .write = ep_write,
- .unlocked_ioctl = ep_ioctl,
+ .open = ep_open,
.release = ep_release,
-
- .aio_read = ep_aio_read,
- .aio_write = ep_aio_write,
+ .llseek = no_llseek,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .unlocked_ioctl = ep_ioctl,
+ .read_iter = ep_read_iter,
+ .write_iter = ep_write_iter,
};
/* ENDPOINT INITIALIZATION
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = {
* speed descriptor, then optional high speed descriptor.
*/
static ssize_t
-ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ep_config (struct ep_data *data, const char *buf, size_t len)
{
- struct ep_data *data = fd->private_data;
struct usb_ep *ep;
u32 tag;
int value, length = len;
- value = mutex_lock_interruptible(&data->lock);
- if (value < 0)
- return value;
-
if (data->state != STATE_EP_READY) {
value = -EL2HLT;
goto fail;
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
goto fail0;
/* we might need to change message format someday */
- if (copy_from_user (&tag, buf, 4)) {
- goto fail1;
- }
+ memcpy(&tag, buf, 4);
if (tag != 1) {
DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
goto fail0;
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
*/
/* full/low speed descriptor, then high speed */
- if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
- goto fail1;
- }
+ memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
goto fail0;
if (len != USB_DT_ENDPOINT_SIZE) {
if (len != 2 * USB_DT_ENDPOINT_SIZE)
goto fail0;
- if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
- USB_DT_ENDPOINT_SIZE)) {
- goto fail1;
- }
+ memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
+ USB_DT_ENDPOINT_SIZE);
if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
|| data->hs_desc.bDescriptorType
!= USB_DT_ENDPOINT) {
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
case USB_SPEED_LOW:
case USB_SPEED_FULL:
ep->desc = &data->desc;
- value = usb_ep_enable(ep);
- if (value == 0)
- data->state = STATE_EP_ENABLED;
break;
case USB_SPEED_HIGH:
/* fails if caller didn't provide that descriptor... */
ep->desc = &data->hs_desc;
- value = usb_ep_enable(ep);
- if (value == 0)
- data->state = STATE_EP_ENABLED;
break;
default:
DBG(data->dev, "unconnected, %s init abandoned\n",
data->name);
value = -EINVAL;
+ goto gone;
}
+ value = usb_ep_enable(ep);
if (value == 0) {
- fd->f_op = &ep_io_operations;
+ data->state = STATE_EP_ENABLED;
value = length;
}
gone:
@@ -867,14 +800,10 @@ fail:
data->desc.bDescriptorType = 0;
data->hs_desc.bDescriptorType = 0;
}
- mutex_unlock(&data->lock);
return value;
fail0:
value = -EINVAL;
goto fail;
-fail1:
- value = -EFAULT;
- goto fail;
}
static int
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd)
return value;
}
-/* used before endpoint configuration */
-static const struct file_operations ep_config_operations = {
- .llseek = no_llseek,
-
- .open = ep_open,
- .write = ep_config,
- .release = ep_release,
-};
-
/*----------------------------------------------------------------------*/
/* EP0 IMPLEMENTATION can be partly in userspace.
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
enum ep0_state state;
spin_lock_irq (&dev->lock);
+ if (dev->state <= STATE_DEV_OPENED) {
+ retval = -EINVAL;
+ goto done;
+ }
/* report fd mode change before acting on it */
if (dev->setup_abort) {
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
struct dev_data *dev = fd->private_data;
ssize_t retval = -ESRCH;
- spin_lock_irq (&dev->lock);
-
/* report fd mode change before acting on it */
if (dev->setup_abort) {
dev->setup_abort = 0;
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
} else
DBG (dev, "fail %s, state %d\n", __func__, dev->state);
- spin_unlock_irq (&dev->lock);
return retval;
}
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait)
struct dev_data *dev = fd->private_data;
int mask = 0;
+ if (dev->state <= STATE_DEV_OPENED)
+ return DEFAULT_POLLMASK;
+
poll_wait(fd, &dev->wait, wait);
spin_lock_irq (&dev->lock);
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
return ret;
}
-/* used after device configuration */
-static const struct file_operations ep0_io_operations = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
-
- .read = ep0_read,
- .write = ep0_write,
- .fasync = ep0_fasync,
- .poll = ep0_poll,
- .unlocked_ioctl = dev_ioctl,
- .release = dev_release,
-};
-
/*----------------------------------------------------------------------*/
/* The in-kernel gadget driver handles most ep0 issues, in particular
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev)
goto enomem1;
data->dentry = gadgetfs_create_file (dev->sb, data->name,
- data, &ep_config_operations);
+ data, &ep_io_operations);
if (!data->dentry)
goto enomem2;
list_add_tail (&data->epfiles, &dev->epfiles);
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
u32 tag;
char *kbuf;
+ spin_lock_irq(&dev->lock);
+ if (dev->state > STATE_DEV_OPENED) {
+ value = ep0_write(fd, buf, len, ptr);
+ spin_unlock_irq(&dev->lock);
+ return value;
+ }
+ spin_unlock_irq(&dev->lock);
+
if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
return -EINVAL;
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
* on, they can work ... except in cleanup paths that
* kick in after the ep0 descriptor is closed.
*/
- fd->f_op = &ep0_io_operations;
value = len;
}
return value;
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd)
return value;
}
-static const struct file_operations dev_init_operations = {
+static const struct file_operations ep0_operations = {
.llseek = no_llseek,
.open = dev_open,
+ .read = ep0_read,
.write = dev_config,
.fasync = ep0_fasync,
+ .poll = ep0_poll,
.unlocked_ioctl = dev_ioctl,
.release = dev_release,
};
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
goto Enomem;
dev->sb = sb;
- dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations);
+ dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
if (!dev->dentry) {
put_dev(dev);
goto Enomem;
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index f88bfdf5b6a0..2027a27546ef 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
func = vfio_pci_set_err_trigger;
break;
}
+ break;
case VFIO_PCI_REQ_IRQ_INDEX:
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_TRIGGER:
func = vfio_pci_set_req_trigger;
break;
}
+ break;
}
if (!func)
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 32c0b6b28097..9362424c2340 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint,
len = clcdfb_snprintf_mode(NULL, 0, mode);
name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
clcdfb_snprintf_mode(name, len + 1, mode);
mode->name = name;
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 95338593ebf4..868facdec638 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
int num = 0, i, first = 1;
int ver, rev;
- ver = edid[EDID_STRUCT_VERSION];
- rev = edid[EDID_STRUCT_REVISION];
-
mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
if (mode == NULL)
return NULL;
@@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
return NULL;
}
+ ver = edid[EDID_STRUCT_VERSION];
+ rev = edid[EDID_STRUCT_REVISION];
+
*dbsize = 0;
DPRINTK(" Detailed Timings\n");
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c
index 5a2095a98ed8..12186557a9d4 100644
--- a/drivers/video/fbdev/omap2/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c
@@ -28,44 +28,22 @@
#include <video/omapdss.h>
#include "dss.h"
-static struct omap_dss_device *to_dss_device_sysfs(struct device *dev)
+static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
{
- struct omap_dss_device *dssdev = NULL;
-
- for_each_dss_dev(dssdev) {
- if (dssdev->dev == dev) {
- omap_dss_put_device(dssdev);
- return dssdev;
- }
- }
-
- return NULL;
-}
-
-static ssize_t display_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
-
return snprintf(buf, PAGE_SIZE, "%s\n",
dssdev->name ?
dssdev->name : "");
}
-static ssize_t display_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
-
return snprintf(buf, PAGE_SIZE, "%d\n",
omapdss_device_is_enabled(dssdev));
}
-static ssize_t display_enabled_store(struct device *dev,
- struct device_attribute *attr,
+static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int r;
bool enable;
@@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev,
return size;
}
-static ssize_t display_tear_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
return snprintf(buf, PAGE_SIZE, "%d\n",
dssdev->driver->get_te ?
dssdev->driver->get_te(dssdev) : 0);
}
-static ssize_t display_tear_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_tear_store(struct omap_dss_device *dssdev,
+ const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int r;
bool te;
@@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev,
return size;
}
-static ssize_t display_timings_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
struct omap_video_timings t;
if (!dssdev->driver->get_timings)
@@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev,
t.y_res, t.vfp, t.vbp, t.vsw);
}
-static ssize_t display_timings_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_timings_store(struct omap_dss_device *dssdev,
+ const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
struct omap_video_timings t = dssdev->panel.timings;
int r, found;
@@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev,
return size;
}
-static ssize_t display_rotate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int rotate;
if (!dssdev->driver->get_rotate)
return -ENOENT;
@@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
}
-static ssize_t display_rotate_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_rotate_store(struct omap_dss_device *dssdev,
+ const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int rot, r;
if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
@@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev,
return size;
}
-static ssize_t display_mirror_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int mirror;
if (!dssdev->driver->get_mirror)
return -ENOENT;
@@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
}
-static ssize_t display_mirror_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
+ const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
int r;
bool mirror;
@@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev,
return size;
}
-static ssize_t display_wss_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
unsigned int wss;
if (!dssdev->driver->get_wss)
@@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
}
-static ssize_t display_wss_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_wss_store(struct omap_dss_device *dssdev,
+ const char *buf, size_t size)
{
- struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
u32 wss;
int r;
@@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev,
return size;
}
-static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
+struct display_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct omap_dss_device *, char *);
+ ssize_t (*store)(struct omap_dss_device *, const char *, size_t);
+};
+
+#define DISPLAY_ATTR(_name, _mode, _show, _store) \
+ struct display_attribute display_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL);
+static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL);
+static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
display_enabled_show, display_enabled_store);
-static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR,
display_tear_show, display_tear_store);
-static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR,
display_timings_show, display_timings_store);
-static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR,
display_rotate_show, display_rotate_store);
-static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR,
display_mirror_show, display_mirror_store);
-static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR,
display_wss_show, display_wss_store);
-static const struct attribute *display_sysfs_attrs[] = {
- &dev_attr_display_name.attr,
- &dev_attr_enabled.attr,
- &dev_attr_tear_elim.attr,
- &dev_attr_timings.attr,
- &dev_attr_rotate.attr,
- &dev_attr_mirror.attr,
- &dev_attr_wss.attr,
+static struct attribute *display_sysfs_attrs[] = {
+ &display_attr_name.attr,
+ &display_attr_display_name.attr,
+ &display_attr_enabled.attr,
+ &display_attr_tear_elim.attr,
+ &display_attr_timings.attr,
+ &display_attr_rotate.attr,
+ &display_attr_mirror.attr,
+ &display_attr_wss.attr,
NULL
};
+static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct omap_dss_device *dssdev;
+ struct display_attribute *display_attr;
+
+ dssdev = container_of(kobj, struct omap_dss_device, kobj);
+ display_attr = container_of(attr, struct display_attribute, attr);
+
+ if (!display_attr->show)
+ return -ENOENT;
+
+ return display_attr->show(dssdev, buf);
+}
+
+static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev;
+ struct display_attribute *display_attr;
+
+ dssdev = container_of(kobj, struct omap_dss_device, kobj);
+ display_attr = container_of(attr, struct display_attribute, attr);
+
+ if (!display_attr->store)
+ return -ENOENT;
+
+ return display_attr->store(dssdev, buf, size);
+}
+
+static const struct sysfs_ops display_sysfs_ops = {
+ .show = display_attr_show,
+ .store = display_attr_store,
+};
+
+static struct kobj_type display_ktype = {
+ .sysfs_ops = &display_sysfs_ops,
+ .default_attrs = display_sysfs_attrs,
+};
+
int display_init_sysfs(struct platform_device *pdev)
{
struct omap_dss_device *dssdev = NULL;
int r;
for_each_dss_dev(dssdev) {
- struct kobject *kobj = &dssdev->dev->kobj;
-
- r = sysfs_create_files(kobj, display_sysfs_attrs);
+ r = kobject_init_and_add(&dssdev->kobj, &display_ktype,
+ &pdev->dev.kobj, dssdev->alias);
if (r) {
DSSERR("failed to create sysfs files\n");
- goto err;
- }
-
- r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias);
- if (r) {
- sysfs_remove_files(kobj, display_sysfs_attrs);
-
- DSSERR("failed to create sysfs display link\n");
+ omap_dss_put_device(dssdev);
goto err;
}
}
@@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev)
struct omap_dss_device *dssdev = NULL;
for_each_dss_dev(dssdev) {
- sysfs_remove_link(&pdev->dev.kobj, dssdev->alias);
- sysfs_remove_files(&dssdev->dev->kobj,
- display_sysfs_attrs);
+ if (kobject_name(&dssdev->kobj) == NULL)
+ continue;
+
+ kobject_del(&dssdev->kobj);
+ kobject_put(&dssdev->kobj);
+
+ memset(&dssdev->kobj, 0, sizeof(dssdev->kobj));
}
}
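The display-sysfs conversion above drops the per-device DEVICE_ATTR files in favour of a kobject embedded in struct omap_dss_device plus a private display_attribute type; the common show/store entry points recover both containers with container_of() and then call the typed callback. A minimal userspace sketch of that dispatch shape, with invented panel/panel_attribute names standing in for the kernel types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj { const char *name; };		/* stands in for struct kobject */
struct attribute { const char *name; };

struct panel {
	const char *model;
	struct kobj kobj;			/* embedded object, like dssdev->kobj */
};

struct panel_attribute {
	struct attribute attr;
	int (*show)(struct panel *p, char *buf, size_t len);
};

static int model_show(struct panel *p, char *buf, size_t len)
{
	return snprintf(buf, len, "%s\n", p->model);
}

static struct panel_attribute panel_attr_model = {
	.attr = { .name = "model" },
	.show = model_show,
};

/* Generic entry point: recover both containers, then call the typed hook. */
static int panel_attr_show(struct kobj *kobj, struct attribute *attr,
			   char *buf, size_t len)
{
	struct panel *p = container_of(kobj, struct panel, kobj);
	struct panel_attribute *pattr =
		container_of(attr, struct panel_attribute, attr);

	if (!pattr->show)
		return -1;
	return pattr->show(p, buf, len);
}

int main(void)
{
	struct panel p = { .model = "acx565akm" };
	char buf[64];

	panel_attr_show(&p.kobj, &panel_attr_model.attr, buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}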
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b4bca2d4a7e5..70fba973a107 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq)
pirq_query_unmask(irq);
rc = set_evtchn_to_irq(evtchn, irq);
- if (rc != 0) {
- pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
- irq, rc);
- xen_evtchn_close(evtchn);
- return 0;
- }
+ if (rc)
+ goto err;
+
bind_evtchn_to_cpu(evtchn, 0);
info->evtchn = evtchn;
+ rc = xen_evtchn_port_setup(info);
+ if (rc)
+ goto err;
+
out:
unmask_evtchn(evtchn);
eoi_pirq(irq_get_irq_data(irq));
return 0;
+
+err:
+ pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
+ xen_evtchn_close(evtchn);
+ return 0;
}
static unsigned int startup_pirq(struct irq_data *data)
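The __startup_pirq() rework funnels both failure cases through one err: label so the new xen_evtchn_port_setup() call can reuse the existing rollback. A self-contained sketch of the same goto-unwind shape, with made-up resources:

#include <stdio.h>
#include <stdlib.h>

struct ctx { char *a; char *b; };

static int ctx_setup(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		goto err;

	c->b = malloc(64);
	if (!c->b)
		goto err;

	return 0;	/* success path exits before the error label */
err:
	fprintf(stderr, "ctx_setup: allocation failed, rolling back\n");
	free(c->a);	/* free(NULL) is a no-op, so partial setup unwinds safely */
	c->a = NULL;
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };

	if (ctx_setup(&c))
		return 1;
	printf("ctx ready\n");
	free(c.b);
	free(c.a);
	return 0;
}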
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 46ae0f9f02ad..75fe3d466515 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,7 +16,7 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-static bool permissive;
+bool permissive;
module_param(permissive, bool, 0644);
/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index e56c934ad137..2e1d73d1d5d0 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -64,6 +64,8 @@ struct config_field_entry {
void *data;
};
+extern bool permissive;
+
#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
/* Add fields to a device - the add_fields macro expects to get a pointer to
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index c5ee82587e8c..2d7369391472 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -11,6 +11,10 @@
#include "pciback.h"
#include "conf_space.h"
+struct pci_cmd_info {
+ u16 val;
+};
+
struct pci_bar_info {
u32 val;
u32 len_val;
@@ -20,22 +24,36 @@ struct pci_bar_info {
#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
-static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
+/* Bits guests are allowed to control in permissive mode. */
+#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
+ PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
+ PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
+
+static void *command_init(struct pci_dev *dev, int offset)
{
- int i;
- int ret;
-
- ret = xen_pcibk_read_config_word(dev, offset, value, data);
- if (!pci_is_enabled(dev))
- return ret;
-
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- if (dev->resource[i].flags & IORESOURCE_IO)
- *value |= PCI_COMMAND_IO;
- if (dev->resource[i].flags & IORESOURCE_MEM)
- *value |= PCI_COMMAND_MEMORY;
+ struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ int err;
+
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
+
+ err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
+ if (err) {
+ kfree(cmd);
+ return ERR_PTR(err);
}
+ return cmd;
+}
+
+static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
+{
+ int ret = pci_read_config_word(dev, offset, value);
+ const struct pci_cmd_info *cmd = data;
+
+ *value &= PCI_COMMAND_GUEST;
+ *value |= cmd->val & ~PCI_COMMAND_GUEST;
+
return ret;
}
@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
struct xen_pcibk_dev_data *dev_data;
int err;
+ u16 val;
+ struct pci_cmd_info *cmd = data;
dev_data = pci_get_drvdata(dev);
if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
}
}
+ cmd->val = value;
+
+ if (!permissive && (!dev_data || !dev_data->permissive))
+ return 0;
+
+ /* Only allow the guest to control certain bits. */
+ err = pci_read_config_word(dev, offset, &val);
+ if (err || val == value)
+ return err;
+
+ value &= PCI_COMMAND_GUEST;
+ value |= val & ~PCI_COMMAND_GUEST;
+
return pci_write_config_word(dev, offset, value);
}
@@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
{
.offset = PCI_COMMAND,
.size = 2,
+ .init = command_init,
+ .release = bar_release,
.u.w.read = command_read,
.u.w.write = command_write,
},
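command_read()/command_write() above merge two command-register values bit-wise: guest-controllable bits come from one source and everything else from the other, using the (a & mask) | (b & ~mask) idiom. A small standalone illustration with real PCI_COMMAND bit values but invented register contents:

#include <stdio.h>
#include <stdint.h>

#define PCI_COMMAND_IO       0x0001
#define PCI_COMMAND_MEMORY   0x0002
#define PCI_COMMAND_MASTER   0x0004
#define PCI_COMMAND_SPECIAL  0x0008
/* Subset a guest may flip, mirroring PCI_COMMAND_GUEST in spirit. */
#define CMD_GUEST (PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL)

static uint16_t merge(uint16_t guest, uint16_t host, uint16_t mask)
{
	return (guest & mask) | (host & ~mask);
}

int main(void)
{
	uint16_t host  = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;	/* hardware state   */
	uint16_t guest = PCI_COMMAND_MASTER | PCI_COMMAND_IO;	/* guest's request  */

	/* Guest gets MASTER (allowed) but cannot clear IO/MEMORY: result is 0x0007. */
	printf("written to hw: 0x%04x\n", (unsigned)merge(guest, host, CMD_GUEST));
	return 0;
}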
diff --git a/fs/locks.c b/fs/locks.c
index f1bad681fc1c..528fedfda15e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1728,7 +1728,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
break;
}
}
- trace_generic_delete_lease(inode, fl);
+ trace_generic_delete_lease(inode, victim);
if (victim)
error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 469086b9f99b..0c3f303baf32 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
struct nilfs_inode_info *ii, *n;
+ int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
int defer_iput = false;
spin_lock(&nilfs->ns_inode_lock);
@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
brelse(ii->i_bh);
ii->i_bh = NULL;
list_del_init(&ii->i_dirty);
- if (!ii->vfs_inode.i_nlink) {
+ if (!ii->vfs_inode.i_nlink || during_mount) {
/*
- * Defer calling iput() to avoid a deadlock
- * over I_SYNC flag for inodes with i_nlink == 0
+ * Defer calling iput() to avoid deadlocks if
+ * i_nlink == 0 or mount is not yet finished.
*/
list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
defer_iput = true;
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 9a66ff79ff27..d2f97ecca6a5 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -143,7 +143,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
!(marks_mask & FS_ISDIR & ~marks_ignored_mask))
return false;
- if (event_mask & marks_mask & ~marks_ignored_mask)
+ if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
+ ~marks_ignored_mask)
return true;
return false;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 8490c64d34fe..460c6c37e683 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb)
static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb)
{
- if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
+ if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
return 1;
return 0;
}
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 20e37a3ed26f..db64ce2d4667 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -102,11 +102,11 @@
| OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \
| OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \
| OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \
- | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO)
+ | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \
+ | OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \
| OCFS2_FEATURE_RO_COMPAT_USRQUOTA \
- | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \
- | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
+ | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)
/*
* Heartbeat-only devices are missing journals and other files. The
@@ -179,6 +179,11 @@
#define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000
/*
+ * Append Direct IO support
+ */
+#define OCFS2_FEATURE_INCOMPAT_APPEND_DIO 0x8000
+
+/*
* backup superblock flag is used to indicate that this volume
* has backup superblocks.
*/
@@ -200,10 +205,6 @@
#define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002
#define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004
-/*
- * Append Direct IO support
- */
-#define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008
/* The byte offset of the first backup block will be 1G.
* The following will be 4G, 16G, 64G, 256G and 1T.
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index f2e47fd56751..613372375ada 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -208,40 +208,41 @@
#define INTEL_VLV_D_IDS(info) \
INTEL_VGA_DEVICE(0x0155, info)
-#define _INTEL_BDW_M(gt, id, info) \
- INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
-#define _INTEL_BDW_D(gt, id, info) \
- INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
-
-#define _INTEL_BDW_M_IDS(gt, info) \
- _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
- _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
-
-#define _INTEL_BDW_D_IDS(gt, info) \
- _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
- _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
-
-#define INTEL_BDW_GT12M_IDS(info) \
- _INTEL_BDW_M_IDS(1, info), \
- _INTEL_BDW_M_IDS(2, info)
+#define INTEL_BDW_GT12M_IDS(info) \
+ INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
+ INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
+ INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
+ INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
#define INTEL_BDW_GT12D_IDS(info) \
- _INTEL_BDW_D_IDS(1, info), \
- _INTEL_BDW_D_IDS(2, info)
+ INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
+ INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
+ INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
+ INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
#define INTEL_BDW_GT3M_IDS(info) \
- _INTEL_BDW_M_IDS(3, info)
+ INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x162E, info) /* ULX */
#define INTEL_BDW_GT3D_IDS(info) \
- _INTEL_BDW_D_IDS(3, info)
+ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
#define INTEL_BDW_RSVDM_IDS(info) \
- _INTEL_BDW_M_IDS(4, info)
+ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x163E, info) /* ULX */
#define INTEL_BDW_RSVDD_IDS(info) \
- _INTEL_BDW_D_IDS(4, info)
+ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_M_IDS(info) \
INTEL_BDW_GT12M_IDS(info), \
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h
index 2fbc804e1a45..226f77246a70 100644
--- a/include/dt-bindings/pinctrl/am33xx.h
+++ b/include/dt-bindings/pinctrl/am33xx.h
@@ -13,7 +13,8 @@
#define PULL_DISABLE (1 << 3)
#define INPUT_EN (1 << 5)
-#define SLEWCTRL_FAST (1 << 6)
+#define SLEWCTRL_SLOW (1 << 6)
+#define SLEWCTRL_FAST 0
/* update macro depending on INPUT_EN and PULL_ENA */
#undef PIN_OUTPUT
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index 9c2e4f82381e..5f4d01898c9c 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -18,7 +18,8 @@
#define PULL_DISABLE (1 << 16)
#define PULL_UP (1 << 17)
#define INPUT_EN (1 << 18)
-#define SLEWCTRL_FAST (1 << 19)
+#define SLEWCTRL_SLOW (1 << 19)
+#define SLEWCTRL_FAST 0
#define DS0_PULL_UP_DOWN_EN (1 << 27)
#define PIN_OUTPUT (PULL_DISABLE)
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 8381bbfbc308..68c16a6bedb3 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees);
*/
int clk_get_phase(struct clk *clk);
+/**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q);
+
#else
static inline long clk_get_accuracy(struct clk *clk)
@@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk)
return -ENOTSUPP;
}
+static inline bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+ return p == q;
+}
+
#endif
/**
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 800544bc7bfd..781974afff9f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -166,6 +166,11 @@
#define GITS_TRANSLATER 0x10040
+#define GITS_CTLR_ENABLE (1U << 0)
+#define GITS_CTLR_QUIESCENT (1U << 31)
+
+#define GITS_TYPER_DEVBITS_SHIFT 13
+#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_CBASER_VALID (1UL << 63)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 72ba725ddf9c..5bb074431eb0 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -5,6 +5,7 @@
struct kmem_cache;
struct page;
+struct vm_struct;
#ifdef CONFIG_KASAN
@@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size);
void kasan_slab_alloc(struct kmem_cache *s, void *object);
void kasan_slab_free(struct kmem_cache *s, void *object);
-#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
-
int kasan_module_alloc(void *addr, size_t size);
-void kasan_module_free(void *addr);
+void kasan_free_shadow(const struct vm_struct *vm);
#else /* CONFIG_KASAN */
-#define MODULE_ALIGN 1
-
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_enable_current(void) {}
@@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_module_free(void *addr) {}
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
#endif /* CONFIG_KASAN */
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index f7556261fe3c..4d0cb9bba93e 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
+
+#ifdef CONFIG_KASAN
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
#endif
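MODULE_ALIGN now lives in moduleloader.h and scales the module mapping alignment by the KASAN shadow ratio when KASAN is enabled, falling back to PAGE_SIZE otherwise. A quick check of the arithmetic, assuming 4 KiB pages and the usual shadow scale shift of 3:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define KASAN_SHADOW_SCALE_SHIFT 3	/* one shadow byte covers 8 bytes */

int main(void)
{
	unsigned long module_align = PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT;

	/* 4096 << 3 = 32768, so a module's shadow region is itself page aligned. */
	printf("MODULE_ALIGN with KASAN:    %lu\n", module_align);
	printf("MODULE_ALIGN without KASAN: %lu\n", PAGE_SIZE);
	return 0;
}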
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 8a860f096c35..611a691145c4 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root,
static inline void of_platform_depopulate(struct device *parent) { }
#endif
-#ifdef CONFIG_OF_DYNAMIC
+#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
extern void of_platform_register_reconfig_notifier(void);
#else
static inline void of_platform_register_reconfig_notifier(void) { }
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index ed9489d893a4..856d34dde79b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -649,7 +649,7 @@ struct spi_transfer {
* sequence completes. On some systems, many such sequences can execute as
* as single programmed DMA transfer. On all systems, these messages are
* queued, and might complete after transactions to other devices. Messages
- * sent to a given spi_device are alway executed in FIFO order.
+ * sent to a given spi_device are always executed in FIFO order.
*
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 07a022641996..71880299ed48 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
+
static inline size_t iov_iter_count(struct iov_iter *i)
{
return i->count;
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 7d7acb35603d..0ec598381f97 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
+#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 74db135f9957..f597846ff605 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -70,7 +70,8 @@ enum {
/* data contains off-queue information when !WORK_STRUCT_PWQ */
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
- WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
+ __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
+ WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
/*
* When a work item is off queue, its high bits point to the last
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 9eaaa7884586..decb9a095ae7 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
const struct nft_data *data,
enum nft_data_types type);
+
+/**
+ * struct nft_userdata - user defined data associated with an object
+ *
+ * @len: length of the data
+ * @data: content
+ *
+ * The presence of user data is indicated in an object specific fashion,
+ * so a length of zero can't occur and the value "len" indicates data
+ * of length len + 1.
+ */
+struct nft_userdata {
+ u8 len;
+ unsigned char data[0];
+};
+
/**
* struct nft_set_elem - generic representation of set elements
*
@@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
* @handle: rule handle
* @genmask: generation mask
* @dlen: length of expression data
- * @ulen: length of user data (used for comments)
+ * @udata: user data is appended to the rule
* @data: expression data
*/
struct nft_rule {
@@ -388,7 +404,7 @@ struct nft_rule {
u64 handle:42,
genmask:2,
dlen:12,
- ulen:8;
+ udata:1;
unsigned char data[]
__attribute__((aligned(__alignof__(struct nft_expr))));
};
@@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
return (struct nft_expr *)&rule->data[rule->dlen];
}
-static inline void *nft_userdata(const struct nft_rule *rule)
+static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
{
return (void *)&rule->data[rule->dlen];
}
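struct nft_userdata above keeps user data as a trailing flexible array and stores @len as length - 1, so a single byte can describe up to 256 bytes and a zero-length blob never needs encoding (presence is signalled by the rule's one-bit udata field instead). A userspace sketch of that layout; the helper names are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct userdata {
	unsigned char len;	/* stored as real length - 1 */
	unsigned char data[];	/* flexible array member */
};

static struct userdata *userdata_alloc(const void *src, size_t n)
{
	struct userdata *u;

	if (n == 0 || n > 256)
		return NULL;	/* zero length is deliberately not representable */

	u = malloc(sizeof(*u) + n);
	if (!u)
		return NULL;
	u->len = (unsigned char)(n - 1);
	memcpy(u->data, src, n);
	return u;
}

int main(void)
{
	const char comment[] = "allow ssh from mgmt";
	struct userdata *u = userdata_alloc(comment, sizeof(comment));

	if (!u)
		return 1;
	printf("stored %u byte(s): %s\n", u->len + 1u, (const char *)u->data);
	free(u);
	return 0;
}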
diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h
index 0210797abf2e..dc10c52e0e91 100644
--- a/include/soc/at91/at91sam9_ddrsdr.h
+++ b/include/soc/at91/at91sam9_ddrsdr.h
@@ -92,7 +92,7 @@
#define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */
#define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */
-#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */
+#define AT91_DDRSDRC_MD (7 << 0) /* Memory Device Type */
#define AT91_DDRSDRC_MD_SDR 0
#define AT91_DDRSDRC_MD_LOW_POWER_SDR 1
#define AT91_DDRSDRC_MD_LOW_POWER_DDR 3
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e6efac23c7ea..07735822a28f 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -151,7 +151,7 @@
/* add more to the end as needed */
#define fourcc_mod_code(vendor, val) \
- ((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffL))
+ ((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
/*
* Format Modifier tokens:
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6eed16b92a24..8d1be9073380 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -347,6 +347,9 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION 30
#define I915_PARAM_HAS_BSD2 31
+#define I915_PARAM_REVISION 32
+#define I915_PARAM_SUBSLICE_TOTAL 33
+#define I915_PARAM_EU_TOTAL 34
typedef struct drm_i915_getparam {
int param;
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 60de61fea8e3..c8ed15daad02 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -689,6 +689,7 @@ struct omapdss_dsi_ops {
};
struct omap_dss_device {
+ struct kobject kobj;
struct device *dev;
struct module *owner;
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b78f21caf55a..b0f1c9e5d687 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
const char *mod_name);
#define xenbus_register_frontend(drv) \
- __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+ __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME)
#define xenbus_register_backend(drv) \
- __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
+ __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME)
void xenbus_unregister_driver(struct xenbus_driver *drv);
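The xenbus.h hunk removes the trailing semicolons baked into the registration macros; with the semicolon inside the expansion, a normal-looking call produces an extra empty statement and an if/else around it fails to compile. A small demonstration with an invented macro:

#include <stdio.h>

static int register_thing(const char *name)
{
	printf("registered %s\n", name);
	return 0;
}

/* BAD: the semicolon is part of the expansion. */
#define REGISTER_BAD(n)  register_thing(n);
/* GOOD: the caller supplies the semicolon, as after the patch. */
#define REGISTER_GOOD(n) register_thing(n)

int main(void)
{
	int enabled = 1;

	if (enabled)
		REGISTER_GOOD("frontend");	/* fine */
	else
		printf("skipped\n");

	/* With REGISTER_BAD the same if/else would not compile: it expands to
	 * register_thing("frontend");; and the stray empty statement leaves the
	 * else branch without a matching if.
	 */
	return 0;
}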
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1d1fe9361d29..fc7f4748d34a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
rcu_read_lock();
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
- if (cp == root_cs)
- continue;
-
/* skip the whole subtree if @cp doesn't have any CPU */
if (cpumask_empty(cp->cpus_allowed)) {
pos_css = css_rightmost_descendant(pos_css);
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some CPUs.
*/
- if (cpumask_empty(new_cpus))
+ if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
cpumask_copy(new_cpus, parent->effective_cpus);
/* Skip the whole subtree if the cpumask remains the same. */
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* If it becomes empty, inherit the effective mask of the
* parent, which is guaranteed to have some MEMs.
*/
- if (nodes_empty(*new_mems))
+ if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
*new_mems = parent->effective_mems;
/* Skip the whole subtree if the nodemask remains the same. */
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
+ cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+ cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
mutex_unlock(&cpuset_mutex);
diff --git a/kernel/module.c b/kernel/module.c
index cc93cf68653c..b3d634ed06c9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -56,7 +56,6 @@
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
-#include <linux/kasan.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { }
void __weak module_memfree(void *module_region)
{
vfree(module_region);
- kasan_module_free(module_region);
}
void __weak module_arch_cleanup(struct module *mod)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 45e5cb143d17..4f228024055b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
static struct ftrace_ops *removed_ops;
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
if (!ftrace_rec_count(rec))
rec->flags = 0;
else
- /* Just disable the record (keep REGS state) */
- rec->flags &= ~FTRACE_FL_ENABLED;
+ /*
+ * Just disable the record, but keep the ops TRAMP
+ * and REGS states. The _EN flags must be disabled though.
+ */
+ rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
+ FTRACE_FL_REGS_EN);
}
return FTRACE_UPDATE_MAKE_NOP;
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
static void ftrace_startup_sysctl(void)
{
+ int command;
+
if (unlikely(ftrace_disabled))
return;
/* Force update next time */
saved_ftrace_func = NULL;
/* ftrace_start_up is true if we want ftrace running */
- if (ftrace_start_up)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (ftrace_start_up) {
+ command = FTRACE_UPDATE_CALLS;
+ if (ftrace_graph_active)
+ command |= FTRACE_START_FUNC_RET;
+ ftrace_startup_enable(command);
+ }
}
static void ftrace_shutdown_sysctl(void)
{
+ int command;
+
if (unlikely(ftrace_disabled))
return;
/* ftrace_start_up is true if ftrace is running */
- if (ftrace_start_up)
- ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+ if (ftrace_start_up) {
+ command = FTRACE_DISABLE_CALLS;
+ if (ftrace_graph_active)
+ command |= FTRACE_STOP_FUNC_RET;
+ ftrace_run_update_code(command);
+ }
}
static cycle_t ftrace_update_time;
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
if (ftrace_enabled) {
- ftrace_startup_sysctl();
-
/* we are starting ftrace again */
if (ftrace_ops_list != &ftrace_list_end)
update_ftrace_function();
+ ftrace_startup_sysctl();
+
} else {
/* stopping ftrace calls (just send to ftrace_stub) */
ftrace_trace_function = ftrace_stub;
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = {
ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
-static int ftrace_graph_active;
-
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
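ftrace_graph_active is moved up and given a "#define ... 0" fallback so the sysctl start/stop paths can test it without #ifdefs; when the graph tracer is compiled out, the constant lets the compiler drop the extra FUNC_RET handling entirely. The same shape in standalone form, with an invented ENABLE_GRAPH switch:

#include <stdio.h>

/* #define ENABLE_GRAPH 1 */	/* uncomment to build the "tracer compiled in" variant */

#ifdef ENABLE_GRAPH
static int graph_active = 1;
#else
# define graph_active 0		/* dead-code-eliminated in the disabled build */
#endif

int main(void)
{
	int command = 0x1;		/* stands in for FTRACE_UPDATE_CALLS */

	if (graph_active)		/* always compiles, no #ifdef at the call site */
		command |= 0x2;		/* stands in for FTRACE_START_FUNC_RET */

	printf("command = 0x%x\n", command);
	return 0;
}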
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f28849394791..41ff75b478c6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(flush_work);
+struct cwt_wait {
+ wait_queue_t wait;
+ struct work_struct *work;
+};
+
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+ if (cwait->work != key)
+ return 0;
+ return autoremove_wake_function(wait, mode, sync, key);
+}
+
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
{
+ static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
unsigned long flags;
int ret;
do {
ret = try_to_grab_pending(work, is_dwork, &flags);
/*
- * If someone else is canceling, wait for the same event it
- * would be waiting for before retrying.
+ * If someone else is already canceling, wait for it to
+ * finish. flush_work() doesn't work for PREEMPT_NONE
+ * because we may get scheduled between @work's completion
+ * and the other canceling task resuming and clearing
+ * CANCELING - flush_work() will return false immediately
+ * as @work is no longer busy, try_to_grab_pending() will
+ * return -ENOENT as @work is still being canceled and the
+ * other canceling task won't be able to clear CANCELING as
+ * we're hogging the CPU.
+ *
+ * Let's wait for completion using a waitqueue. As this
+ * may lead to the thundering herd problem, use a custom
+ * wake function which matches @work along with exclusive
+ * wait and wakeup.
*/
- if (unlikely(ret == -ENOENT))
- flush_work(work);
+ if (unlikely(ret == -ENOENT)) {
+ struct cwt_wait cwait;
+
+ init_wait(&cwait.wait);
+ cwait.wait.func = cwt_wakefn;
+ cwait.work = work;
+
+ prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
+ TASK_UNINTERRUPTIBLE);
+ if (work_is_canceling(work))
+ schedule();
+ finish_wait(&cancel_waitq, &cwait.wait);
+ }
} while (unlikely(ret < 0));
/* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
flush_work(work);
clear_work_data(work);
+
+ /*
+ * Paired with prepare_to_wait() above so that either
+ * waitqueue_active() is visible here or !work_is_canceling() is
+ * visible there.
+ */
+ smp_mb();
+ if (waitqueue_active(&cancel_waitq))
+ __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+
return ret;
}
diff --git a/lib/Makefile b/lib/Makefile
index 87eb3bffc283..58f74d2dd396 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,7 +24,7 @@ obj-y += lockref.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
- gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
+ gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
obj-y += string_helpers.o
diff --git a/mm/iov_iter.c b/lib/iov_iter.c
index 827732047da1..9d96e283520c 100644
--- a/mm/iov_iter.c
+++ b/lib/iov_iter.c
@@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
+
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
+{
+ *new = *old;
+ if (new->type & ITER_BVEC)
+ return new->bvec = kmemdup(new->bvec,
+ new->nr_segs * sizeof(struct bio_vec),
+ flags);
+ else
+ /* iovec and kvec have identical layout */
+ return new->iov = kmemdup(new->iov,
+ new->nr_segs * sizeof(struct iovec),
+ flags);
+}
+EXPORT_SYMBOL(dup_iter);
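dup_iter() copies the iterator by value and then kmemdup()s the segment array it points at, so the duplicate stays valid after the caller's iovec goes away. A userspace sketch of that deep-copy step, with simplified struct names and a malloc/memcpy stand-in for kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct seg { void *base; size_t len; };

struct iter {
	const struct seg *segs;
	unsigned long nr_segs;
	size_t count;
};

/* malloc+memcpy stand-in for kmemdup(). */
static void *memdup(const void *src, size_t n)
{
	void *p = malloc(n);

	if (p)
		memcpy(p, src, n);
	return p;
}

/* Copy the iterator, then duplicate the array it references. */
static const void *dup_iter(struct iter *new, const struct iter *old)
{
	*new = *old;
	new->segs = memdup(old->segs, old->nr_segs * sizeof(*old->segs));
	return new->segs;	/* NULL on allocation failure, as in the patch */
}

int main(void)
{
	char a[4], b[8];
	struct seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct iter orig = { segs, 2, sizeof(a) + sizeof(b) }, copy;

	if (!dup_iter(&copy, &orig))
		return 1;
	printf("copy has %lu segs, %zu bytes\n", copy.nr_segs, copy.count);
	free((void *)copy.segs);
	return 0;
}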
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 88c0854bd752..5c94e1012a91 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
if (s->len < s->size) {
len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
- if (seq_buf_can_fit(s, len)) {
+ if (s->len + len < s->size) {
s->len += len;
return 0;
}
@@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
if (s->len < s->size) {
ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
- if (seq_buf_can_fit(s, ret)) {
+ if (s->len + ret < s->size) {
s->len += ret;
return 0;
}
diff --git a/mm/Makefile b/mm/Makefile
index 3c1caa2693bd..15dbe9903c27 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- iov_iter.o debug.o $(mmu-y)
+ debug.o $(mmu-y)
obj-y += init-mm.o
diff --git a/mm/cma.c b/mm/cma.c
index 75016fd1de90..68ecb7a42983 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
return (1UL << (align_order - cma->order_per_bit)) - 1;
}
+/*
+ * Find a PFN aligned to the specified order and return an offset represented in
+ * order_per_bits.
+ */
static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
{
- unsigned int alignment;
-
if (align_order <= cma->order_per_bit)
return 0;
- alignment = 1UL << (align_order - cma->order_per_bit);
- return ALIGN(cma->base_pfn, alignment) -
- (cma->base_pfn >> cma->order_per_bit);
+
+ return (ALIGN(cma->base_pfn, (1UL << align_order))
+ - cma->base_pfn) >> cma->order_per_bit;
}
static unsigned long cma_bitmap_maxno(struct cma *cma)
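The rewritten cma_bitmap_aligned_offset() returns, in units of 2^order_per_bit pages, how far past base_pfn the first PFN aligned to 2^align_order lies; the previous expression mixed PFN and bitmap units. A worked example with invented numbers:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned long aligned_offset(unsigned long base_pfn,
				    unsigned int order_per_bit,
				    unsigned int align_order)
{
	if (align_order <= order_per_bit)
		return 0;
	return (ALIGN(base_pfn, 1UL << align_order) - base_pfn) >> order_per_bit;
}

int main(void)
{
	/* base_pfn 0x2f4a0 is 864 pages below the next 1024-page boundary
	 * (0x2f800); with one page per bitmap bit the offset is 864. */
	unsigned long off = aligned_offset(0x2f4a0, 0, 10);

	printf("offset = %lu bitmap bits\n", off);
	return 0;
}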
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fc00c8cb5a82..626e93db28ba 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1295,8 +1295,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
* Avoid grouping on DSO/COW pages in specific and RO pages
* in general, RO pages shouldn't hurt as much anyway since
* they can be in shared cache state.
+ *
+ * FIXME! This checks "pmd_dirty()" as an approximation of
+ * "is this a read-only page", since checking "pmd_write()"
+ * is even more broken. We haven't actually turned this into
+ * a writable page, so pmd_write() will always be false.
*/
- if (!pmd_write(pmd))
+ if (!pmd_dirty(pmd))
flags |= TNF_NO_GROUP;
/*
@@ -1482,6 +1487,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
pmd_t entry;
+ ret = 1;
/*
* Avoid trapping faults against the zero page. The read-only
@@ -1490,11 +1496,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
*/
if (prot_numa && is_huge_zero_pmd(*pmd)) {
spin_unlock(ptl);
- return 0;
+ return ret;
}
if (!prot_numa || !pmd_protnone(*pmd)) {
- ret = 1;
entry = pmdp_get_and_clear_notify(mm, addr, pmd);
entry = pmd_modify(entry, newprot);
ret = HPAGE_PMD_NR;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a9ac6c26832..c41b2a0ee273 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
__SetPageHead(page);
__ClearPageReserved(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
- __SetPageTail(p);
/*
* For gigantic hugepages allocated through bootmem at
* boot, it's safer to be consistent with the not-gigantic
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
__ClearPageReserved(p);
set_page_count(p, 0);
p->first_page = page;
+ /* Make sure p->first_page is always valid for PageTail() */
+ smp_wmb();
+ __SetPageTail(p);
}
}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 78fee632a7ee..936d81661c47 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -29,6 +29,7 @@
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
#include <linux/kasan.h>
#include "kasan.h"
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size)
GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
__builtin_return_address(0));
- return ret ? 0 : -ENOMEM;
+
+ if (ret) {
+ find_vm_area(addr)->flags |= VM_KASAN;
+ return 0;
+ }
+
+ return -ENOMEM;
}
-void kasan_module_free(void *addr)
+void kasan_free_shadow(const struct vm_struct *vm)
{
- vfree(kasan_mem_to_shadow(addr));
+ if (vm->flags & VM_KASAN)
+ vfree(kasan_mem_to_shadow(vm->addr));
}
static void register_global(struct kasan_global *global)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9fe07692eaad..b34ef4a32a3b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
* on for the root memcg is enough.
*/
if (cgroup_on_dfl(root_css->cgroup))
- mem_cgroup_from_css(root_css)->use_hierarchy = true;
+ root_mem_cgroup->use_hierarchy = true;
+ else
+ root_mem_cgroup->use_hierarchy = false;
}
static u64 memory_current_read(struct cgroup_subsys_state *css,
diff --git a/mm/memory.c b/mm/memory.c
index 8068893697bb..411144f977b1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3072,8 +3072,13 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
* Avoid grouping on DSO/COW pages in specific and RO pages
* in general, RO pages shouldn't hurt as much anyway since
* they can be in shared cache state.
+ *
+ * FIXME! This checks "pmd_dirty()" as an approximation of
+ * "is this a read-only page", since checking "pmd_write()"
+ * is even more broken. We haven't actually turned this into
+ * a writable page, so pmd_write() will always be false.
*/
- if (!pte_write(pte))
+ if (!pte_dirty(pte))
flags |= TNF_NO_GROUP;
/*
diff --git a/mm/mlock.c b/mm/mlock.c
index 73cf0987088c..8a54cd214925 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -26,10 +26,10 @@
int can_do_mlock(void)
{
- if (capable(CAP_IPC_LOCK))
- return 1;
if (rlimit(RLIMIT_MEMLOCK) != 0)
return 1;
+ if (capable(CAP_IPC_LOCK))
+ return 1;
return 0;
}
EXPORT_SYMBOL(can_do_mlock);
diff --git a/mm/nommu.c b/mm/nommu.c
index 3e67e7538ecf..3fba2dc97c44 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -62,6 +62,7 @@ void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
+EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7abfa70cdc1a..40e29429e7b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
goto out;
}
/* Exhausted what can be done so it's blamo time */
- if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false))
+ if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
+ || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
*did_some_progress = 1;
out:
oom_zonelist_unlock(ac->zonelist, gfp_mask);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35b25e1340ca..49abccf29a29 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr)
spin_unlock(&vmap_area_lock);
vmap_debug_free_range(va->va_start, va->va_end);
+ kasan_free_shadow(vm);
free_unmap_vmap_area(va);
vm->size -= PAGE_SIZE;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 66e08040ced7..32d710eaf1fc 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop)
goto inval_skb;
}
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 2c8d98e728c0..145a50c4d566 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag);
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
struct iphdr iph;
+ int netoff;
u32 len;
if (skb->protocol != htons(ETH_P_IP))
return skb;
- if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
+ netoff = skb_network_offset(skb);
+
+ if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
return skb;
if (iph.ihl < 5 || iph.version != 4)
return skb;
len = ntohs(iph.tot_len);
- if (skb->len < len || len < (iph.ihl * 4))
+ if (skb->len < netoff + len || len < (iph.ihl * 4))
return skb;
if (ip_is_fragment(&iph)) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
- if (!pskb_may_pull(skb, iph.ihl*4))
+ if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
return skb;
- if (pskb_trim_rcsum(skb, len))
+ if (pskb_trim_rcsum(skb, netoff + len))
return skb;
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (ip_defrag(skb, user))
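ip_check_defrag() now folds skb_network_offset() into every length check and pull, so a packet whose IP header does not start at offset 0 is validated against the right bounds. A stripped-down sketch of the offset-aware check, with an invented buffer layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Returns 0 if a header of hdr_len bytes starting at netoff fits in the buffer. */
static int check_header(const uint8_t *buf, size_t buflen,
			size_t netoff, size_t hdr_len)
{
	(void)buf;
	if (buflen < netoff + hdr_len)
		return -1;	/* would read past the end of the packet */
	return 0;
}

int main(void)
{
	uint8_t pkt[34] = { 0 };	/* 14-byte Ethernet header + 20-byte IPv4 header */
	size_t netoff = 14;

	/* Comparing buflen against hdr_len alone would accept a packet whose IP
	 * header starts at netoff but is truncated; adding netoff on both sides
	 * of the comparison is what the patch does. */
	printf("header fits: %s\n",
	       check_header(pkt, sizeof(pkt), netoff, 20) == 0 ? "yes" : "no");
	return 0;
}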
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 31d8c71986b4..5cd99271d3a6 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
kfree_skb(skb);
}
-static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk,
- const struct sk_buff *skb,
- int ee_origin)
+/* IPv4 supports cmsg on all imcp errors and some timestamps
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+ struct sk_buff *skb,
+ int ee_origin)
{
- struct in_pktinfo *info = PKTINFO_SKB_CB(skb);
+ struct in_pktinfo *info;
+
+ if (ee_origin == SO_EE_ORIGIN_ICMP)
+ return true;
- if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) ||
- (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+ if (ee_origin == SO_EE_ORIGIN_LOCAL)
+ return false;
+
+ /* Support IP_PKTINFO on tstamp packets if requested, to correlate
+ * timestamp with egress dev. Not possible for packets without dev
+ * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+ */
+ if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
(!skb->dev))
return false;
+ info = PKTINFO_SKB_CB(skb);
info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
info->ipi_ifindex = skb->dev->ifindex;
return true;
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
serr = SKB_EXT_ERR(skb);
- if (sin && skb->len) {
+ if (sin && serr->port) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
serr->addr_offset);
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
sin = &errhdr.offender;
memset(sin, 0, sizeof(*sin));
- if (skb->len &&
- (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
- ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
+ if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
if (inet_sk(sk)->cmsg_flags)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index e9f66e1cda50..208d5439e59b 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
kgid_t low, high;
int ret = 0;
+ if (sk->sk_family == AF_INET6)
+ sk->sk_ipv6only = 1;
+
inet_get_ping_group_range_net(net, &low, &high);
if (gid_lte(low, group) && gid_lte(group, high))
return 0;
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
if (addr_len < sizeof(*addr))
return -EINVAL;
+ if (addr->sin_family != AF_INET &&
+ !(addr->sin_family == AF_UNSPEC &&
+ addr->sin_addr.s_addr == htonl(INADDR_ANY)))
+ return -EAFNOSUPPORT;
+
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
return -EINVAL;
if (addr->sin6_family != AF_INET6)
- return -EINVAL;
+ return -EAFNOSUPPORT;
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET)
- return -EINVAL;
+ return -EAFNOSUPPORT;
daddr = usin->sin_addr.s_addr;
/* no remote port */
} else {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9d72a0fcd928..995a2259bcfc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
int large_allowed)
{
struct tcp_sock *tp = tcp_sk(sk);
- u32 new_size_goal, size_goal, hlen;
+ u32 new_size_goal, size_goal;
if (!large_allowed || !sk_can_gso(sk))
return mss_now;
- /* Maybe we should/could use sk->sk_prot->max_header here ? */
- hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
- inet_csk(sk)->icsk_ext_hdr_len +
- tp->tcp_header_len;
-
- new_size_goal = sk->sk_gso_max_size - 1 - hlen;
+ /* Note : tcp_tso_autosize() will eventually split this later */
+ new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
/* We try hard to avoid divides here */
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index c215be70cac0..ace8daca5c83 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
kfree_skb(skb);
}
-static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb)
+/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
+ *
+ * At one point, excluding local errors was a quick test to identify icmp/icmp6
+ * errors. This is no longer true, but the test remained, so the v6 stack,
+ * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
+ struct sock_exterr_skb *serr)
{
- int ifindex = skb->dev ? skb->dev->ifindex : -1;
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+ serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6)
+ return true;
+
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
+ return false;
+
+ if (!skb->dev)
+ return false;
if (skb->protocol == htons(ETH_P_IPV6))
- IP6CB(skb)->iif = ifindex;
+ IP6CB(skb)->iif = skb->dev->ifindex;
else
- PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex;
+ PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
+
+ return true;
}
/*
@@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
serr = SKB_EXT_ERR(skb);
- if (sin && skb->len) {
+ if (sin && serr->port) {
const unsigned char *nh = skb_network_header(skb);
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
@@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
memset(sin, 0, sizeof(*sin));
- if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) {
+
+ if (ip6_datagram_support_cmsg(skb, serr)) {
sin->sin6_family = AF_INET6;
- if (np->rxopt.all) {
- if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
- serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
- ip6_datagram_prepare_pktinfo_errqueue(skb);
+ if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
- }
if (skb->protocol == htons(ETH_P_IPV6)) {
sin->sin6_addr = ipv6_hdr(skb)->saddr;
if (np->rxopt.all)
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index bd46f736f61d..a2dfff6ff227 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
- if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
- u->sin6_family != AF_INET6) {
+ if (msg->msg_namelen < sizeof(*u))
return -EINVAL;
+ if (u->sin6_family != AF_INET6) {
+ return -EAFNOSUPPORT;
}
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != u->sin6_scope_id) {
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index c47ffd7a0a70..d93ceeb3ef04 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
return;
}
+ if (!(flags & IP_VS_CONN_F_TEMPLATE))
+ kfree(param->pe_data);
}
if (opt)
@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
);
#endif
+ ip_vs_pe_put(param.pe);
return 0;
/* Error exit */
out:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 199fd0f27b0e..6ab777912237 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
{
- rule->genmask = 0;
+ rule->genmask &= ~(1 << gencursor_next(net));
}
static int
@@ -1711,9 +1711,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
}
nla_nest_end(skb, list);
- if (rule->ulen &&
- nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
- goto nla_put_failure;
+ if (rule->udata) {
+ struct nft_userdata *udata = nft_userdata(rule);
+ if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1,
+ udata->data) < 0)
+ goto nla_put_failure;
+ }
nlmsg_end(skb, nlh);
return 0;
@@ -1896,11 +1899,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
struct nft_table *table;
struct nft_chain *chain;
struct nft_rule *rule, *old_rule = NULL;
+ struct nft_userdata *udata;
struct nft_trans *trans = NULL;
struct nft_expr *expr;
struct nft_ctx ctx;
struct nlattr *tmp;
- unsigned int size, i, n, ulen = 0;
+ unsigned int size, i, n, ulen = 0, usize = 0;
int err, rem;
bool create;
u64 handle, pos_handle;
@@ -1968,12 +1972,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
n++;
}
}
+ /* Check for overflow of dlen field */
+ err = -EFBIG;
+ if (size >= 1 << 12)
+ goto err1;
- if (nla[NFTA_RULE_USERDATA])
+ if (nla[NFTA_RULE_USERDATA]) {
ulen = nla_len(nla[NFTA_RULE_USERDATA]);
+ if (ulen > 0)
+ usize = sizeof(struct nft_userdata) + ulen;
+ }
err = -ENOMEM;
- rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
if (rule == NULL)
goto err1;
@@ -1981,10 +1992,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
rule->handle = handle;
rule->dlen = size;
- rule->ulen = ulen;
+ rule->udata = ulen ? 1 : 0;
- if (ulen)
- nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen);
+ if (ulen) {
+ udata = nft_userdata(rule);
+ udata->len = ulen - 1;
+ nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen);
+ }
expr = nft_expr_first(rule);
for (i = 0; i < n; i++) {
@@ -2031,12 +2045,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
err3:
list_del_rcu(&rule->list);
- if (trans) {
- list_del_rcu(&nft_trans_rule(trans)->list);
- nft_rule_clear(net, nft_trans_rule(trans));
- nft_trans_destroy(trans);
- chain->use++;
- }
err2:
nf_tables_rule_destroy(&ctx, rule);
err1:
@@ -3612,12 +3620,11 @@ static int nf_tables_commit(struct sk_buff *skb)
&te->elem,
NFT_MSG_DELSETELEM, 0);
te->set->ops->get(te->set, &te->elem);
- te->set->ops->remove(te->set, &te->elem);
nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
- if (te->elem.flags & NFT_SET_MAP) {
- nft_data_uninit(&te->elem.data,
- te->set->dtype);
- }
+ if (te->set->flags & NFT_SET_MAP &&
+ !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
+ nft_data_uninit(&te->elem.data, te->set->dtype);
+ te->set->ops->remove(te->set, &te->elem);
nft_trans_destroy(trans);
break;
}
@@ -3658,7 +3665,7 @@ static int nf_tables_abort(struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
struct nft_trans *trans, *next;
- struct nft_set *set;
+ struct nft_trans_elem *te;
list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
switch (trans->msg_type) {
@@ -3719,9 +3726,13 @@ static int nf_tables_abort(struct sk_buff *skb)
break;
case NFT_MSG_NEWSETELEM:
nft_trans_elem_set(trans)->nelems--;
- set = nft_trans_elem_set(trans);
- set->ops->get(set, &nft_trans_elem(trans));
- set->ops->remove(set, &nft_trans_elem(trans));
+ te = (struct nft_trans_elem *)trans->data;
+ te->set->ops->get(te->set, &te->elem);
+ nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
+ if (te->set->flags & NFT_SET_MAP &&
+ !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
+ nft_data_uninit(&te->elem.data, te->set->dtype);
+ te->set->ops->remove(te->set, &te->elem);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSETELEM:
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 1279cd85663e..213584cf04b3 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -123,7 +123,7 @@ static void
nft_target_set_tgchk_param(struct xt_tgchk_param *par,
const struct nft_ctx *ctx,
struct xt_target *target, void *info,
- union nft_entry *entry, u8 proto, bool inv)
+ union nft_entry *entry, u16 proto, bool inv)
{
par->net = ctx->net;
par->table = ctx->table->name;
@@ -137,7 +137,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
break;
case NFPROTO_BRIDGE:
- entry->ebt.ethproto = proto;
+ entry->ebt.ethproto = (__force __be16)proto;
entry->ebt.invflags = inv ? EBT_IPROTO : 0;
break;
}
@@ -171,7 +171,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
[NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 },
};
-static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv)
+static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
{
struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
u32 flags;
@@ -203,7 +203,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
struct xt_target *target = expr->ops->data;
struct xt_tgchk_param par;
size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
- u8 proto = 0;
+ u16 proto = 0;
bool inv = false;
union nft_entry e = {};
int ret;
@@ -334,7 +334,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
static void
nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
struct xt_match *match, void *info,
- union nft_entry *entry, u8 proto, bool inv)
+ union nft_entry *entry, u16 proto, bool inv)
{
par->net = ctx->net;
par->table = ctx->table->name;
@@ -348,7 +348,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
break;
case NFPROTO_BRIDGE:
- entry->ebt.ethproto = proto;
+ entry->ebt.ethproto = (__force __be16)proto;
entry->ebt.invflags = inv ? EBT_IPROTO : 0;
break;
}
@@ -385,7 +385,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
struct xt_match *match = expr->ops->data;
struct xt_mtchk_param par;
size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
- u8 proto = 0;
+ u16 proto = 0;
bool inv = false;
union nft_entry e = {};
int ret;
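The proto parameter is widened from u8 to u16 because the bridge family carries an Ethernet protocol number, and ethertypes are 16-bit values (real ones start at 0x0600), so an 8-bit field silently truncated them to zero. A tiny standalone illustration of the truncation the change avoids (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ethproto = 0x0800;		/* ETH_P_IP: IPv4 over Ethernet */
	uint8_t truncated = (uint8_t)ethproto;	/* what the old u8 parameter kept */

	/* prints "full: 0x0800 truncated: 0x00" */
	printf("full: 0x%04x truncated: 0x%02x\n", ethproto, truncated);
	return 0;
}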
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5bf1e968a728..f8db7064d81c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3123,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
return 0;
}
-static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
+static void packet_dev_mclist_delete(struct net_device *dev,
+ struct packet_mclist **mlp)
{
- for ( ; i; i = i->next) {
- if (i->ifindex == dev->ifindex)
- packet_dev_mc(dev, i, what);
+ struct packet_mclist *ml;
+
+ while ((ml = *mlp) != NULL) {
+ if (ml->ifindex == dev->ifindex) {
+ packet_dev_mc(dev, ml, -1);
+ *mlp = ml->next;
+ kfree(ml);
+ } else
+ mlp = &ml->next;
}
}
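packet_dev_mclist_delete() walks the list through a pointer to the link itself, so a matching entry can be unlinked and freed in place without tracking a separate previous-node pointer. The same idiom in a self-contained sketch (hypothetical node type and plain libc, not the af_packet structures):

#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

/* Remove and free every node whose key matches, advancing through the
 * link pointers rather than the nodes themselves. */
void delete_matching(struct node **headp, int key)
{
	struct node *n;

	while ((n = *headp) != NULL) {
		if (n->key == key) {
			*headp = n->next;	/* unlink: the link now skips n */
			free(n);
		} else {
			headp = &n->next;	/* keep the link, move to the next one */
		}
	}
}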
@@ -3204,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
packet_dev_mc(dev, ml, -1);
kfree(ml);
}
- rtnl_unlock();
- return 0;
+ break;
}
}
rtnl_unlock();
- return -EADDRNOTAVAIL;
+ return 0;
}
static void packet_flush_mclist(struct sock *sk)
@@ -3559,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this,
switch (msg) {
case NETDEV_UNREGISTER:
if (po->mclist)
- packet_dev_mclist(dev, po->mclist, -1);
+ packet_dev_mclist_delete(dev, &po->mclist);
/* fallthrough */
case NETDEV_DOWN:
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 5394b6be46ec..0610efa83d72 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk)
_leave("UDP socket errqueue empty");
return;
}
- if (!skb->len) {
+ serr = SKB_EXT_ERR(skb);
+ if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
_leave("UDP empty message");
kfree_skb(skb);
return;
@@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
rxrpc_new_skb(skb);
- serr = SKB_EXT_ERR(skb);
addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
port = serr->port;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a4cf364316de..14f09b3cb87c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr)
/* Clean up all queues, except inputq: */
__skb_queue_purge(&l_ptr->outqueue);
__skb_queue_purge(&l_ptr->deferred_queue);
- skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
- if (!skb_queue_empty(&l_ptr->inputq))
+ if (!owner->inputq)
+ owner->inputq = &l_ptr->inputq;
+ skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
+ if (!skb_queue_empty(owner->inputq))
owner->action_flags |= TIPC_MSG_EVT;
- owner->inputq = &l_ptr->inputq;
l_ptr->next_out = NULL;
l_ptr->unacked_window = 0;
l_ptr->checkpoint = 1;
diff --git a/sound/core/control.c b/sound/core/control.c
index 35324a8e83c8..eeb691d1911f 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1170,6 +1170,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
if (info->count < 1)
return -EINVAL;
+ if (!*info->id.name)
+ return -EINVAL;
+ if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
+ return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
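The two added checks reject an empty control name and a name that fills the whole id.name buffer without a terminating NUL; bounding strnlen() by the buffer size is what catches the second case. A standalone sketch of the same test (the buffer length here is an assumed constant for illustration, not the ALSA ABI definition):

#include <stdbool.h>
#include <string.h>

#define NAME_BUF_LEN 44	/* assumed buffer size for this sketch */

/* Accept only a non-empty name that is NUL-terminated inside the buffer. */
bool ctl_name_ok(const char *name)
{
	if (!*name)
		return false;					/* empty name */
	return strnlen(name, NAME_BUF_LEN) < NAME_BUF_LEN;	/* leaves room for NUL */
}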
diff --git a/sound/firewire/dice/dice-interface.h b/sound/firewire/dice/dice-interface.h
index de7602bd69b5..27b044f84c81 100644
--- a/sound/firewire/dice/dice-interface.h
+++ b/sound/firewire/dice/dice-interface.h
@@ -299,23 +299,23 @@
#define RX_ISOCHRONOUS 0x008
/*
+ * Index of first quadlet to be interpreted; read/write. If > 0, that many
+ * quadlets at the beginning of each data block will be ignored, and all the
+ * audio and MIDI quadlets will follow.
+ */
+#define RX_SEQ_START 0x00c
+
+/*
* The number of audio channels; read-only. There will be one quadlet per
* channel.
*/
-#define RX_NUMBER_AUDIO 0x00c
+#define RX_NUMBER_AUDIO 0x010
/*
* The number of MIDI ports, 0-8; read-only. If > 0, there will be one
* additional quadlet in each data block, following the audio quadlets.
*/
-#define RX_NUMBER_MIDI 0x010
-
-/*
- * Index of first quadlet to be interpreted; read/write. If > 0, that many
- * quadlets at the beginning of each data block will be ignored, and all the
- * audio and MIDI quadlets will follow.
- */
-#define RX_SEQ_START 0x014
+#define RX_NUMBER_MIDI 0x014
/*
* Names of all audio channels; read-only. Quadlets are byte-swapped. Names
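After this swap the per-stream rx register block reads, in address order: iso channel, sequence start, audio-channel count, MIDI-port count, then the channel names; the dice-proc.c hunk below reorders its read-back struct to match. A compact summary of the corrected offsets (the enum name is invented for readability; the values come from the macros above):

/* rx block layout after the change, byte offsets from the per-stream base */
enum rx_layout_sketch {
	RX_OFF_ISOCHRONOUS	= 0x008,
	RX_OFF_SEQ_START	= 0x00c,	/* now precedes the counts */
	RX_OFF_NUMBER_AUDIO	= 0x010,
	RX_OFF_NUMBER_MIDI	= 0x014,	/* names follow */
};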
diff --git a/sound/firewire/dice/dice-proc.c b/sound/firewire/dice/dice-proc.c
index ecfe20fd4de5..f5c1d1bced59 100644
--- a/sound/firewire/dice/dice-proc.c
+++ b/sound/firewire/dice/dice-proc.c
@@ -99,9 +99,9 @@ static void dice_proc_read(struct snd_info_entry *entry,
} tx;
struct {
u32 iso;
+ u32 seq_start;
u32 number_audio;
u32 number_midi;
- u32 seq_start;
char names[RX_NAMES_SIZE];
u32 ac3_caps;
u32 ac3_enable;
@@ -204,10 +204,10 @@ static void dice_proc_read(struct snd_info_entry *entry,
break;
snd_iprintf(buffer, "rx %u:\n", stream);
snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso);
+ snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start);
snd_iprintf(buffer, " audio channels: %u\n",
buf.rx.number_audio);
snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi);
- snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start);
if (quadlets >= 68) {
dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE);
snd_iprintf(buffer, " names: %s\n", buf.rx.names);
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
index 5f17b77ee152..f0e4d502d604 100644
--- a/sound/firewire/iso-resources.c
+++ b/sound/firewire/iso-resources.c
@@ -26,7 +26,7 @@
int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
{
r->channels_mask = ~0uLL;
- r->unit = fw_unit_get(unit);
+ r->unit = unit;
mutex_init(&r->mutex);
r->allocated = false;
@@ -42,7 +42,6 @@ void fw_iso_resources_destroy(struct fw_iso_resources *r)
{
WARN_ON(r->allocated);
mutex_destroy(&r->mutex);
- fw_unit_put(r->unit);
}
EXPORT_SYMBOL(fw_iso_resources_destroy);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index a2ce773bdc62..17c2637d842c 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1164,7 +1164,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
}
}
- if (!bus->no_response_fallback)
+ if (bus->no_response_fallback)
return -1;
if (!chip->polling_mode && chip->poll_count < 2) {
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b680b4ec6331..fe18071bf93a 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -692,7 +692,23 @@ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
{
unsigned int caps = query_amp_caps(codec, nid, dir);
int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
- snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
+
+ if (get_wcaps(codec, nid) & AC_WCAP_STEREO)
+ snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
+ else
+ snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
+}
+
+/* update the amp, doing in stereo or mono depending on NID */
+static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
+ unsigned int mask, unsigned int val)
+{
+ if (get_wcaps(codec, nid) & AC_WCAP_STEREO)
+ return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
+ mask, val);
+ else
+ return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
+ mask, val);
}
/* calculate amp value mask we can modify;
@@ -732,7 +748,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
return;
val &= mask;
- snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
+ update_amp(codec, nid, dir, idx, mask, val);
}
static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
@@ -4424,13 +4440,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
has_amp = nid_has_mute(codec, mix, HDA_INPUT);
for (i = 0; i < nums; i++) {
if (has_amp)
- snd_hda_codec_amp_stereo(codec, mix,
- HDA_INPUT, i,
- 0xff, HDA_AMP_MUTE);
+ update_amp(codec, mix, HDA_INPUT, i,
+ 0xff, HDA_AMP_MUTE);
else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
- snd_hda_codec_amp_stereo(codec, conn[i],
- HDA_OUTPUT, 0,
- 0xff, HDA_AMP_MUTE);
+ update_amp(codec, conn[i], HDA_OUTPUT, 0,
+ 0xff, HDA_AMP_MUTE);
}
}
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 1589c9bcce3e..dd2b3d92071f 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
+ SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
{} /* terminator */
@@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec)
return -ENOMEM;
spec->gen.automute_hook = cs_automute;
+ codec->single_adc_amp = 1;
snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
cs420x_fixups);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index fd3ed18670e9..da67ea8645a6 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -223,6 +223,7 @@ enum {
CXT_PINCFG_LENOVO_TP410,
CXT_PINCFG_LEMOTE_A1004,
CXT_PINCFG_LEMOTE_A1205,
+ CXT_PINCFG_COMPAQ_CQ60,
CXT_FIXUP_STEREO_DMIC,
CXT_FIXUP_INC_MIC_BOOST,
CXT_FIXUP_HEADPHONE_MIC_PIN,
@@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_PINS,
.v.pins = cxt_pincfg_lemote,
},
+ [CXT_PINCFG_COMPAQ_CQ60] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ /* 0x17 was falsely set up as a mic; it should be 0x1d */
+ { 0x17, 0x400001f0 },
+ { 0x1d, 0x97a70120 },
+ { }
+ }
+ },
[CXT_FIXUP_STEREO_DMIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_stereo_dmic,
@@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = {
};
static const struct snd_pci_quirk cxt5051_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
{}
};
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 75870c0ea2c9..91eb3aef7f02 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1049,7 +1049,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
enum spdif_txrate index, bool round)
{
const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
- bool is_sysclk = clk == spdif_priv->sysclk;
+ bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk);
u64 rate_ideal, rate_actual, sub;
u32 sysclk_dfmin, sysclk_dfmax;
u32 txclk_df, sysclk_df, arate;
@@ -1143,7 +1143,7 @@ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv,
spdif_priv->txclk_src[index], rate[index]);
dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n",
spdif_priv->txclk_df[index], rate[index]);
- if (spdif_priv->txclk[index] == spdif_priv->sysclk)
+ if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk))
dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n",
spdif_priv->sysclk_df[index], rate[index]);
dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n",
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index def7d8260c4e..d19483081f9b 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -579,7 +579,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
} else {
- if (priv->extclk == priv->clk) {
+ if (clk_is_match(priv->extclk, priv->clk)) {
devm_clk_put(&pdev->dev, priv->extclk);
priv->extclk = ERR_PTR(-EINVAL);
} else {
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 67d476548dcf..07f984d5f516 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
}
},
+{
+ USB_DEVICE(0x0582, 0x0159),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "Roland", */
+ /* .product_name = "UA-22", */
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = & (const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
/* this catches most recent vendor-specific Roland devices */
{
.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 3ed7c0476d48..2e2ba2efa0d9 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c
$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
$(ECHO) " CC " $@
- $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@
+ $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
$(QUIET) $(STRIPCMD) $@
$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index e238c9559caf..8d5d1d2ee7c1 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -30,7 +30,7 @@ static int execveat_(int fd, const char *path, char **argv, char **envp,
#ifdef __NR_execveat
return syscall(__NR_execveat, fd, path, argv, envp, flags);
#else
- errno = -ENOSYS;
+ errno = ENOSYS;
return -1;
#endif
}
@@ -234,6 +234,14 @@ static int run_tests(void)
int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC);
int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC);
+ /* Check if we have execveat at all, and bail early if not */
+ errno = 0;
+ execveat_(-1, NULL, NULL, NULL, 0);
+ if (errno == ENOSYS) {
+ printf("[FAIL] ENOSYS calling execveat - no kernel support?\n");
+ return 1;
+ }
+
/* Change file position to confirm it doesn't affect anything */
lseek(fd, 10, SEEK_SET);