path: root/drivers/gpu/drm
author		Sean Paul <seanpaul@chromium.org>	2018-10-24 14:26:04 -0400
committer	Sean Paul <seanpaul@chromium.org>	2018-10-24 14:26:04 -0400
commit		6542e9adc0da1e23d81ff9314265a029b961906d (patch)
tree		35ba360a7150d8b042a9fd75ab54ef83b34a2b95 /drivers/gpu/drm
parent		2b02a05bdc3a62d36e0d0b015351897109e25991 (diff)
parent		f2bfc71aee75feff33ca659322b72ffeed5a243d (diff)
Merge drm/drm-next into drm-misc-next
4.19 is out, Lyude asked for a backmerge, and it's been a while. All very good reasons on their own :-)

Signed-off-by: Sean Paul <seanpaul@chromium.org>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/Kconfig20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c166
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c192
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c187
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c441
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c540
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c65
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c95
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c23
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c23
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c44
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c178
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c37
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c8
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c164
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_shared.h49
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c58
-rw-r--r--drivers/gpu/drm/amd/display/modules/stats/stats.c8
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h12
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h22
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h43
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h88
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h10
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c96
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c34
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c42
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c46
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c30
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c14
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c82
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c161
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c193
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c156
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h121
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c25
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c31
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c55
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c61
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c22
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c45
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c28
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c131
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h8
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c83
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h16
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c25
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c347
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h13
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c33
-rw-r--r--drivers/gpu/drm/drm_panel.c10
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c14
-rw-r--r--drivers/gpu/drm/drm_syncobj.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c27
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c26
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c107
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c46
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c137
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c56
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c300
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h37
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c40
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h33
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c50
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c20
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c125
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h57
-rw-r--r--drivers/gpu/drm/i915/i915_request.c14
-rw-r--r--drivers/gpu/drm/i915/i915_request.h8
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c13
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c53
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c8
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c38
-rw-r--r--drivers/gpu/drm/i915/intel_display.c779
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c32
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c26
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c41
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h47
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c3
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c4
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c6
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c80
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c57
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c147
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c19
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c23
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c101
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c516
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c202
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_lrc.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_workarounds.c5
-rw-r--r--drivers/gpu/drm/mediatek/Makefile5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c131
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi_regs.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_phy.c235
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_phy.h60
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c212
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c226
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c20
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c12
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h1413
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c144
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h12
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c38
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c161
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c737
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h115
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c257
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c125
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c109
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c323
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h139
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c45
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c499
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c285
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h44
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h116
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c4
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c49
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h5
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c220
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c179
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c35
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c201
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.h93
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c2
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace_points.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c3
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c78
-rw-r--r--drivers/gpu/drm/tegra/dc.c73
-rw-r--r--drivers/gpu/drm/tegra/dc.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c3
-rw-r--r--drivers/gpu/drm/tegra/drm.c35
-rw-r--r--drivers/gpu/drm/tegra/drm.h4
-rw-r--r--drivers/gpu/drm/tegra/fb.c24
-rw-r--r--drivers/gpu/drm/tegra/hub.c19
-rw-r--r--drivers/gpu/drm/tegra/hub.h1
-rw-r--r--drivers/gpu/drm/tegra/sor.c110
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c14
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h3
-rw-r--r--drivers/gpu/drm/ttm/Makefile4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c25
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile4
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_lock.c (renamed from drivers/gpu/drm/ttm/ttm_lock.c)15
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_lock.h248
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c (renamed from drivers/gpu/drm/ttm/ttm_object.c)97
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h375
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c50
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h147
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1288
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c199
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c124
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c48
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c770
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h227
402 files changed, 12811 insertions, 8557 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 736b7e67e4ec..4385f00e1d05 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -110,6 +110,26 @@ config DRM_FBDEV_OVERALLOC
is 100. Typical values for double buffering will be 200,
triple buffering 300.
+config DRM_FBDEV_LEAK_PHYS_SMEM
+ bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
+ depends on DRM_FBDEV_EMULATION && EXPERT
+ default n
+ help
+ In order to keep user-space compatibility, in certain use-cases we
+ want to keep leaking the fbdev physical address to the user-space
+ program handling the fbdev buffer.
+ This affects, among others, Amlogic, Allwinner and Rockchip devices
+ with ARM Mali GPUs using a userspace blob.
+ This option is not supported by upstream developers; it should be
+ removed as soon as possible and is to be considered broken, legacy
+ behaviour for a modern fbdev device driver.
+
+ When using this option, please send any bug reports to the
+ proprietary software vendor that requires it.
+
+ If in doubt, say "N" or spread the word to your closed source
+ library vendor.
+
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
depends on DRM
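
The help text above describes a policy, not a mechanism. As a rough sketch of how an fbdev emulation path could honour the new symbol (the helper name is hypothetical; only CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM comes from this patch):

#include <linux/fb.h>

/* Hypothetical helper: publish the buffer's physical address only when
 * the legacy option is enabled; otherwise report 0 so nothing leaks.
 */
static void example_fbdev_fill_smem_start(struct fb_info *info,
					  dma_addr_t paddr)
{
#if defined(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
	/* Legacy user-space blobs read this address directly. */
	info->fix.smem_start = (unsigned long)paddr;
#else
	/* Modern behaviour: never leak physical addresses. */
	info->fix.smem_start = 0;
#endif
}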
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6cb35e3dab30..d0102cfc8efb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -81,6 +81,23 @@
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
+#define MAX_GPU_INSTANCE 16
+
+struct amdgpu_gpu_instance
+{
+ struct amdgpu_device *adev;
+ int mgpu_fan_enabled;
+};
+
+struct amdgpu_mgpu_info
+{
+ struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
+ struct mutex mutex;
+ uint32_t num_gpu;
+ uint32_t num_dgpu;
+ uint32_t num_apu;
+};
+
/*
* Modules parameters.
*/
@@ -134,6 +151,7 @@ extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
+extern struct amdgpu_mgpu_info mgpu_info;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -146,6 +164,7 @@ extern int amdgpu_cik_support;
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
@@ -408,16 +427,25 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
/*
- * Other graphics doorbells can be allocated here: from 0x8c to 0xef
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
* Graphics voltage island aperture 1
- * default non-graphics QWORD index is 0xF0 - 0xFF inclusive
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
*/
- /* sDMA engines */
- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+ /* sDMA engines reserved from 0xe0 to 0xef */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
+
+ /* For Vega10 SR-IOV, the SDMA doorbells must be fixed as follows
+ * to keep the same assignment as the host driver; otherwise
+ * conflicts will occur.
+ */
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+ AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
/* Interrupt handler */
AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
@@ -588,31 +616,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
*/
void amdgpu_test_moves(struct amdgpu_device *adev);
-
-/*
- * amdgpu smumgr functions
- */
-struct amdgpu_smumgr_funcs {
- int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
- int (*request_smu_load_fw)(struct amdgpu_device *adev);
- int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
-};
-
-/*
- * amdgpu smumgr
- */
-struct amdgpu_smumgr {
- struct amdgpu_bo *toc_buf;
- struct amdgpu_bo *smu_buf;
- /* asic priv smu data */
- void *priv;
- spinlock_t smu_lock;
- /* smumgr functions */
- const struct amdgpu_smumgr_funcs *smumgr_funcs;
- /* ucode loading complete flag */
- uint32_t fw_flags;
-};
-
/*
* ASIC specific register table accessible by UMD
*/
@@ -948,9 +951,6 @@ struct amdgpu_device {
u32 cg_flags;
u32 pg_flags;
- /* amdgpu smumgr */
- struct amdgpu_smumgr smu;
-
/* gfx */
struct amdgpu_gfx gfx;
@@ -1015,6 +1015,9 @@ struct amdgpu_device {
bool has_hw_reset;
u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
+ /* s3/s4 mask */
+ bool in_suspend;
+
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
bool in_gpu_reset;
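
As an illustration of how the new multi-GPU bookkeeping is meant to be used, here is a sketch (the function name is hypothetical; the struct fields and MAX_GPU_INSTANCE are from this patch) of a probe path recording an adapter:

/* Hypothetical registration of a device in the global mgpu_info table. */
static void example_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_ins;

	mutex_lock(&mgpu_info.mutex);
	if (mgpu_info.num_gpu < MAX_GPU_INSTANCE) {
		gpu_ins = &mgpu_info.gpu_ins[mgpu_info.num_gpu++];
		gpu_ins->adev = adev;
		gpu_ins->mgpu_fan_enabled = 0;
		if (adev->flags & AMD_IS_APU)
			mgpu_info.num_apu++;
		else
			mgpu_info.num_dgpu++;
	}
	mutex_unlock(&mgpu_info.mutex);
}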
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 6488e90ec948..7f0afc526419 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -359,7 +359,9 @@ out:
*
* Checks the acpi event and if it matches an atif event,
* handles it.
- * Returns NOTIFY code
+ *
+ * Returns:
+ * NOTIFY_BAD or NOTIFY_DONE, depending on the event.
*/
static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
@@ -373,11 +375,16 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
+ /* Is this actually our event? */
if (!atif ||
!atif->notification_cfg.enabled ||
- event->type != atif->notification_cfg.command_code)
- /* Not our event */
- return NOTIFY_DONE;
+ event->type != atif->notification_cfg.command_code) {
+ /* These events will generate keypresses otherwise */
+ if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
+ return NOTIFY_BAD;
+ else
+ return NOTIFY_DONE;
+ }
if (atif->functions.sbios_requests) {
struct atif_sbios_requests req;
@@ -386,7 +393,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
count = amdgpu_atif_get_sbios_requests(atif, &req);
if (count <= 0)
- return NOTIFY_DONE;
+ return NOTIFY_BAD;
DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 0f9947edb12a..c31a8849e9f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -76,6 +76,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
case CHIP_VEGA10:
+ case CHIP_VEGA20:
case CHIP_RAVEN:
kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
break;
@@ -123,7 +124,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
- int i;
+ int i, n;
int last_valid_bit;
if (adev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
@@ -162,7 +163,15 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_physical_address,
&gpu_resources.doorbell_aperture_size,
&gpu_resources.doorbell_start_offset);
- if (adev->asic_type >= CHIP_VEGA10) {
+
+ if (adev->asic_type < CHIP_VEGA10) {
+ kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ return;
+ }
+
+ n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
+
+ for (i = 0; i < n; i += 2) {
/* On SOC15 the BIF is involved in routing
* doorbells using the low 12 bits of the
* address. Communicate the assignments to
* KFD. KFD uses two doorbell pages per
* process in case of 64-bit doorbells so we
* process in case of 64-bit doorbells so we
* can use each doorbell assignment twice.
*/
- gpu_resources.sdma_doorbell[0][0] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0;
- gpu_resources.sdma_doorbell[0][1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
- gpu_resources.sdma_doorbell[1][0] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1;
- gpu_resources.sdma_doorbell[1][1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
- /* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
- * SDMA, IH and VCN. So don't use them for the CP.
- */
- gpu_resources.reserved_doorbell_mask = 0x1f0;
- gpu_resources.reserved_doorbell_val = 0x0f0;
+ if (adev->asic_type == CHIP_VEGA10) {
+ gpu_resources.sdma_doorbell[0][i] =
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+ gpu_resources.sdma_doorbell[0][i+1] =
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i] =
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i+1] =
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+ } else {
+ gpu_resources.sdma_doorbell[0][i] =
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+ gpu_resources.sdma_doorbell[0][i+1] =
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i] =
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i+1] =
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+ }
}
+ /* Doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved for
+ * SDMA, IH and VCN. So don't use them for the CP.
+ */
+ gpu_resources.reserved_doorbell_mask = 0x1e0;
+ gpu_resources.reserved_doorbell_val = 0x0e0;
kgd2kfd->device_init(adev->kfd, &gpu_resources);
}
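
The doorbell arithmetic above is easy to sanity-check in isolation. A stand-alone user-space program (illustrative only) reproducing the non-Vega10 branch for Vega20's eight SDMA queues per engine:

#include <stdio.h>

int main(void)
{
	const unsigned int base = 0xE0;	/* AMDGPU_DOORBELL64_sDMA_ENGINE0 */
	unsigned int i;

	/* Each 64-bit doorbell page is used twice: once directly and
	 * once at offset 0x200, exactly as in the loop above. */
	for (i = 0; i < 8; i += 2)
		printf("sdma_doorbell[0][%u]=0x%X  sdma_doorbell[0][%u]=0x%X\n",
		       i, base + (i >> 1), i + 1, base + 0x200 + (i >> 1));
	return 0;	/* prints 0xE0/0x2E0 .. 0xE3/0x2E3 */
}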
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 056fc6ef6c63..8e0d4f7196b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -174,7 +174,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
-uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct kgd_dev *kgd, uint64_t va, uint64_t size,
void *vm, struct kgd_mem **mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b2e45c8e2e0d..244d9834a381 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -142,7 +142,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base);
+ uint64_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
@@ -874,7 +874,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base)
+ uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -882,7 +882,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
pr_err("trying to set page table base for wrong VMID\n");
return;
}
- WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
+ lower_32_bits(page_table_base));
}
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index ea7c18ce7754..9f149914ad6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -45,8 +45,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-struct vi_sdma_mqd;
-
/*
* Register access functions
*/
@@ -100,7 +98,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base);
+ uint64_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
@@ -282,7 +280,8 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
lock_srbm(kgd, mec, pipe, 0, 0);
- WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+ WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
unlock_srbm(kgd);
@@ -834,7 +833,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base)
+ uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -842,7 +841,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
pr_err("trying to set page table base for wrong VMID\n");
return;
}
- WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
+ lower_32_bits(page_table_base));
}
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index c9176537550b..42cb4c4e0929 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -138,7 +138,7 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base);
+ uint64_t page_table_base);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
@@ -1013,11 +1013,10 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base)
+ uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = (uint64_t)page_table_base << PAGE_SHIFT |
- AMDGPU_PTE_VALID;
+ uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6ee9dc476c86..df0a059565f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1131,11 +1131,15 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
amdgpu_vm_release_compute(adev, avm);
}
-uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ struct amdgpu_bo *pd = avm->root.base.bo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
- return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ if (adev->asic_type < CHIP_VEGA10)
+ return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ return avm->pd_phys_addr;
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f5fb93795a69..dd9a4fb9ce39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -826,21 +826,13 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
- unsigned i, j;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
S_IFREG | S_IRUGO, root,
adev, debugfs_regs[i]);
- if (IS_ERR(ent)) {
- for (j = 0; j < i; j++) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- return PTR_ERR(ent);
- }
-
- if (!i)
+ if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
adev->debugfs_regs[i] = ent;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bd79d0a31942..1e4dd09a5072 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1525,6 +1525,92 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.sw)
+ continue;
+ if (adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ if (r) {
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.sw)
+ continue;
+ if (adev->ip_blocks[i].status.hw)
+ continue;
+ r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ if (r) {
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ return 0;
+}
+
+static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
+{
+ int r = 0;
+ int i;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+ if (adev->in_gpu_reset || adev->in_suspend) {
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
+ break; /* SR-IOV GPU reset: PSP must do hw_init before the IH because of a hardware limit */
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ DRM_ERROR("resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ } else {
+ r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ if (r) {
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.hw = true;
+ }
+ }
+ }
+
+ if (adev->powerplay.pp_funcs->load_firmware) {
+ r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+ if (r) {
+ pr_err("firmware loading failed\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
/**
* amdgpu_device_ip_init - run init for hardware IPs
*
@@ -1581,19 +1667,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
}
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.sw)
- continue;
- if (adev->ip_blocks[i].status.hw)
- continue;
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
- if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
- adev->ip_blocks[i].status.hw = true;
- }
+ r = amdgpu_ucode_create_bo(adev); /* create the ucode BO once sw_init has completed */
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_hw_init_phase1(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_fw_loading(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_hw_init_phase2(adev);
+ if (r)
+ return r;
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
@@ -1656,7 +1744,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
for (j = 0; j < adev->num_ip_blocks; j++) {
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
- if (!adev->ip_blocks[i].status.valid)
+ if (!adev->ip_blocks[i].status.late_initialized)
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1686,7 +1774,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
for (j = 0; j < adev->num_ip_blocks; j++) {
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
- if (!adev->ip_blocks[i].status.valid)
+ if (!adev->ip_blocks[i].status.late_initialized)
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1723,7 +1811,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
+ if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
@@ -1732,8 +1820,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_blocks[i].status.late_initialized = true;
}
+ adev->ip_blocks[i].status.late_initialized = true;
}
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
@@ -1803,6 +1891,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ amdgpu_ucode_free_bo(adev);
amdgpu_free_static_csa(adev);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
@@ -1833,6 +1922,43 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_device_enable_mgpu_fan_boost(void)
+{
+ struct amdgpu_gpu_instance *gpu_ins;
+ struct amdgpu_device *adev;
+ int i, ret = 0;
+
+ mutex_lock(&mgpu_info.mutex);
+
+ /*
+ * MGPU fan boost feature should be enabled
+ * only when there are two or more dGPUs in
+ * the system
+ */
+ if (mgpu_info.num_dgpu < 2)
+ goto out;
+
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ gpu_ins = &(mgpu_info.gpu_ins[i]);
+ adev = gpu_ins->adev;
+ if (!(adev->flags & AMD_IS_APU) &&
+ !gpu_ins->mgpu_fan_enabled &&
+ adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
+ ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
+ if (ret)
+ break;
+
+ gpu_ins->mgpu_fan_enabled = 1;
+ }
+ }
+
+out:
+ mutex_unlock(&mgpu_info.mutex);
+
+ return ret;
+}
+
/**
* amdgpu_device_ip_late_init_func_handler - work handler for ib test
*
@@ -1847,6 +1973,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+
+ r = amdgpu_device_enable_mgpu_fan_boost();
+ if (r)
+ DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2082,7 +2212,8 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
@@ -2114,6 +2245,11 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
+
+ r = amdgpu_device_fw_loading(adev);
+ if (r)
+ return r;
+
r = amdgpu_device_ip_resume_phase2(adev);
return r;
@@ -2608,6 +2744,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ adev->in_suspend = true;
drm_kms_helper_poll_disable(dev);
if (fbcon)
@@ -2793,6 +2930,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
+ adev->in_suspend = false;
+
return 0;
}
@@ -3061,6 +3200,10 @@ retry:
if (r)
goto out;
+ r = amdgpu_device_fw_loading(adev);
+ if (r)
+ return r;
+
r = amdgpu_device_ip_resume_phase2(adev);
if (r)
goto out;
@@ -3117,6 +3260,10 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
/* we need to recover the GART prior to running SMC/CP/SDMA resume */
amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+ r = amdgpu_device_fw_loading(adev);
+ if (r)
+ return r;
+
/* now we are okay to resume SMC/CP/SDMA */
r = amdgpu_device_ip_reinit_late_sriov(adev);
if (r)
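
Taken together, the amdgpu_device.c changes split hardware bring-up into three phases. A condensed view (a sketch built only from functions this patch adds or calls, not literal kernel code):

/* Sketch of the new init ordering in amdgpu_device_ip_init(). */
static int example_ip_init_order(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_ucode_create_bo(adev);	/* ucode BO once sw_init is done */
	if (r)
		return r;
	r = amdgpu_device_ip_hw_init_phase1(adev);	/* COMMON and IH blocks */
	if (r)
		return r;
	r = amdgpu_device_fw_loading(adev);	/* PSP hw_init/resume, SMU fw */
	if (r)
		return r;
	return amdgpu_device_ip_hw_init_phase2(adev);	/* everything else */
}

The same fw_loading step is also slotted between resume phase 1 and phase 2 and into both GPU-reset paths, so firmware is always loaded before the blocks that depend on it come up.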
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index ff24e1cc5b65..f972cd156795 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -278,6 +278,9 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
+#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
+ ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
+
#define amdgpu_dpm_get_sclk(adev, l) \
((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
@@ -357,6 +360,10 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
(adev)->powerplay.pp_handle, type, parameter, size))
+#define amdgpu_dpm_enable_mgpu_fan_boost(adev) \
+ ((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\
+ (adev)->powerplay.pp_handle))
+
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 723f0f7754bd..28781414d71c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
+struct amdgpu_mgpu_info mgpu_info = {
+ .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
+};
/**
* DOC: vramlimit (int)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 176f28777f5e..5448cf27654e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -196,6 +196,19 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
}
/**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+ mod_timer(&ring->fence_drv.fallback_timer,
+ jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
* amdgpu_fence_process - check for fence activity
*
* @ring: pointer to struct amdgpu_ring
@@ -203,8 +216,10 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
* Checks the current fence value and calculates the last
* signalled fence value. Wakes the fence queue if the
* sequence number has increased.
+ *
+ * Returns true if fence was processed
*/
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
uint32_t seq, last_seq;
@@ -216,8 +231,12 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
+ if (del_timer(&ring->fence_drv.fallback_timer) &&
+ seq != ring->fence_drv.sync_seq)
+ amdgpu_fence_schedule_fallback(ring);
+
if (unlikely(seq == last_seq))
- return;
+ return false;
last_seq &= drv->num_fences_mask;
seq &= drv->num_fences_mask;
@@ -244,6 +263,24 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
dma_fence_put(fence);
} while (last_seq != seq);
+
+ return true;
+}
+
+/**
+ * amdgpu_fence_fallback - fallback for hardware interrupts
+ *
+ * @t: pointer to the timer embedded in struct amdgpu_fence_driver
+ *
+ * Checks for fence activity.
+ */
+static void amdgpu_fence_fallback(struct timer_list *t)
+{
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
+
+ if (amdgpu_fence_process(ring))
+ DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}
/**
@@ -393,6 +430,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
+
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
@@ -468,6 +507,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
drm_sched_fini(&ring->sched);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
@@ -561,6 +601,27 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
}
/**
+ * amdgpu_fence_enable_signaling - enable signalling on fence
+ * @fence: fence
+ *
+ * This function is called with the fence_queue lock held and arms the
+ * fallback timer so that fence activity is still detected even if a
+ * hardware interrupt is missed.
+ */
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_fence *fence = to_amdgpu_fence(f);
+ struct amdgpu_ring *ring = fence->ring;
+
+ if (!timer_pending(&ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(ring);
+
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+
+ return true;
+}
+
+/**
* amdgpu_fence_free - free up the fence memory
*
* @rcu: RCU callback head
@@ -590,6 +651,7 @@ static void amdgpu_fence_release(struct dma_fence *f)
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
+ .enable_signaling = amdgpu_fence_enable_signaling,
.release = amdgpu_fence_release,
};
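
The fence changes boil down to one pattern: arm a timer whenever signaling is enabled, and treat the timer firing with work outstanding as a missed interrupt. A self-contained sketch of that pattern (names hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_fence_drv {
	struct timer_list fallback_timer;
};

static void example_fallback(struct timer_list *t)
{
	struct example_fence_drv *drv =
		from_timer(drv, t, fallback_timer);

	/* amdgpu calls amdgpu_fence_process(ring) here; a true return
	 * means the interrupt path missed fence activity. */
	(void)drv;
}

static void example_init(struct example_fence_drv *drv)
{
	timer_setup(&drv->fallback_timer, example_fallback, 0);
}

static void example_enable_signaling(struct example_fence_drv *drv)
{
	/* Mirrors amdgpu_fence_enable_signaling() above. */
	if (!timer_pending(&drv->fallback_timer))
		mod_timer(&drv->fallback_timer, jiffies + HZ / 2);
}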
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index f172e92c463c..b61b5c11aead 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -297,8 +297,7 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- /* s3/s4 mask */
- bool in_suspend;
+
/* NGG */
struct amdgpu_ngg ngg;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 9a5b252784a1..d73367cab4f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -146,6 +146,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
const uint64_t four_gb = 0x100000000ULL;
u64 size_af, size_bf;
+ /* To avoid the hole, limit the max MC address to AMDGPU_GMC_HOLE_START */
+ u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
mc->gart_size += adev->pm.smu_prv_buffer_size;
@@ -153,7 +155,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
* the GART base on a 4GB boundary as well.
*/
size_bf = mc->fb_start;
- size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->fb_end + 1, four_gb);
+ size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);
if (mc->gart_size > max(size_bf, size_af)) {
dev_warn(adev->dev, "limiting GART\n");
@@ -164,7 +166,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
(size_af < mc->gart_size))
mc->gart_start = 0;
else
- mc->gart_start = mc->mc_mask - mc->gart_size + 1;
+ mc->gart_start = max_mc_address - mc->gart_size + 1;
mc->gart_start &= ~(four_gb - 1);
mc->gart_end = mc->gart_start + mc->gart_size - 1;
@@ -200,16 +202,13 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
}
if (size_bf > size_af) {
- mc->agp_start = mc->fb_start > mc->gart_start ?
- mc->gart_end + 1 : 0;
+ mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
mc->agp_size = size_bf;
} else {
- mc->agp_start = (mc->fb_start > mc->gart_start ?
- mc->fb_end : mc->gart_end) + 1,
+ mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
mc->agp_size = size_af;
}
- mc->agp_start = ALIGN(mc->agp_start, sixteen_gb);
mc->agp_end = mc->agp_start + mc->agp_size - 1;
dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
mc->agp_size >> 20, mc->agp_start, mc->agp_end);
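
The effect of capping the GART at the hole is easiest to see with numbers. An illustrative user-space check (AMDGPU_GMC_HOLE_START is assumed here to be 1ULL << 47, the start of the canonical-address hole):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t hole_start = 1ULL << 47;	/* assumed AMDGPU_GMC_HOLE_START */
	const uint64_t mc_mask = (1ULL << 48) - 1;	/* 48-bit MC space */
	const uint64_t four_gb = 0x100000000ULL;
	const uint64_t gart_size = 512ULL << 20;	/* 512 MiB */
	uint64_t max_mc = mc_mask < hole_start - 1 ? mc_mask : hole_start - 1;
	uint64_t gart_start = (max_mc - gart_size + 1) & ~(four_gb - 1);

	/* Before this patch the GART ended at mc_mask, inside the hole;
	 * now it ends just below hole_start. */
	printf("gart: 0x%llx - 0x%llx\n",
	       (unsigned long long)gart_start,
	       (unsigned long long)(gart_start + gart_size - 1));
	return 0;
}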
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 47817e00f54f..b8963b725dfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -354,6 +354,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
if (!ring || !ring->ready)
continue;
+ /* skip IB tests for KIQ in general for the below reasons:
+ * 1. We never submit IBs to the KIQ
+ * 2. KIQ doesn't use the EOP interrupts,
+ * we use some other CP interrupt.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ continue;
+
/* MM engine need more time */
if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3a072a7a39f0..df9b173c3d0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -574,7 +574,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
/* skip over VMID 0, since it is the system VM */
for (j = 1; j < id_mgr->num_ids; ++j) {
amdgpu_vmid_reset(adev, i, j);
- amdgpu_sync_create(&id_mgr->ids[i].active);
+ amdgpu_sync_create(&id_mgr->ids[j].active);
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 4ed86218cef3..8af67f649660 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,46 +24,21 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
-#include "amdgpu_amdkfd.h"
-
-/**
- * amdgpu_ih_ring_alloc - allocate memory for the IH ring
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate a ring buffer for the interrupt controller.
- * Returns 0 for success, errors for failure.
- */
-static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
-{
- int r;
-
- /* Allocate ring buffer */
- if (adev->irq.ih.ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->irq.ih.ring_obj,
- &adev->irq.ih.gpu_addr,
- (void **)&adev->irq.ih.ring);
- if (r) {
- DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
- return r;
- }
- }
- return 0;
-}
/**
* amdgpu_ih_ring_init - initialize the IH state
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to initialize
+ * @ring_size: ring size to allocate
+ * @use_bus_addr: true when we can use dma_alloc_coherent
*
* Initializes the IH state and allocates a buffer
* for the IH ring buffer.
* Returns 0 for success, errors for failure.
*/
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
- bool use_bus_addr)
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ unsigned ring_size, bool use_bus_addr)
{
u32 rb_bufsz;
int r;
@@ -71,70 +46,76 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
- adev->irq.ih.ring_size = ring_size;
- adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
- adev->irq.ih.rptr = 0;
- adev->irq.ih.use_bus_addr = use_bus_addr;
-
- if (adev->irq.ih.use_bus_addr) {
- if (!adev->irq.ih.ring) {
- /* add 8 bytes for the rptr/wptr shadows and
- * add them to the end of the ring allocation.
- */
- adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
- adev->irq.ih.ring_size + 8,
- &adev->irq.ih.rb_dma_addr);
- if (adev->irq.ih.ring == NULL)
- return -ENOMEM;
- memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
- adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
- adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
- }
- return 0;
+ ih->ring_size = ring_size;
+ ih->ptr_mask = ih->ring_size - 1;
+ ih->rptr = 0;
+ ih->use_bus_addr = use_bus_addr;
+
+ if (use_bus_addr) {
+ if (ih->ring)
+ return 0;
+
+ /* add 8 bytes for the rptr/wptr shadows and
+ * add them to the end of the ring allocation.
+ */
+ ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
+ &ih->rb_dma_addr, GFP_KERNEL);
+ if (ih->ring == NULL)
+ return -ENOMEM;
+
+ memset((void *)ih->ring, 0, ih->ring_size + 8);
+ ih->wptr_offs = (ih->ring_size / 4) + 0;
+ ih->rptr_offs = (ih->ring_size / 4) + 1;
} else {
- r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
+ r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
+ if (r)
+ return r;
+
+ r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
if (r) {
- dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
+ amdgpu_device_wb_free(adev, ih->wptr_offs);
return r;
}
- r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
+ r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ih->ring_obj, &ih->gpu_addr,
+ (void **)&ih->ring);
if (r) {
- amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
- dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
+ amdgpu_device_wb_free(adev, ih->rptr_offs);
+ amdgpu_device_wb_free(adev, ih->wptr_offs);
return r;
}
-
- return amdgpu_ih_ring_alloc(adev);
}
+ return 0;
}
/**
* amdgpu_ih_ring_fini - tear down the IH state
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to tear down
*
* Tears down the IH state and frees buffer
* used for the IH ring buffer.
*/
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
- if (adev->irq.ih.use_bus_addr) {
- if (adev->irq.ih.ring) {
- /* add 8 bytes for the rptr/wptr shadows and
- * add them to the end of the ring allocation.
- */
- pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
- (void *)adev->irq.ih.ring,
- adev->irq.ih.rb_dma_addr);
- adev->irq.ih.ring = NULL;
- }
+ if (ih->use_bus_addr) {
+ if (!ih->ring)
+ return;
+
+ /* add 8 bytes for the rptr/wptr shadows and
+ * add them to the end of the ring allocation.
+ */
+ dma_free_coherent(adev->dev, ih->ring_size + 8,
+ (void *)ih->ring, ih->rb_dma_addr);
+ ih->ring = NULL;
} else {
- amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
- &adev->irq.ih.gpu_addr,
- (void **)&adev->irq.ih.ring);
- amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
- amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
+ amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
+ (void **)&ih->ring);
+ amdgpu_device_wb_free(adev, ih->wptr_offs);
+ amdgpu_device_wb_free(adev, ih->rptr_offs);
}
}
@@ -142,56 +123,43 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
* amdgpu_ih_process - interrupt handler
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to process
*
* Interrupt handler (VI); walks the IH ring.
* Returns irq process return code.
*/
-int amdgpu_ih_process(struct amdgpu_device *adev)
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ void (*callback)(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih))
{
- struct amdgpu_iv_entry entry;
u32 wptr;
- if (!adev->irq.ih.enabled || adev->shutdown)
+ if (!ih->enabled || adev->shutdown)
return IRQ_NONE;
wptr = amdgpu_ih_get_wptr(adev);
restart_ih:
/* is somebody else already processing irqs? */
- if (atomic_xchg(&adev->irq.ih.lock, 1))
+ if (atomic_xchg(&ih->lock, 1))
return IRQ_NONE;
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
+ DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
- while (adev->irq.ih.rptr != wptr) {
- u32 ring_index = adev->irq.ih.rptr >> 2;
-
- /* Prescreening of high-frequency interrupts */
- if (!amdgpu_ih_prescreen_iv(adev)) {
- adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
- continue;
- }
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev,
- (const void *) &adev->irq.ih.ring[ring_index]);
-
- entry.iv_entry = (const uint32_t *)
- &adev->irq.ih.ring[ring_index];
- amdgpu_ih_decode_iv(adev, &entry);
- adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
-
- amdgpu_irq_dispatch(adev, &entry);
+ while (ih->rptr != wptr) {
+ callback(adev, ih);
+ ih->rptr &= ih->ptr_mask;
}
+
amdgpu_ih_set_rptr(adev);
- atomic_set(&adev->irq.ih.lock, 0);
+ atomic_set(&ih->lock, 0);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev);
- if (wptr != adev->irq.ih.rptr)
+ if (wptr != ih->rptr)
goto restart_ih;
return IRQ_HANDLED;
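
The rewritten amdgpu_ih_process is now a generic single-consumer ring walk: snapshot the write pointer, drain entries up to it through a caller-supplied callback that advances the read pointer, then re-read the write pointer to catch late arrivals (the restart_ih loop). A stand-alone sketch of the core pattern follows, under simplified assumptions: one consumer, entry-granular pointers, and no locking or restart pass, both of which the driver does perform.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16 /* power of two, assumed */

struct ring {
	uint32_t buf[RING_SIZE];
	uint32_t rptr, wptr;
	uint32_t ptr_mask;
};

static void handle_entry(struct ring *r)
{
	printf("entry %u\n", r->buf[r->rptr]);
	r->rptr++; /* the callback advances the read pointer */
}

static void ring_process(struct ring *r, void (*cb)(struct ring *r))
{
	uint32_t wptr = r->wptr; /* snapshot once */

	while (r->rptr != wptr) {
		cb(r);
		r->rptr &= r->ptr_mask; /* wrap */
	}
}

int main(void)
{
	struct ring r = { .ptr_mask = RING_SIZE - 1 };

	r.buf[0] = 42;
	r.buf[1] = 43;
	r.wptr = 2;
	ring_process(&r, handle_entry);
	return 0;
}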
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 0d5b3f5201d2..9ce8c93ec19b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,12 +24,8 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
-#include "soc15_ih_clientid.h"
-
struct amdgpu_device;
-
-#define AMDGPU_IH_CLIENTID_LEGACY 0
-#define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
+struct amdgpu_iv_entry;
/*
* R6xx+ IH ring
@@ -51,22 +47,6 @@ struct amdgpu_ih_ring {
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
};
-#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
-
-struct amdgpu_iv_entry {
- unsigned client_id;
- unsigned src_id;
- unsigned ring_id;
- unsigned vmid;
- unsigned vmid_src;
- uint64_t timestamp;
- unsigned timestamp_src;
- unsigned pasid;
- unsigned pasid_src;
- unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
- const uint32_t *iv_entry;
-};
-
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
@@ -82,9 +62,11 @@ struct amdgpu_ih_funcs {
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
- bool use_bus_addr);
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
-int amdgpu_ih_process(struct amdgpu_device *adev);
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ unsigned ring_size, bool use_bus_addr);
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ void (*callback)(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih));
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b927e8798534..52c17f6219a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -51,6 +51,7 @@
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
#include <linux/pm_runtime.h>
@@ -123,7 +124,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
int r;
spin_lock_irqsave(&adev->irq.lock, irqflags);
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
@@ -147,6 +148,34 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
+ * amdgpu_irq_callback - callback from the IH ring
+ *
+ * @adev: amdgpu device pointer
+ * @ih: amdgpu ih ring
+ *
+ * Callback from IH ring processing to handle the entry at the current position
+ * and advance the read pointer.
+ */
+static void amdgpu_irq_callback(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ u32 ring_index = ih->rptr >> 2;
+ struct amdgpu_iv_entry entry;
+
+ /* Prescreening of high-frequency interrupts */
+ if (!amdgpu_ih_prescreen_iv(adev))
+ return;
+
+ /* Before dispatching irq to IP blocks, send it to amdkfd */
+ amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
+
+ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+ amdgpu_ih_decode_iv(adev, &entry);
+
+ amdgpu_irq_dispatch(adev, &entry);
+}
+
+/**
* amdgpu_irq_handler - IRQ handler
*
* @irq: IRQ number (unused)
@@ -163,7 +192,7 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
struct amdgpu_device *adev = dev->dev_private;
irqreturn_t ret;
- ret = amdgpu_ih_process(adev);
+ ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
return ret;
@@ -273,7 +302,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
cancel_work_sync(&adev->reset_work);
}
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
@@ -313,7 +342,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source)
{
- if (client_id >= AMDGPU_IH_CLIENTID_MAX)
+ if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
return -EINVAL;
if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
@@ -367,7 +396,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
trace_amdgpu_iv(entry);
- if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+ if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
return;
}
@@ -440,7 +469,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 3375ad778edc..f6ce171cb8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -25,19 +25,38 @@
#define __AMDGPU_IRQ_H__
#include <linux/irqdomain.h>
+#include "soc15_ih_clientid.h"
#include "amdgpu_ih.h"
-#define AMDGPU_MAX_IRQ_SRC_ID 0x100
+#define AMDGPU_MAX_IRQ_SRC_ID 0x100
#define AMDGPU_MAX_IRQ_CLIENT_ID 0x100
+#define AMDGPU_IRQ_CLIENTID_LEGACY 0
+#define AMDGPU_IRQ_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
+
+#define AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW 4
+
struct amdgpu_device;
-struct amdgpu_iv_entry;
enum amdgpu_interrupt_state {
AMDGPU_IRQ_STATE_DISABLE,
AMDGPU_IRQ_STATE_ENABLE,
};
+struct amdgpu_iv_entry {
+ unsigned client_id;
+ unsigned src_id;
+ unsigned ring_id;
+ unsigned vmid;
+ unsigned vmid_src;
+ uint64_t timestamp;
+ unsigned timestamp_src;
+ unsigned pasid;
+ unsigned pasid_src;
+ unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW];
+ const uint32_t *iv_entry;
+};
+
struct amdgpu_irq_src {
unsigned num_types;
atomic_t *enabled_types;
@@ -63,7 +82,7 @@ struct amdgpu_irq {
bool installed;
spinlock_t lock;
/* interrupt sources */
- struct amdgpu_irq_client client[AMDGPU_IH_CLIENTID_MAX];
+ struct amdgpu_irq_client client[AMDGPU_IRQ_CLIENTID_MAX];
/* status, etc. */
bool msi_enabled; /* msi enabled */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a64056dadc58..81732a84c2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -40,6 +40,30 @@
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
+static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
+{
+ struct amdgpu_gpu_instance *gpu_instance;
+ int i;
+
+ mutex_lock(&mgpu_info.mutex);
+
+ for (i = 0; i < mgpu_info.num_gpu; i++) {
+ gpu_instance = &(mgpu_info.gpu_ins[i]);
+ if (gpu_instance->adev == adev) {
+ mgpu_info.gpu_ins[i] =
+ mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
+ mgpu_info.num_gpu--;
+ if (adev->flags & AMD_IS_APU)
+ mgpu_info.num_apu--;
+ else
+ mgpu_info.num_dgpu--;
+ break;
+ }
+ }
+
+ mutex_unlock(&mgpu_info.mutex);
+}
+
/**
* amdgpu_driver_unload_kms - Main unload function for KMS.
*
@@ -55,6 +79,8 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev == NULL)
return;
+ amdgpu_unregister_gpu_instance(adev);
+
if (adev->rmmio == NULL)
goto done_free;
@@ -75,6 +101,31 @@ done_free:
dev->dev_private = NULL;
}
+static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
+{
+ struct amdgpu_gpu_instance *gpu_instance;
+
+ mutex_lock(&mgpu_info.mutex);
+
+ if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
+ DRM_ERROR("Cannot register more gpu instance\n");
+ mutex_unlock(&mgpu_info.mutex);
+ return;
+ }
+
+ gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
+ gpu_instance->adev = adev;
+ gpu_instance->mgpu_fan_enabled = 0;
+
+ mgpu_info.num_gpu++;
+ if (adev->flags & AMD_IS_APU)
+ mgpu_info.num_apu++;
+ else
+ mgpu_info.num_dgpu++;
+
+ mutex_unlock(&mgpu_info.mutex);
+}
+
/**
* amdgpu_driver_load_kms - Main load function for KMS.
*
@@ -169,6 +220,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
pm_runtime_put_autosuspend(dev->dev);
}
+ amdgpu_register_gpu_instance(adev);
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
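
The new mgpu bookkeeping keeps a fixed-capacity array guarded by mgpu_info.mutex; unregistering uses unordered swap-with-last removal, which is O(1) and acceptable because instance order carries no meaning. A minimal sketch of that removal pattern, with a hypothetical element type and the locking elided:

#include <stdio.h>

#define MAX_INSTANCES 8 /* assumed cap */

struct table {
	int vals[MAX_INSTANCES];
	int num;
};

/* remove the first slot matching v: overwrite it with the last element */
static void table_remove(struct table *t, int v)
{
	int i;

	for (i = 0; i < t->num; i++) {
		if (t->vals[i] == v) {
			t->vals[i] = t->vals[t->num - 1];
			t->num--;
			break;
		}
	}
}

int main(void)
{
	struct table t = { .vals = {1, 2, 3}, .num = 3 };

	table_remove(&t, 2);
	printf("num=%d vals[1]=%d\n", t.num, t.vals[1]); /* num=2 vals[1]=3 */
	return 0;
}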
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8c334fc808c2..94055a485e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1120,12 +1120,19 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 value;
+ u32 pwm_mode;
/* Can't adjust fan when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
+ pr_info("manual fan speed control should be enabled first\n");
+ return -EINVAL;
+ }
+
err = kstrtou32(buf, 10, &value);
if (err)
return err;
@@ -1187,6 +1194,148 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return sprintf(buf, "%i\n", speed);
}
+static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 min_rpm = 0;
+ u32 size = sizeof(min_rpm);
+ int r;
+
+ if (!adev->powerplay.pp_funcs->read_sensor)
+ return -EINVAL;
+
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+ (void *)&min_rpm, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 max_rpm = 0;
+ u32 size = sizeof(max_rpm);
+ int r;
+
+ if (!adev->powerplay.pp_funcs->read_sensor)
+ return -EINVAL;
+
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ (void *)&max_rpm, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 rpm = 0;
+
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
+ if (err)
+ return err;
+ }
+
+ return sprintf(buf, "%i\n", rpm);
+}
+
+static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 value;
+ u32 pwm_mode;
+
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+ return -ENODATA;
+
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+ err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
+ if (err)
+ return err;
+ }
+
+ return count;
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 pwm_mode = 0;
+
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ return -EINVAL;
+
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
+ return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
+}
+
+static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ int value;
+ u32 pwm_mode;
+
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ return -EINVAL;
+
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value == 0)
+ pwm_mode = AMD_FAN_CTRL_AUTO;
+ else if (value == 1)
+ pwm_mode = AMD_FAN_CTRL_MANUAL;
+ else
+ return -EINVAL;
+
+ amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+
+ return count;
+}
+
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1406,8 +1555,16 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
*
* - pwm1_max: pulse width modulation fan control maximum level (255)
*
+ * - fan1_min: a minimum fan speed value. Unit: revolution/min (RPM)
+ *
+ * - fan1_max: a maximum fan speed value. Unit: revolution/min (RPM)
+ *
* - fan1_input: fan speed in RPM
*
+ * - fan[1-*]_target: desired fan speed. Unit: revolution/min (RPM)
+ *
+ * - fan[1-*]_enable: enable or disable the sensors. 1: enable, 0: disable
+ *
* You can use hwmon tools like sensors to view this information on your system.
*
*/
@@ -1420,6 +1577,10 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
+static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
@@ -1438,6 +1599,10 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_max.dev_attr.attr,
+ &sensor_dev_attr_fan1_target.dev_attr.attr,
+ &sensor_dev_attr_fan1_enable.dev_attr.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
@@ -1456,13 +1621,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
-
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
/* Skip limit attributes if DPM is not enabled */
@@ -1472,7 +1640,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
/* mask fan attributes if we have no bindings for this asic to expose */
@@ -1497,10 +1670,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* hide max/min values if we can't both query and manage the fan */
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
!adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+ (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+ (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+ return 0;
+
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
@@ -1976,6 +2157,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
uint32_t value;
+ uint64_t value64;
uint32_t query = 0;
int size;
@@ -2014,6 +2196,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
seq_printf(m, "GPU Load: %u %%\n", value);
seq_printf(m, "\n");
+ /* SMC feature mask */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
+ seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
+
/* UVD clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
if (!value) {
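
From user space the new fan attributes behave like any other hwmon files. The sketch below reads fan1_input, switches to manual control via fan1_enable (per the handlers above, 1 means manual/enabled and 0 means automatic), then writes an RPM target; the hwmon index in the path is a placeholder and varies per system.

#include <stdio.h>

/* hypothetical path; the hwmon index differs per system */
#define HWMON "/sys/class/hwmon/hwmon0/"

static int write_attr(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), HWMON "%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	FILE *f = fopen(HWMON "fan1_input", "r");
	int rpm;

	if (f && fscanf(f, "%d", &rpm) == 1)
		printf("fan speed: %d RPM\n", rpm);
	if (f)
		fclose(f);

	/* fan1_target writes return -ENODATA unless manual mode is on */
	write_attr("fan1_enable", "1\n");
	write_attr("fan1_target", "1500\n");
	return 0;
}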
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index bd397d2916fb..25d2f3e757f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -452,8 +452,6 @@ static int psp_hw_fini(void *handle)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
- amdgpu_ucode_fini_bo(adev);
-
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 44fc665e4577..4caa301ce454 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -77,6 +77,7 @@ struct amdgpu_fence_driver {
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
+ struct timer_list fallback_timer;
unsigned num_fences_mask;
spinlock_t lock;
struct dma_fence **fences;
@@ -96,7 +97,7 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
+bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
uint32_t wait_seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d17503f0df8e..500113ec65ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -46,10 +46,6 @@ struct amdgpu_sdma_instance {
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
-#ifdef CONFIG_DRM_AMDGPU_SI
- //SI DMA has a difference trap irq number for the second engine
- struct amdgpu_irq_src trap_irq_1;
-#endif
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
int num_instances;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 2e87414422f9..e9bf70e2ac51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -103,7 +103,7 @@ TRACE_EVENT(amdgpu_iv,
__entry->src_data[2] = iv->src_data[2];
__entry->src_data[3] = iv->src_data[3];
),
- TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x\n",
+ TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
__entry->client_id, __entry->src_id,
__entry->ring_id, __entry->vmid,
__entry->timestamp, __entry->pasid,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index b160b958e5fe..f212402570a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/* Copyright Red Hat Inc 2010.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 1fa8bc337859..7b33867036e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -297,10 +297,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
- if (!load_type)
- return AMDGPU_FW_LOAD_DIRECT;
- else
- return AMDGPU_FW_LOAD_SMU;
+ return AMDGPU_FW_LOAD_SMU;
case CHIP_VEGA10:
case CHIP_RAVEN:
case CHIP_VEGA12:
@@ -423,32 +420,41 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
return 0;
}
-int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
- uint64_t fw_offset = 0;
- int i, err;
- struct amdgpu_firmware_info *ucode = NULL;
- const struct common_firmware_header *header = NULL;
-
- if (!adev->firmware.fw_size) {
- dev_warn(adev->dev, "No ip firmware need to load\n");
- return 0;
- }
-
- if (!adev->in_gpu_reset) {
- err = amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- &adev->firmware.fw_buf,
- &adev->firmware.fw_buf_mc,
- &adev->firmware.fw_buf_ptr);
- if (err) {
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
+ amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &adev->firmware.fw_buf,
+ &adev->firmware.fw_buf_mc,
+ &adev->firmware.fw_buf_ptr);
+ if (!adev->firmware.fw_buf) {
dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
- goto failed;
+ return -ENOMEM;
+ } else if (amdgpu_sriov_vf(adev)) {
+ memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
}
}
+ return 0;
+}
+
+void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
+{
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
+ amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+ &adev->firmware.fw_buf_mc,
+ &adev->firmware.fw_buf_ptr);
+}
- memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
+int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+{
+ uint64_t fw_offset = 0;
+ int i;
+ struct amdgpu_firmware_info *ucode = NULL;
+ /* For bare-metal, the ucode is allocated in GTT, so there is no need to refill the bo on reset/suspend */
+ if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
+ return 0;
/*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
* ucode info here
@@ -465,7 +471,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
- header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
adev->firmware.fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
@@ -480,33 +485,4 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
}
}
return 0;
-
-failed:
- if (err)
- adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
-
- return err;
-}
-
-int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_firmware_info *ucode = NULL;
-
- if (!adev->firmware.fw_size)
- return 0;
-
- for (i = 0; i < adev->firmware.max_ucodes; i++) {
- ucode = &adev->firmware.ucode[i];
- if (ucode->fw) {
- ucode->mc_addr = 0;
- ucode->kaddr = NULL;
- }
- }
-
- amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
- &adev->firmware.fw_buf_mc,
- &adev->firmware.fw_buf_ptr);
-
- return 0;
}
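
Splitting the old init/fini pair into create/init/free gives the firmware BO a lifetime that spans GPU resets: the buffer is created once, its contents are (re)written only when they could have been lost, and it is freed once at teardown. A toy user-space lifecycle sketch under those assumptions, not the driver's actual API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fw_buf { void *ptr; size_t size; };

static int fw_create(struct fw_buf *b, size_t size)
{
	b->ptr = malloc(size);
	b->size = size;
	return b->ptr ? 0 : -1;
}

/* refill only when the contents could have been lost */
static void fw_init(struct fw_buf *b, int in_reset)
{
	if (in_reset) /* bare-metal GTT contents survive reset */
		return;
	memset(b->ptr, 0, b->size);
}

static void fw_free(struct fw_buf *b)
{
	free(b->ptr);
	b->ptr = NULL;
}

int main(void)
{
	struct fw_buf b;

	if (fw_create(&b, 4096))
		return 1;
	fw_init(&b, 0); /* initial load */
	fw_init(&b, 1); /* reset: skip the refill */
	fw_free(&b);
	return 0;
}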
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 8f3f1117728c..aa6641b944a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -276,8 +276,10 @@ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
int amdgpu_ucode_validate(const struct firmware *fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
+
int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
-int amdgpu_ucode_fini_bo(struct amdgpu_device *adev);
+int amdgpu_ucode_create_bo(struct amdgpu_device *adev);
+void amdgpu_ucode_free_bo(struct amdgpu_device *adev);
enum amdgpu_firmware_load_type
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0cc5190f4f36..5f3f54073818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
int i;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
if (adev->vce.vcpu_bo == NULL)
return 0;
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
- cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a73674f9a0f5..27da13df2f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -36,6 +36,7 @@
#include "soc15_common.h"
#include "vcn/vcn_1_0_offset.h"
+#include "vcn/vcn_1_0_sh_mask.h"
/* 1 second timeout */
#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -120,8 +121,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
version_major, version_minor, family_id);
}
- bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
- + AMDGPU_VCN_SESSION_SIZE * 40;
+ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
@@ -162,11 +162,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
unsigned size;
void *ptr;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->vcn.vcpu_bo == NULL)
return 0;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
size = amdgpu_bo_size(adev->vcn.vcpu_bo);
ptr = adev->vcn.cpu_addr;
@@ -212,18 +212,161 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+ struct dpg_pause_state *new_state)
+{
+ int ret_code;
+ uint32_t reg_data = 0;
+ uint32_t reg_data2 = 0;
+ struct amdgpu_ring *ring;
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
+ adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ new_state->fw_based, new_state->jpeg);
+
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+
+ if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* pause DPG non-jpeg */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.ring_enc[0];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.ring_enc[1];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg non-jpeg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.pause_state.fw_based = new_state->fw_based;
+ }
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+ DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
+ adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ new_state->fw_based, new_state->jpeg);
+
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+
+ if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* Make sure JRBC snoop is disabled before sending the pause */
+ reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
+ reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
+
+ /* pause DPG jpeg */
+ reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.ring_jpeg;
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
+ UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
+ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg jpeg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.pause_state.jpeg = new_state->jpeg;
+ }
+
+ return 0;
+}
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vcn.idle_work.work);
- unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
- unsigned i;
+ unsigned int fences = 0;
+ unsigned int i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
}
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+
+ if (fences)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ amdgpu_vcn_pause_dpg_mode(adev, &new_state);
+ }
+
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
@@ -250,6 +393,22 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
}
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = adev->vcn.pause_state.fw_based;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = adev->vcn.pause_state.jpeg;
+
+ amdgpu_vcn_pause_dpg_mode(adev, &new_state);
+ }
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -264,7 +423,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
@@ -272,11 +431,11 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
amdgpu_ring_write(ring,
- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
@@ -616,7 +775,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r) {
@@ -626,12 +785,12 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
}
amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
@@ -665,7 +824,7 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
ib = &job->ibs[0];
- ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
+ ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
ib->ptr[1] = 0xDEADBEEF;
for (i = 2; i < 16; i += 2) {
ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
@@ -714,7 +873,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
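
The VCN idle worker's new DPG handling boils down to a per-engine decision: pause the firmware-based path while encode fences are outstanding, pause the JPEG path while JPEG fences are outstanding, and only gate power once every count reaches zero. A stand-alone sketch of that decision step, with simplified names mirroring the patch's enum and struct and hypothetical fence counts as inputs:

#include <stdio.h>

enum dpg_state { DPG_UNPAUSE = 0, DPG_PAUSE };

struct pause_state { enum dpg_state fw_based, jpeg; };

static struct pause_state decide(unsigned int enc_fences,
				 unsigned int jpeg_fences)
{
	struct pause_state s;

	s.fw_based = enc_fences ? DPG_PAUSE : DPG_UNPAUSE;
	s.jpeg = jpeg_fences ? DPG_PAUSE : DPG_UNPAUSE;
	return s;
}

int main(void)
{
	/* 3 encode fences outstanding, no JPEG work */
	struct pause_state s = decide(3, 0);

	printf("fw_based=%d jpeg=%d\n", s.fw_based, s.jpeg);
	return 0;
}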
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 0b0b8638d73f..a0ad19af9080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -24,9 +24,9 @@
#ifndef __AMDGPU_VCN_H__
#define __AMDGPU_VCN_H__
-#define AMDGPU_VCN_STACK_SIZE (200*1024)
-#define AMDGPU_VCN_HEAP_SIZE (256*1024)
-#define AMDGPU_VCN_SESSION_SIZE (50*1024)
+#define AMDGPU_VCN_STACK_SIZE (128*1024)
+#define AMDGPU_VCN_CONTEXT_SIZE (512*1024)
+
#define AMDGPU_VCN_FIRMWARE_OFFSET 256
#define AMDGPU_VCN_MAX_ENC_RINGS 3
@@ -56,6 +56,16 @@ enum engine_status_constants {
UVD_STATUS__RBC_BUSY = 0x1,
};
+enum internal_dpg_state {
+ VCN_DPG_STATE__UNPAUSE = 0,
+ VCN_DPG_STATE__PAUSE,
+};
+
+struct dpg_pause_state {
+ enum internal_dpg_state fw_based;
+ enum internal_dpg_state jpeg;
+};
+
struct amdgpu_vcn {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
@@ -69,6 +79,8 @@ struct amdgpu_vcn {
struct amdgpu_ring ring_jpeg;
struct amdgpu_irq_src irq;
unsigned num_enc_rings;
+ enum amd_powergating_state cur_state;
+ struct dpg_pause_state pause_state;
};
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index d2469453dca2..79220a91abe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6277,12 +6277,12 @@ static int ci_dpm_sw_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 78ab939ae5d8..f41f5f57e9f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2002,6 +2002,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
if (amdgpu_dpm == -1)
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
else
@@ -2014,8 +2016,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
@@ -2023,6 +2023,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
if (amdgpu_dpm == -1)
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
else
@@ -2035,8 +2037,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
- amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
@@ -2044,6 +2044,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -2053,8 +2055,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
- amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+
amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
@@ -2063,6 +2064,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -2072,8 +2075,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 44d10c2172f6..b5775c6a857b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -276,7 +276,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -318,7 +318,7 @@ static int cik_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -332,7 +332,7 @@ static int cik_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
amdgpu_irq_remove_domain(adev);
return 0;
@@ -468,8 +468,7 @@ static const struct amdgpu_ih_funcs cik_ih_funcs = {
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &cik_ih_funcs;
+ adev->irq.ih_funcs = &cik_ih_funcs;
}
const struct amdgpu_ip_block_version cik_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 154b1499b07e..b918c8886b75 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -970,19 +970,19 @@ static int cik_sdma_sw_init(void *handle)
}
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1370,10 +1370,8 @@ static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
@@ -1389,15 +1387,13 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version cik_sdma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 960c29e17da6..df5ac4d85a00 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -255,7 +255,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -297,7 +297,7 @@ static int cz_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -311,7 +311,7 @@ static int cz_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
amdgpu_irq_remove_domain(adev);
return 0;
@@ -449,8 +449,7 @@ static const struct amdgpu_ih_funcs cz_ih_funcs = {
static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &cz_ih_funcs;
+ adev->irq.ih_funcs = &cz_ih_funcs;
}
const struct amdgpu_ip_block_version cz_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 3916aa6cc4ec..4cfecdce29a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2746,19 +2746,19 @@ static int dce_v10_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
@@ -3570,8 +3570,7 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v10_0_display_funcs;
+ adev->mode_info.funcs = &dce_v10_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 4ffb612a4e53..7c868916d90f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2867,19 +2867,19 @@ static int dce_v11_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
@@ -3702,8 +3702,7 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v11_0_display_funcs;
+ adev->mode_info.funcs = &dce_v11_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 480c5348a14f..17eaaba36017 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2616,19 +2616,19 @@ static int dce_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
@@ -3376,8 +3376,7 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v6_0_display_funcs;
+ adev->mode_info.funcs = &dce_v6_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 797196476c94..8c0576978d36 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2643,19 +2643,19 @@ static int dce_v8_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
@@ -3458,8 +3458,7 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v8_0_display_funcs;
+ adev->mode_info.funcs = &dce_v8_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 15257634a53a..fdace004544d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -372,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
if (r)
return r;
@@ -649,8 +649,7 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_virtual_display_funcs;
+ adev->mode_info.funcs = &dce_virtual_display_funcs;
}
static int dce_virtual_pageflip(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index de184a886057..d76eb27945dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1552,7 +1552,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev)
adev->gfx.config.double_offchip_lds_buf = 0;
}
-static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
{
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
@@ -3094,15 +3094,15 @@ static int gfx_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -3175,7 +3175,7 @@ static int gfx_v6_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfx_v6_0_gpu_init(adev);
+ gfx_v6_0_constants_init(adev);
r = gfx_v6_0_rlc_resume(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index fc39ebbc9d9f..0e72bc09939a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1886,14 +1886,14 @@ static void gfx_v7_0_config_init(struct amdgpu_device *adev)
}
/**
- * gfx_v7_0_gpu_init - setup the 3D engine
+ * gfx_v7_0_constants_init - setup the 3D engine
*
* @adev: amdgpu_device pointer
*
- * Configures the 3D engine and tiling configuration
- * registers so that the 3D engine is usable.
+ * Initializes the gfx constants such as the 3D engine, tiling configuration
+ * registers, maximum number of quad pipes, render backends, etc.
*/
-static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
{
u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
u32 tmp;
@@ -4516,18 +4516,18 @@ static int gfx_v7_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -4624,7 +4624,7 @@ static int gfx_v7_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfx_v7_0_gpu_init(adev);
+ gfx_v7_0_constants_init(adev);
/* init rlc */
r = gfx_v7_0_rlc_resume(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 470dc80f4fe7..3d0f277a6523 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1173,64 +1173,61 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
}
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
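+ /* the CP ucodes are always registered with the SMU loader now */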
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
+ info->fw = adev->gfx.pfp_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
+ info->fw = adev->gfx.me_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
+ info->fw = adev->gfx.ce_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+ info->fw = adev->gfx.rlc_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
+ info->fw = adev->gfx.mec_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+ /* we also need to account for the JT (jump table) */
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- header = (const struct common_firmware_header *)info->fw->data;
+ if (amdgpu_sriov_vf(adev)) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+ info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+ info->fw = adev->gfx.mec_fw;
adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
+ if (adev->gfx.mec2_fw) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
+ info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ info->fw = adev->gfx.mec2_fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- /* we need account JT in */
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
-
- if (amdgpu_sriov_vf(adev)) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
- info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
- }
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
-
}
out:
@@ -2048,36 +2045,31 @@ static int gfx_v8_0_sw_init(void *handle)
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
- /* KIQ event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
- if (r)
- return r;
-
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
/* Add CP EDC/ECC irq */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
&adev->gfx.cp_ecc_error_irq);
if (r)
return r;
/* SQ interrupts. */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
&adev->gfx.sq_irq);
if (r) {
DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
@@ -3835,7 +3827,7 @@ static void gfx_v8_0_config_init(struct amdgpu_device *adev)
}
}
-static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp, sh_static_mem_cfg;
int i;
@@ -4181,65 +4173,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
udelay(50);
}
-static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_0 *hdr;
- const __le32 *fw_data;
- unsigned i, fw_size;
-
- if (!adev->gfx.rlc_fw)
- return -EINVAL;
-
- hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
- amdgpu_ucode_print_rlc_hdr(&hdr->header);
-
- fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-
- WREG32(mmRLC_GPM_UCODE_ADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
-
- return 0;
-}
-
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
- int r;
- u32 tmp;
-
gfx_v8_0_rlc_stop(adev);
-
- /* disable CG */
- tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
- tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
- RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
- WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
- if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10 ||
- adev->asic_type == CHIP_POLARIS12 ||
- adev->asic_type == CHIP_VEGAM) {
- tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
- tmp &= ~0x3;
- WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
- }
-
- /* disable PG */
- WREG32(mmRLC_PG_CNTL, 0);
-
gfx_v8_0_rlc_reset(adev);
gfx_v8_0_init_pg(adev);
-
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- /* legacy rlc firmware loading */
- r = gfx_v8_0_rlc_load_microcode(adev);
- if (r)
- return r;
- }
-
gfx_v8_0_rlc_start(adev);
return 0;
@@ -4265,63 +4203,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
udelay(50);
}
-static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
-{
- const struct gfx_firmware_header_v1_0 *pfp_hdr;
- const struct gfx_firmware_header_v1_0 *ce_hdr;
- const struct gfx_firmware_header_v1_0 *me_hdr;
- const __le32 *fw_data;
- unsigned i, fw_size;
-
- if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
- return -EINVAL;
-
- pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.pfp_fw->data;
- ce_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.ce_fw->data;
- me_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.me_fw->data;
-
- amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
- amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
- amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
-
- gfx_v8_0_cp_gfx_enable(adev, false);
-
- /* PFP */
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
- WREG32(mmCP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
-
- /* CE */
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
- WREG32(mmCP_CE_UCODE_ADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
-
- /* ME */
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
- WREG32(mmCP_ME_RAM_WADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
- WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
-
- return 0;
-}
-
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
{
u32 count = 0;
@@ -4521,52 +4402,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
udelay(50);
}
-static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
-{
- const struct gfx_firmware_header_v1_0 *mec_hdr;
- const __le32 *fw_data;
- unsigned i, fw_size;
-
- if (!adev->gfx.mec_fw)
- return -EINVAL;
-
- gfx_v8_0_cp_compute_enable(adev, false);
-
- mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
-
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
-
- /* MEC1 */
- WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
- WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
-
- /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
- if (adev->gfx.mec2_fw) {
- const struct gfx_firmware_header_v1_0 *mec2_hdr;
-
- mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
-
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
-
- WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
- WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
- }
-
- return 0;
-}
-
/* KIQ functions */
static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
{
@@ -4892,7 +4727,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+ if (!adev->in_gpu_reset && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -5000,17 +4835,6 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
if (!(adev->flags & AMD_IS_APU))
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- /* legacy firmware loading */
- r = gfx_v8_0_cp_gfx_load_microcode(adev);
- if (r)
- return r;
-
- r = gfx_v8_0_cp_compute_load_microcode(adev);
- if (r)
- return r;
- }
-
r = gfx_v8_0_kiq_resume(adev);
if (r)
return r;
@@ -5039,7 +4863,7 @@ static int gfx_v8_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v8_0_init_golden_registers(adev);
- gfx_v8_0_gpu_init(adev);
+ gfx_v8_0_constants_init(adev);
r = gfx_v8_0_rlc_resume(adev);
if (r)
@@ -5080,6 +4904,55 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
return r;
}
+static bool gfx_v8_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
+ || RREG32(mmGRBM_STATUS2) != 0x8)
+ return false;
+ else
+ return true;
+}
+
+static bool gfx_v8_0_rlc_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (RREG32(mmGRBM_STATUS2) != 0x8)
+ return false;
+ else
+ return true;
+}
+
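+/* busy-wait for up to adev->usec_timeout microseconds for the RLC to go idle */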
+static int gfx_v8_0_wait_for_rlc_idle(void *handle)
+{
+ unsigned int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (gfx_v8_0_rlc_is_idle(handle))
+ return 0;
+
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
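+/* same polling, but for overall GFX idle (GUI_ACTIVE and GRBM_STATUS2) */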
+static int gfx_v8_0_wait_for_idle(void *handle)
+{
+ unsigned int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (gfx_v8_0_is_idle(handle))
+ return 0;
+
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -5098,51 +4971,27 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- gfx_v8_0_cp_enable(adev, false);
- gfx_v8_0_rlc_stop(adev);
-
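+ /* under RLC safe mode, halt the CP and RLC only once each reports idle */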
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ if (!gfx_v8_0_wait_for_idle(adev))
+ gfx_v8_0_cp_enable(adev, false);
+ else
+ pr_err("cp is busy, skip halt cp\n");
+ if (!gfx_v8_0_wait_for_rlc_idle(adev))
+ gfx_v8_0_rlc_stop(adev);
+ else
+ pr_err("rlc is busy, skip halt rlc\n");
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
static int gfx_v8_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->gfx.in_suspend = true;
- return gfx_v8_0_hw_fini(adev);
+ return gfx_v8_0_hw_fini(handle);
}
static int gfx_v8_0_resume(void *handle)
{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- r = gfx_v8_0_hw_init(adev);
- adev->gfx.in_suspend = false;
- return r;
-}
-
-static bool gfx_v8_0_is_idle(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
- return false;
- else
- return true;
-}
-
-static int gfx_v8_0_wait_for_idle(void *handle)
-{
- unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v8_0_is_idle(handle))
- return 0;
-
- udelay(1);
- }
- return -ETIMEDOUT;
+ return gfx_v8_0_hw_init(handle);
}
static bool gfx_v8_0_check_soft_reset(void *handle)
@@ -7013,52 +6862,6 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned int type,
- enum amdgpu_interrupt_state state)
-{
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
-
- switch (type) {
- case AMDGPU_CP_KIQ_IRQ_DRIVER0:
- WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
- if (ring->me == 1)
- WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
- ring->pipe,
- GENERIC2_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
- else
- WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
- ring->pipe,
- GENERIC2_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
- break;
- default:
- BUG(); /* kiq only support GENERIC2_INT now */
- break;
- }
- return 0;
-}
-
-static int gfx_v8_0_kiq_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- u8 me_id, pipe_id, queue_id;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
-
- me_id = (entry->ring_id & 0x0c) >> 2;
- pipe_id = (entry->ring_id & 0x03) >> 0;
- queue_id = (entry->ring_id & 0x70) >> 4;
- DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
- me_id, pipe_id, queue_id);
-
- amdgpu_fence_process(ring);
- return 0;
-}
-
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -7209,11 +7012,6 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
.process = gfx_v8_0_priv_inst_irq,
};
-static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
- .set = gfx_v8_0_kiq_set_interrupt_state,
- .process = gfx_v8_0_kiq_irq,
-};
-
static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
.set = gfx_v8_0_set_cp_ecc_int_state,
.process = gfx_v8_0_cp_ecc_error_irq,
@@ -7235,9 +7033,6 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
- adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
- adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
-
adev->gfx.cp_ecc_error_irq.num_types = 1;
adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f369d9603435..6d7baf59d6e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -97,6 +97,7 @@ MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
@@ -133,7 +134,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -173,7 +177,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
@@ -247,7 +254,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -908,6 +918,50 @@ static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(0);
}
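+/*
+ * Build the per-SE/SH always-on CU masks: the first pg_always_on_cu_num
+ * CUs found in the bitmap feed RLC_PG_ALWAYS_ON_CU_MASK, and the first
+ * always_on_cu_num CUs feed RLC_LB_ALWAYS_ACTIVE_CU_MASK.
+ */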
+static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
+{
+ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ uint32_t pg_always_on_cu_num = 2;
+ uint32_t always_on_cu_num;
+ uint32_t i, j, k;
+ uint32_t mask, cu_bitmap, counter;
+
+ if (adev->flags & AMD_IS_APU)
+ always_on_cu_num = 4;
+ else if (adev->asic_type == CHIP_VEGA12)
+ always_on_cu_num = 8;
+ else
+ always_on_cu_num = 12;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ cu_bitmap = 0;
+ counter = 0;
+ gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (cu_info->bitmap[i][j] & mask) {
+ if (counter == pg_always_on_cu_num)
+ WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
+ if (counter < always_on_cu_num)
+ cu_bitmap |= mask;
+ else
+ break;
+ counter++;
+ }
+ mask <<= 1;
+ }
+
+ WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
+ cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
+ }
+ }
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
uint32_t data;
@@ -941,8 +995,59 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
data |= 0x00C00000;
WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
- /* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
- WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);
+ /*
+ * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
+ * programmed in gfx_v9_0_init_always_on_cu_mask()
+ */
+
+ /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
+ * but used for RLC_LB_CNTL configuration */
+ data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
+ data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
+ data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
+ WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ gfx_v9_0_init_always_on_cu_mask(adev);
+}
+
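+/* Vega20 variant of gfx_v9_0_init_lbpw() with different LB threshold values */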
+static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
+ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
+ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
+ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
+ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
+
+ /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
+ WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
+
+ /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
+ WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
+
+ /* set mmRLC_LB_PARAMS = 0x003F_1006 */
+ data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
+ data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
+ data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
+ WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
+
+ /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
+ data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
+ data &= 0x0000FFFF;
+ data |= 0x00C00000;
+ WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
+
+ /*
+ * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
+ * programmed in gfx_v9_0_init_always_on_cu_mask()
+ */
/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
* but used for RLC_LB_CNTL configuration */
@@ -951,6 +1056,8 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
mutex_unlock(&adev->grbm_idx_mutex);
+
+ gfx_v9_0_init_always_on_cu_mask(adev);
}
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
@@ -1084,8 +1191,17 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
rv_init_cp_jump_table(adev);
amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ }
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
gfx_v9_0_init_lbpw(adev);
+ break;
+ case CHIP_VEGA20:
+ gfx_v9_4_init_lbpw(adev);
+ break;
+ default:
+ break;
}
return 0;
@@ -1605,11 +1721,6 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
- /* KIQ event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
- if (r)
- return r;
-
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
if (r)
@@ -1847,7 +1958,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
}
-static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
int i;
@@ -2403,7 +2514,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->asic_type == CHIP_RAVEN ||
+ adev->asic_type == CHIP_VEGA20) {
if (amdgpu_lbpw != 0)
gfx_v9_0_enable_lbpw(adev, true);
else
@@ -3091,7 +3203,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+ if (!adev->in_gpu_reset && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3235,7 +3347,7 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_init_golden_registers(adev);
- gfx_v9_0_gpu_init(adev);
+ gfx_v9_0_constants_init(adev);
r = gfx_v9_0_csb_vram_pin(adev);
if (r)
@@ -3310,7 +3422,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* Use deinitialize sequence from CAIL when unbinding device from driver,
* otherwise KIQ hangs when binding back
*/
- if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+ if (!adev->in_gpu_reset && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
adev->gfx.kiq.ring.pipe,
@@ -3330,20 +3442,12 @@ static int gfx_v9_0_hw_fini(void *handle)
static int gfx_v9_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->gfx.in_suspend = true;
- return gfx_v9_0_hw_fini(adev);
+ return gfx_v9_0_hw_fini(handle);
}
static int gfx_v9_0_resume(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
-
- r = gfx_v9_0_hw_init(adev);
- adev->gfx.in_suspend = false;
- return r;
+ return gfx_v9_0_hw_init(handle);
}
static bool gfx_v9_0_is_idle(void *handle)
@@ -4609,68 +4713,6 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned int type,
- enum amdgpu_interrupt_state state)
-{
- uint32_t tmp, target;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
-
- if (ring->me == 1)
- target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
- else
- target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
- target += ring->pipe;
-
- switch (type) {
- case AMDGPU_CP_KIQ_IRQ_DRIVER0:
- if (state == AMDGPU_IRQ_STATE_DISABLE) {
- tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
- GENERIC2_INT_ENABLE, 0);
- WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
-
- tmp = RREG32(target);
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
- GENERIC2_INT_ENABLE, 0);
- WREG32(target, tmp);
- } else {
- tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
- GENERIC2_INT_ENABLE, 1);
- WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
-
- tmp = RREG32(target);
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
- GENERIC2_INT_ENABLE, 1);
- WREG32(target, tmp);
- }
- break;
- default:
- BUG(); /* kiq only support GENERIC2_INT now */
- break;
- }
- return 0;
-}
-
-static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- u8 me_id, pipe_id, queue_id;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
-
- me_id = (entry->ring_id & 0x0c) >> 2;
- pipe_id = (entry->ring_id & 0x03) >> 0;
- queue_id = (entry->ring_id & 0x70) >> 4;
- DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
- me_id, pipe_id, queue_id);
-
- amdgpu_fence_process(ring);
- return 0;
-}
-
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -4819,11 +4861,6 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}
-static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
- .set = gfx_v9_0_kiq_set_interrupt_state,
- .process = gfx_v9_0_kiq_irq,
-};
-
static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
.set = gfx_v9_0_set_eop_interrupt_state,
.process = gfx_v9_0_eop_irq,
@@ -4849,9 +4886,6 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
-
- adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
- adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
}
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
@@ -4871,7 +4905,20 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
/* init asic gds info */
- adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
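+ /* use fixed per-ASIC GDS sizes instead of reading mmGDS_VMID0_SIZE */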
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ adev->gds.mem.total_size = 0x10000;
+ break;
+ case CHIP_RAVEN:
+ adev->gds.mem.total_size = 0x1000;
+ break;
+ default:
+ adev->gds.mem.total_size = 0x10000;
+ break;
+ }
+
adev->gds.gws.total_size = 64;
adev->gds.oa.total_size = 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 65f58ebcf835..ceb7847b504f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -82,7 +82,8 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
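+ /* apply the +1 to the VRAM end only, so the aperture top is not
+ * pushed past the AGP end when the AGP aperture sits higher */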
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- (max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18) + 0x1);
+ max((adev->gmc.vram_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
else
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 4411463ca719..e1c2b4e9c7b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -859,11 +859,11 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1180,8 +1180,7 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gmc.gmc_funcs == NULL)
- adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
+ adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ae776ce9a415..910c4ce19cb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -991,11 +991,11 @@ static int gmc_v7_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1388,8 +1388,7 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gmc.gmc_funcs == NULL)
- adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
+ adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 53ae49b8bde8..1d3265c97b70 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1095,11 +1095,11 @@ static int gmc_v8_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1733,8 +1733,7 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gmc.gmc_funcs == NULL)
- adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
+ adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index aad3c7c5fb3a..f35d7a554ad5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -593,8 +593,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gmc.gmc_funcs == NULL)
- adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
+ adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
static int gmc_v9_0_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 842c4b677b4d..cf0fc61aebe6 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -255,7 +255,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -297,7 +297,7 @@ static int iceland_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
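+ /* the IH ring to set up is now passed to amdgpu_ih_ring_init() explicitly */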
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -311,7 +311,7 @@ static int iceland_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
amdgpu_irq_remove_domain(adev);
return 0;
@@ -447,8 +447,7 @@ static const struct amdgpu_ih_funcs iceland_ih_funcs = {
static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &iceland_ih_funcs;
+ adev->irq.ih_funcs = &iceland_ih_funcs;
}
const struct amdgpu_ip_block_version iceland_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index cb79a93c2eb7..d0e478f43443 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2995,12 +2995,12 @@ static int kv_dpm_sw_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 80698b5ffa4a..14649f8475f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -100,7 +100,8 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- (max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18) + 0x1);
+ max((adev->gmc.vram_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
else
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 842567b53df5..64e875d528dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -580,11 +580,11 @@ int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 9217af00bc8d..3f3fac2d50cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -306,11 +306,8 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c403bdf8ad70..2d4770e173dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -504,41 +504,6 @@ static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
return 0;
}
-/**
- * sdma_v2_4_load_microcode - load the sDMA ME ucode
- *
- * @adev: amdgpu_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
-{
- const struct sdma_firmware_header_v1_0 *hdr;
- const __le32 *fw_data;
- u32 fw_size;
- int i, j;
-
- /* halt the MEs */
- sdma_v2_4_enable(adev, false);
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (!adev->sdma.instance[i].fw)
- return -EINVAL;
- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
- amdgpu_ucode_print_sdma_hdr(&hdr->header);
- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- fw_data = (const __le32 *)
- (adev->sdma.instance[i].fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
- for (j = 0; j < fw_size; j++)
- WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
- }
-
- return 0;
-}
/**
* sdma_v2_4_start - setup and start the async dma engines
@@ -552,13 +517,6 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- r = sdma_v2_4_load_microcode(adev);
- if (r)
- return r;
- }
-
/* halt the engine before programming */
sdma_v2_4_enable(adev, false);
@@ -898,19 +856,19 @@ static int sdma_v2_4_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1296,10 +1254,8 @@ static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
@@ -1315,15 +1271,13 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2677d6a1bf42..6fb3edaba0ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -318,14 +318,13 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma.instance[i].burst_nop = true;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+ info->fw = adev->sdma.instance[i].fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
}
out:
if (err) {
@@ -778,42 +777,6 @@ static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
}
/**
- * sdma_v3_0_load_microcode - load the sDMA ME ucode
- *
- * @adev: amdgpu_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
-{
- const struct sdma_firmware_header_v1_0 *hdr;
- const __le32 *fw_data;
- u32 fw_size;
- int i, j;
-
- /* halt the MEs */
- sdma_v3_0_enable(adev, false);
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (!adev->sdma.instance[i].fw)
- return -EINVAL;
- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
- amdgpu_ucode_print_sdma_hdr(&hdr->header);
- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- fw_data = (const __le32 *)
- (adev->sdma.instance[i].fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
- for (j = 0; j < fw_size; j++)
- WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
- }
-
- return 0;
-}
-
-/**
* sdma_v3_0_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
@@ -825,12 +788,6 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
{
int r;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- r = sdma_v3_0_load_microcode(adev);
- if (r)
- return r;
- }
-
/* disable sdma engine before programming it */
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
@@ -1177,19 +1134,19 @@ static int sdma_v3_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1736,10 +1693,8 @@ static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
@@ -1755,15 +1710,13 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2ea1f0d8f5be..04fa3d972636 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -148,6 +148,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -177,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000),
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -818,7 +820,7 @@ sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
uint32_t def, data;
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
- /* disable idle interrupt */
+ /* enable idle interrupt */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
@@ -1320,9 +1322,15 @@ static int sdma_v4_0_sw_init(void *handle)
DRM_INFO("use_doorbell being set to: [%s]\n",
ring->use_doorbell?"true":"false");
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
+ if (adev->asic_type == CHIP_VEGA10)
+ ring->doorbell_index = (i == 0) ?
+ (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) // get DWORD offset
+ : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
+ else
+ ring->doorbell_index = (i == 0) ?
+ (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) // get DWORD offset
+ : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
+
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1358,6 +1366,9 @@ static int sdma_v4_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
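+ /* on Raven, keep SDMA ungated via the SMU while the engine is programmed */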
+ if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+
sdma_v4_0_init_golden_registers(adev);
r = sdma_v4_0_start(adev);
@@ -1375,6 +1386,9 @@ static int sdma_v4_0_hw_fini(void *handle)
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
+ if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
+
return 0;
}
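The two hunks above pair SDMA bring-up on Raven with an SMU powergating handshake: ungate before programming the engine in hw_init, re-gate after quiescing it in hw_fini. A minimal sketch of that pairing, using only the amdgpu_dpm_set_powergating_by_smu() helper visible in this diff (the work in the middle is a placeholder):

    /* ungate SDMA via the SMU, use the engine, gate it again */
    static void raven_sdma_powered_work(struct amdgpu_device *adev)
    {
    	/* hand power control back to the driver before touching registers */
    	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);

    	/* ... init golden registers, start rings, submit work ... */

    	/* let the SMU gate the block once it is idle again */
    	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
    }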
@@ -1801,10 +1815,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1820,15 +1832,13 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index c364ef94cc36..f8408f88cd37 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -2057,13 +2057,13 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
/* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
@@ -2071,13 +2071,14 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+
/* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
@@ -2085,11 +2086,11 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index fafaf259b17b..adbaea6da0d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -502,12 +502,14 @@ static int si_dma_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* DMA0 trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
+ &adev->sdma.trap_irq);
if (r)
return r;
/* DMA1 trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
+ &adev->sdma.trap_irq);
if (r)
return r;
@@ -649,17 +651,10 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- amdgpu_fence_process(&adev->sdma.instance[0].ring);
-
- return 0;
-}
-
-static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- amdgpu_fence_process(&adev->sdma.instance[1].ring);
-
+ if (entry->src_id == 224)
+ amdgpu_fence_process(&adev->sdma.instance[0].ring);
+ else
+ amdgpu_fence_process(&adev->sdma.instance[1].ring);
return 0;
}
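With the second IRQ source removed, both DMA engines funnel through one handler and the engine is recovered from the IV entry's src_id. The same dispatch, sketched as a helper under the assumption that 224 and 244 remain the only legacy SI DMA trap sources (see si_dma_sw_init above):

    /* map a legacy SI DMA trap src_id back to an SDMA instance */
    static unsigned si_dma_instance_from_src_id(unsigned src_id)
    {
    	return (src_id == 244) ? 1 : 0;	/* 224 = DMA0 trap, 244 = DMA1 trap */
    }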
@@ -786,11 +781,6 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
.process = si_dma_process_trap_irq,
};
-static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
- .set = si_dma_set_trap_irq_state,
- .process = si_dma_process_trap_irq_1,
-};
-
static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
.process = si_dma_process_illegal_inst_irq,
};
@@ -799,7 +789,6 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
- adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
}
@@ -863,10 +852,8 @@ static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &si_dma_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
@@ -882,15 +869,13 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version si_dma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 1de96995e690..da58040fdbdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7687,11 +7687,11 @@ static int si_dpm_sw_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 60dad63098a2..b3d7d9f83202 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -142,7 +142,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -170,7 +170,7 @@ static int si_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -182,7 +182,7 @@ static int si_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
return 0;
}
@@ -308,8 +308,7 @@ static const struct amdgpu_ih_funcs si_ih_funcs = {
static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &si_ih_funcs;
+ adev->irq.ih_funcs = &si_ih_funcs;
}
const struct amdgpu_ip_block_version si_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 138c4810a3de..bf5e6a413dee 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -529,6 +529,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
else
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@@ -539,8 +541,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
@@ -551,6 +551,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -560,8 +562,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
default:
@@ -739,7 +739,8 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_MMHUB |
- AMD_PG_SUPPORT_VCN;
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index f5d602540673..958b10a57073 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -57,13 +57,33 @@
loop--; \
if (!loop) { \
DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
- inst, #reg, expected_value, (tmp_ & (mask))); \
+ inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
ret = -ETIMEDOUT; \
break; \
} \
} \
} while (0)
+#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
+ ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ UVD_DPG_LMA_CTL__MASK_EN_MASK | \
+ ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
+ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); })
+
+#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
+ do { \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
+ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } while (0)
+
#endif
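The two macros added above tunnel register accesses through the UVD_DPG_LMA_MASK/CTL/DATA indirection that DPG mode requires, instead of plain MMIO. A hedged usage sketch, with register names taken from the vcn_v1_0.c hunks later in this diff:

    /* write a masked value through the LMA window, SRAM bank 0 */
    WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

    /* read the same register back through the window */
    tmp = RREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, 0xFFFFFFFF, 0);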
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 52853d8a8fdd..3abffd06b5c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -266,7 +266,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -317,7 +317,7 @@ static int tonga_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
if (r)
return r;
@@ -334,7 +334,7 @@ static int tonga_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
amdgpu_irq_remove_domain(adev);
return 0;
@@ -513,8 +513,7 @@ static const struct amdgpu_ih_funcs tonga_ih_funcs = {
static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &tonga_ih_funcs;
+ adev->irq.ih_funcs = &tonga_ih_funcs;
}
const struct amdgpu_ip_block_version tonga_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 8a926d1df939..1fc17bf39fed 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -108,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 50248059412e..fde6ad5ac9ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -105,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 6ae82cc2e55e..7a5b40275e8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -274,7 +274,7 @@ err:
*/
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint32_t handle,
- bool direct, struct dma_fence **fence)
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
@@ -310,11 +310,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct)
- r = amdgpu_job_submit_direct(job, ring, &f);
- else
- r = amdgpu_job_submit(job, &ring->adev->vce.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
@@ -345,7 +341,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
if (r) {
DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
goto error;
@@ -393,14 +389,14 @@ static int uvd_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
/* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index a289f6a20b6b..58b39afcfb86 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -280,8 +280,8 @@ err:
*
* Close up a stream for HW test or if userspace failed to do so
*/
-int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence)
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
@@ -317,11 +317,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct)
- r = amdgpu_job_submit_direct(job, ring, &f);
- else
- r = amdgpu_job_submit(job, &ring->adev->vce.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
@@ -352,7 +348,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
+ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
if (r) {
DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 7eaa54ba016b..ea28828360d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -417,7 +417,7 @@ static int vce_v2_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCE */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index c8390f9adfd6..6dbd39730070 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -423,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
int r, i;
/* VCE */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 2664bb2c47c3..eae90922fdbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -37,6 +37,11 @@
#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+#define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab
+#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
+#define mmUVD_REG_XX_MASK 0x05ac
+#define mmUVD_REG_XX_MASK_BASE_IDX 1
+
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -198,7 +203,8 @@ static int vcn_v1_0_hw_init(void *handle)
done:
if (!r)
- DRM_INFO("VCN decode and encode initialized successfully.\n");
+ DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
return r;
}
@@ -266,17 +272,18 @@ static int vcn_v1_0_resume(void *handle)
}
/**
- * vcn_v1_0_mc_resume - memory controller programming
+ * vcn_v1_0_mc_resume_spg_mode - memory controller programming
*
* @adev: amdgpu_device pointer
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
+static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
+ /* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
@@ -296,20 +303,21 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
+ /* cache window 1: stack */
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+ /* cache window 2: context */
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
+ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
+ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
- AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
@@ -317,6 +325,96 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config);
WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+}
+
+static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+{
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
+ 0xFFFFFFFF, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+ offset = size;
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
+ }
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
+
+ /* cache window 1: stack */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
+ 0xFFFFFFFF, 0);
+
+ /* cache window 2: context */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
+ 0xFFFFFFFF, 0);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}
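Both mc_resume variants program the same three-window layout over the VCN BO; only the access path (direct MMIO vs. the DPG LMA window) differs. A sketch of the non-PSP layout, assuming the AMDGPU_VCN_STACK_SIZE and AMDGPU_VCN_CONTEXT_SIZE constants used above (with PSP firmware load, offset is 0 and window 0 points at the TMR instead):

    uint64_t fw_base    = adev->vcn.gpu_addr;                    /* window 0: firmware, size bytes */
    uint64_t stack_base = adev->vcn.gpu_addr + offset;           /* window 1: AMDGPU_VCN_STACK_SIZE */
    uint64_t ctx_base   = stack_base + AMDGPU_VCN_STACK_SIZE;    /* window 2: AMDGPU_VCN_CONTEXT_SIZE */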
/**
@@ -519,6 +617,60 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
+static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
+{
+ uint32_t reg_data = 0;
+
+ /* disable JPEG CGC */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
+
+ /* enable sw clock gating control */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+ UVD_CGC_CTRL__SYS_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MODE_MASK |
+ UVD_CGC_CTRL__MPEG2_MODE_MASK |
+ UVD_CGC_CTRL__REGS_MODE_MASK |
+ UVD_CGC_CTRL__RBC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+ UVD_CGC_CTRL__IDCT_MODE_MASK |
+ UVD_CGC_CTRL__MPRD_MODE_MASK |
+ UVD_CGC_CTRL__MPC_MODE_MASK |
+ UVD_CGC_CTRL__LBSI_MODE_MASK |
+ UVD_CGC_CTRL__LRBBM_MODE_MASK |
+ UVD_CGC_CTRL__WCB_MODE_MASK |
+ UVD_CGC_CTRL__VCPU_MODE_MASK |
+ UVD_CGC_CTRL__SCPU_MODE_MASK);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
+
+ /* turn off clock gating */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
+
+ /* turn on SUVD clock gating */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
+
+ /* turn on sw mode in UVD_SUVD_CGC_CTRL */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
+}
+
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
@@ -614,7 +766,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
*
* Setup and start the VCN block
*/
-static int vcn_v1_0_start(struct amdgpu_device *adev)
+static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
uint32_t rb_bufsz, tmp;
@@ -625,41 +777,24 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
lmi_swap_cntl = 0;
vcn_1_0_disable_static_power_gating(adev);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
+
/* disable clock gating */
vcn_v1_0_disable_clock_gating(adev);
- vcn_v1_0_mc_resume(adev);
-
/* disable interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* stall UMC and register bus before resetting VCPU */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
- UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
- mdelay(1);
-
- /* put LMI, VCPU, RBC etc... into reset */
- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
- UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
- UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
- UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
- UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
- UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
- mdelay(5);
-
/* initialize VCN memory controller */
- WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
- (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__REQ_MODE_MASK |
- 0x00100000L);
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
@@ -667,41 +802,61 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
#endif
WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
- WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);
+
+ WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
- /* take all subblocks out of reset, except VCPU */
- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
- mdelay(5);
+ WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v1_0_mc_resume_spg_mode(adev);
+
+ WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
+ RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
/* enable VCPU clock */
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
- UVD_VCPU_CNTL__CLK_EN_MASK);
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* boot up the VCPU */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
+ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
/* enable UMC */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
- /* boot up the VCPU */
- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
- mdelay(10);
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);
for (i = 0; i < 10; ++i) {
uint32_t status;
for (j = 0; j < 100; ++j) {
status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
- if (status & 2)
+ if (status & UVD_STATUS__IDLE)
break;
mdelay(10);
}
r = 0;
- if (status & 2)
+ if (status & UVD_STATUS__IDLE)
break;
DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
@@ -721,24 +876,22 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
}
/* enable master interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
- (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
- ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
+ UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);
/* enable system interrupt for JRBC, TODO: move to set interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
- /* clear the bit 4 of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of UVD_STATUS */
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
@@ -759,6 +912,8 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
+
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
@@ -782,12 +937,13 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
ring = &adev->vcn.ring_jpeg;
WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
/* initialize wptr */
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
@@ -799,6 +955,166 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
return 0;
}
+static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ uint32_t rb_bufsz, tmp;
+ uint32_t lmi_swap_cntl;
+
+ /* disable byte swapping */
+ lmi_swap_cntl = 0;
+
+ vcn_1_0_enable_static_power_gating(adev);
+
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
+
+ /* enable clock gating */
+ vcn_v1_0_clock_gating_dpg_mode(adev, 0);
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
+
+ /* disable interrupt */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+ 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+
+ /* initialize VCN memory controller */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ 0x00100000L, 0xFFFFFFFF, 0);
+
+#ifdef __BIG_ENDIAN
+ /* swap (8 in 32) RB and IB */
+ lmi_swap_cntl = 0xa;
+#endif
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL,
+ 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
+
+ vcn_v1_0_mc_resume_dpg_mode(adev);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
+
+ /* boot up the VCPU */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
+
+ /* enable UMC */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
+ 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
+ 0xFFFFFFFF, 0);
+
+ /* enable master interrupt */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+ UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+
+ vcn_v1_0_clock_gating_dpg_mode(adev, 1);
+ /* setup mmUVD_LMI_CTRL */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ 0x00100000L, 0xFFFFFFFF, 1);
+
+ tmp = adev->gfx.config.gb_addr_config;
+ /* setup VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
+ UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
+
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* set the write pointer delay */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* set the wb address */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
+ (upper_32_bits(ring->gpu_addr) >> 2));
+
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
+
+ WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
+
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
+ ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+
+ /* initialize wptr */
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+
+ /* copy patch commands to the jpeg ring */
+ vcn_v1_0_jpeg_ring_set_patch_ring(ring,
+ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+
+ return 0;
+}
+
+static int vcn_v1_0_start(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ r = vcn_v1_0_start_dpg_mode(adev);
+ else
+ r = vcn_v1_0_start_spg_mode(adev);
+ return r;
+}
+
/**
* vcn_v1_0_stop - stop VCN block
*
@@ -806,41 +1122,90 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
*
* stop the VCN block
*/
-static int vcn_v1_0_stop(struct amdgpu_device *adev)
+static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
- /* force RBC into idle state */
- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
+ int ret_code, tmp;
- /* Stall UMC and register bus before resetting VCPU */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
- UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
- mdelay(1);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
/* put VCPU into reset */
- WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
- mdelay(5);
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
/* disable VCPU clock */
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__CLK_EN_MASK);
- /* Unstall UMC and register bus */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+ /* reset LMI UMC/LMI */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
+ ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
+ ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
- WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
vcn_v1_0_enable_clock_gating(adev);
vcn_1_0_enable_static_power_gating(adev);
return 0;
}
+static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
+{
+ int ret_code = 0;
+
+ /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ /* wait for read ptr to be equal to write ptr */
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ return 0;
+}
+
+static int vcn_v1_0_stop(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ r = vcn_v1_0_stop_dpg_mode(adev);
+ else
+ r = vcn_v1_0_stop_spg_mode(adev);
+
+ return r;
+}
+
static bool vcn_v1_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
+ return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
static int vcn_v1_0_wait_for_idle(void *handle)
@@ -848,7 +1213,8 @@ static int vcn_v1_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
- SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);
+ SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
+ UVD_STATUS__IDLE, ret);
return ret;
}
@@ -910,6 +1276,10 @@ static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
+ lower_32_bits(ring->wptr) | 0x80000000);
+
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
@@ -1633,12 +2003,20 @@ static int vcn_v1_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
+ int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (state == adev->vcn.cur_state)
+ return 0;
+
if (state == AMD_PG_STATE_GATE)
- return vcn_v1_0_stop(adev);
+ ret = vcn_v1_0_stop(adev);
else
- return vcn_v1_0_start(adev);
+ ret = vcn_v1_0_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+ return ret;
}
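Caching cur_state makes repeated requests for the same powergating state a no-op, which matters now that the DPG path powers the block up and down on every transition:

    /* the second call returns 0 without touching the hardware */
    vcn_v1_0_set_powergating_state(handle, AMD_PG_STATE_GATE);
    vcn_v1_0_set_powergating_state(handle, AMD_PG_STATE_GATE);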
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index acbe5a770207..a99f71797aa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -380,7 +380,7 @@ static int vega10_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 256 * 1024, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
if (r)
return r;
@@ -397,7 +397,7 @@ static int vega10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
return 0;
}
@@ -494,8 +494,7 @@ static const struct amdgpu_ih_funcs vega10_ih_funcs = {
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &vega10_ih_funcs;
+ adev->irq.ih_funcs = &vega10_ih_funcs;
}
const struct amdgpu_ip_block_version vega10_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 88b57a5e9489..07880d35e9de 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1596,16 +1596,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
break;
case CHIP_FIJI:
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1615,8 +1617,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
@@ -1626,6 +1626,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1635,8 +1637,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
@@ -1649,6 +1649,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1658,8 +1660,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
@@ -1667,6 +1667,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1676,8 +1678,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
@@ -1688,6 +1688,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1697,8 +1699,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 758398bdb39b..14d5b5fa822d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -447,6 +447,24 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
return retval;
}
+static int kfd_ioctl_get_queue_wave_state(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_queue_wave_state_args *args = data;
+ int r;
+
+ mutex_lock(&p->mutex);
+
+ r = pqm_get_wave_state(&p->pqm, args->queue_id,
+ (void __user *)args->ctl_stack_address,
+ &args->ctl_stack_used_size,
+ &args->save_area_used_size);
+
+ mutex_unlock(&p->mutex);
+
+ return r;
+}
+
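For context, a hedged userspace sketch of driving the new ioctl; the args struct and request number are assumed to match the uapi kfd_ioctl.h additions from the same series:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>	/* assumed uapi header from this series */

    int dump_wave_state(int kfd_fd, uint32_t queue_id, void *ctl_stack_buf)
    {
    	struct kfd_ioctl_get_queue_wave_state_args args = {0};

    	args.queue_id = queue_id;
    	args.ctl_stack_address = (uintptr_t)ctl_stack_buf;
    	if (ioctl(kfd_fd, AMDKFD_IOC_GET_QUEUE_WAVE_STATE, &args))
    		return -1;
    	printf("ctl stack used: %u bytes, save area used: %u bytes\n",
    	       args.ctl_stack_used_size, args.save_area_used_size);
    	return 0;
    }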
static int kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1615,6 +1633,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
kfd_ioctl_set_cu_mask, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
+ kfd_ioctl_get_queue_wave_state, 0)
+
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index d4560f1869bd..56412b0e7e1c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -647,6 +647,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA20:
pcache_info = vega10_cache_info;
num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9b4e6ad4a7df..a9f18ea7e354 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -53,6 +53,7 @@ static const struct kfd_device_info kaveri_device_info = {
.needs_iommu_device = true,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info carrizo_device_info = {
@@ -69,6 +70,7 @@ static const struct kfd_device_info carrizo_device_info = {
.needs_iommu_device = true,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info raven_device_info = {
@@ -84,6 +86,7 @@ static const struct kfd_device_info raven_device_info = {
.needs_iommu_device = true,
.needs_pci_atomics = true,
.num_sdma_engines = 1,
+ .num_sdma_queues_per_engine = 2,
};
#endif
@@ -101,6 +104,7 @@ static const struct kfd_device_info hawaii_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info tonga_device_info = {
@@ -116,21 +120,7 @@ static const struct kfd_device_info tonga_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
-};
-
-static const struct kfd_device_info tonga_vf_device_info = {
- .asic_family = CHIP_TONGA,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = false,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info fiji_device_info = {
@@ -146,6 +136,7 @@ static const struct kfd_device_info fiji_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info fiji_vf_device_info = {
@@ -161,6 +152,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
@@ -177,6 +169,7 @@ static const struct kfd_device_info polaris10_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info polaris10_vf_device_info = {
@@ -192,6 +185,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info polaris11_device_info = {
@@ -207,6 +201,7 @@ static const struct kfd_device_info polaris11_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info vega10_device_info = {
@@ -222,6 +217,7 @@ static const struct kfd_device_info vega10_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info vega10_vf_device_info = {
@@ -237,8 +233,24 @@ static const struct kfd_device_info vega10_vf_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
};
+static const struct kfd_device_info vega20_device_info = {
+ .asic_family = CHIP_VEGA20,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 8,
+};
struct kfd_deviceid {
unsigned short did;
@@ -293,7 +305,6 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6928, &tonga_device_info }, /* Tonga */
{ 0x6929, &tonga_device_info }, /* Tonga */
{ 0x692B, &tonga_device_info }, /* Tonga */
- { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
{ 0x6938, &tonga_device_info }, /* Tonga */
{ 0x6939, &tonga_device_info }, /* Tonga */
{ 0x7300, &fiji_device_info }, /* Fiji */
@@ -328,6 +339,12 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6868, &vega10_device_info }, /* Vega10 */
{ 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
{ 0x687F, &vega10_device_info }, /* Vega10 */
+ { 0x66a0, &vega20_device_info }, /* Vega20 */
+ { 0x66a1, &vega20_device_info }, /* Vega20 */
+ { 0x66a2, &vega20_device_info }, /* Vega20 */
+ { 0x66a3, &vega20_device_info }, /* Vega20 */
+ { 0x66a7, &vega20_device_info }, /* Vega20 */
+ { 0x66af, &vega20_device_info } /* Vega20 */
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -366,6 +383,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
return NULL;
}
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+ if (!kfd)
+ return NULL;
+
/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
* 32 and 64-bit requests are possible and must be
* supported.
@@ -377,12 +398,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics\n",
pdev->vendor, pdev->device);
+ kfree(kfd);
return NULL;
- }
-
- kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
- if (!kfd)
- return NULL;
+ } else if (!ret)
+ kfd->pci_atomic_requested = true;
kfd->kgd = kgd;
kfd->device_info = device_info;
@@ -419,6 +438,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
{
unsigned int size;
+ kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
+ KGD_ENGINE_MEC1);
+ kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
+ KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ec0d62a16e53..a3b933967171 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -109,7 +109,7 @@ static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return dqm->dev->device_info->num_sdma_engines
- * KFD_SDMA_QUEUES_PER_ENGINE;
+ * dqm->dev->device_info->num_sdma_queues_per_engine;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
- int retval;
struct mqd_manager *mqd_mgr;
+ int retval;
mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (!q->properties.is_active)
return 0;
- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
- &q->properties, q->process->mm);
+ if (WARN(q->process->mm != current->mm,
+ "should only run in user thread"))
+ retval = -EFAULT;
+ else
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, current->mm);
if (retval)
goto out_uninit_mqd;
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.type == KFD_QUEUE_TYPE_SDMA))
- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
- &q->properties, q->process->mm);
+ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+ if (WARN(q->process->mm != current->mm,
+ "should only run in user thread"))
+ retval = -EFAULT;
+ else
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+ q->pipe, q->queue,
+ &q->properties, current->mm);
+ }
out_unlock:
dqm_unlock(dqm);
@@ -653,10 +663,11 @@ out:
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
+ struct mm_struct *mm = NULL;
struct queue *q;
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
- uint32_t pd_base;
+ uint64_t pd_base;
int retval = 0;
pdd = qpd_to_pdd(qpd);
@@ -676,7 +687,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
- pr_debug("Updated PD address to 0x%08x\n", pd_base);
+ pr_debug("Updated PD address to 0x%llx\n", pd_base);
if (!list_empty(&qpd->queues_list)) {
dqm->dev->kfd2kgd->set_vm_context_page_table_base(
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
kfd_flush_tlb(pdd);
}
+ /* Take a safe reference to the mm_struct, which may otherwise
+ * disappear even while the kfd_process is still referenced.
+ */
+ mm = get_task_mm(pdd->process->lead_thread);
+ if (!mm) {
+ retval = -EFAULT;
+ goto out;
+ }
+
/* activate all active queues on the qpd */
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.is_evicted = false;
q->properties.is_active = true;
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
- q->queue, &q->properties,
- q->process->mm);
+ q->queue, &q->properties, mm);
if (retval)
goto out;
dqm->queue_count++;
}
qpd->evicted = 0;
out:
+ if (mm)
+ mmput(mm);
dqm_unlock(dqm);
return retval;
}
@@ -717,7 +738,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
{
struct queue *q;
struct kfd_process_device *pdd;
- uint32_t pd_base;
+ uint64_t pd_base;
int retval = 0;
pdd = qpd_to_pdd(qpd);
@@ -737,7 +758,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
- pr_debug("Updated PD address to 0x%08x\n", pd_base);
+ pr_debug("Updated PD address to 0x%llx\n", pd_base);
/* activate all active queues on the qpd */
list_for_each_entry(q, &qpd->queues_list, list) {
@@ -761,7 +782,7 @@ static int register_process(struct device_queue_manager *dqm,
{
struct device_process_node *n;
struct kfd_process_device *pdd;
- uint32_t pd_base;
+ uint64_t pd_base;
int retval;
n = kzalloc(sizeof(*n), GFP_KERNEL);
@@ -779,6 +800,7 @@ static int register_process(struct device_queue_manager *dqm,
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
+ pr_debug("Updated PD address to 0x%llx\n", pd_base);
retval = dqm->asic_ops.update_qpd(dqm, qpd);
@@ -1342,9 +1364,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
{
int retval;
struct mqd_manager *mqd_mgr;
- bool preempt_all_queues;
-
- preempt_all_queues = false;
retval = 0;
@@ -1528,6 +1547,41 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
return retval;
}
+static int get_wave_state(struct device_queue_manager *dqm,
+ struct queue *q,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+{
+ struct mqd_manager *mqd;
+ int r;
+
+ dqm_lock(dqm);
+
+ if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.is_active || !q->device->cwsr_enabled) {
+ r = -EINVAL;
+ goto dqm_unlock;
+ }
+
+ mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd) {
+ r = -ENOMEM;
+ goto dqm_unlock;
+ }
+
+ if (!mqd->get_wave_state) {
+ r = -EINVAL;
+ goto dqm_unlock;
+ }
+
+ r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
+ save_area_used_size);
+
+dqm_unlock:
+ dqm_unlock(dqm);
+ return r;
+}
static int process_termination_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
@@ -1649,6 +1703,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.process_termination = process_termination_cpsch;
dqm->ops.evict_process_queues = evict_process_queues_cpsch;
dqm->ops.restore_process_queues = restore_process_queues_cpsch;
+ dqm->ops.get_wave_state = get_wave_state;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
@@ -1668,6 +1723,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
dqm->ops.restore_process_queues =
restore_process_queues_nocpsch;
+ dqm->ops.get_wave_state = get_wave_state;
break;
default:
pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
@@ -1695,6 +1751,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
break;
case CHIP_VEGA10:
+ case CHIP_VEGA20:
case CHIP_RAVEN:
device_queue_manager_init_v9(&dqm->asic_ops);
break;
@@ -1806,7 +1863,9 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
- for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
+ for (queue = 0;
+ queue < dqm->dev->device_info->num_sdma_queues_per_engine;
+ queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->kgd, pipe, queue, &dump, &n_regs);
if (r)
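The mm handling in restore_process_queues_nocpsch() above follows the standard kernel pattern for borrowing another task's address space: get_task_mm() takes a counted reference that keeps the mm_struct alive even if the owning process is exiting, and mmput() drops it when done. In isolation, with an illustrative caller, the pattern is:

#include <linux/sched.h>
#include <linux/sched/mm.h>

/* Illustrative helper: pin a task's mm so it cannot vanish mid-operation. */
static int with_task_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task); /* NULL if already exited */

	if (!mm)
		return -EFAULT;

	/* ... safe to pass mm to load_mqd() or touch its user mappings ... */

	mmput(mm); /* drop the reference taken by get_task_mm() */
	return 0;
}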
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 00da3169a004..70e38a2e23b9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -33,7 +33,6 @@
#define KFD_UNMAP_LATENCY_MS (4000)
#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
-#define KFD_SDMA_QUEUES_PER_ENGINE (2)
struct device_process_node {
struct qcm_process_device *qpd;
@@ -82,6 +81,8 @@ struct device_process_node {
*
 * @restore_process_queues: Restore all evicted queues of a process
*
+ * @get_wave_state: Retrieves context save state and optionally copies the
+ * control stack, if kept in the MQD, to the given userspace address.
*/
struct device_queue_manager_ops {
@@ -137,6 +138,12 @@ struct device_queue_manager_ops {
struct qcm_process_device *qpd);
int (*restore_process_queues)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
+
+ int (*get_wave_state)(struct device_queue_manager *dqm,
+ struct queue *q,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
};
struct device_queue_manager_asic_ops {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 97d5423c5673..3d66cec414af 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -400,6 +400,7 @@ int kfd_init_apertures(struct kfd_process *process)
kfd_init_apertures_vi(pdd, id);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA20:
case CHIP_RAVEN:
kfd_init_apertures_v9(pdd, id);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 9f84b4d9fb88..6c31f7370193 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -322,6 +322,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
break;
case CHIP_VEGA10:
+ case CHIP_VEGA20:
case CHIP_RAVEN:
kernel_queue_init_v9(&kq->ops_asic_specific);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
index 684a3bf07efd..33830b1a5a54 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
@@ -71,8 +71,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
uint32_t *buffer, struct qcm_process_device *qpd)
{
struct pm4_mes_map_process *packet;
- uint64_t vm_page_table_base_addr =
- (uint64_t)(qpd->page_table_base) << 12;
+ uint64_t vm_page_table_base_addr = qpd->page_table_base;
packet = (struct pm4_mes_map_process *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 3bc25ab84f34..e33019a7a883 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -39,6 +39,7 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
case CHIP_POLARIS11:
return mqd_manager_init_vi_tonga(type, dev);
case CHIP_VEGA10:
+ case CHIP_VEGA20:
case CHIP_RAVEN:
return mqd_manager_init_v9(type, dev);
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 4e84052d4e21..f8261313ae7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -43,6 +43,9 @@
*
* @is_occupied: Checks if the relevant HQD slot is occupied.
*
+ * @get_wave_state: Retrieves context save state and optionally copies the
+ * control stack, if kept in the MQD, to the given userspace address.
+ *
* @mqd_mutex: Mqd manager mutex.
*
* @dev: The kfd device structure coupled with this module.
@@ -85,6 +88,11 @@ struct mqd_manager {
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id);
+ int (*get_wave_state)(struct mqd_manager *mm, void *mqd,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
+
#if defined(CONFIG_DEBUG_FS)
int (*debugfs_show_mqd)(struct seq_file *m, void *data);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 0cedb37cf513..f381c1cb27bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -266,6 +266,28 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
pipe_id, queue_id);
}
+static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+{
+ struct v9_mqd *m;
+
+ /* Control stack is located one page after MQD. */
+ void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
+
+ m = get_mqd(mqd);
+
+ *ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
+ m->cp_hqd_cntl_stack_offset;
+ *save_area_used_size = m->cp_hqd_wg_state_offset;
+
+ if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
+ return -EFAULT;
+
+ return 0;
+}
+
static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -435,6 +457,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+ mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
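The GFXv9 size math above is easier to see with concrete numbers; the register values below are purely illustrative, and the MQD field semantics are inferred from this hunk rather than documented here:

/* Illustrative register values; semantics inferred from the hunk above. */
u32 cntl_stack_size   = 0x1000;	/* cp_hqd_cntl_stack_size: whole region  */
u32 cntl_stack_offset = 0x0c40;	/* cp_hqd_cntl_stack_offset: fill point  */
u32 wg_state_offset   = 0x2800;	/* cp_hqd_wg_state_offset                */

u32 ctl_stack_used = cntl_stack_size - cntl_stack_offset;  /* 0x3c0 bytes */
u32 save_area_used = wg_state_offset;                      /* GFXv9 rule  */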
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index b81fda3754da..6469b3456f00 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -269,6 +269,28 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
pipe_id, queue_id);
}
+static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+{
+ struct vi_mqd *m;
+
+ m = get_mqd(mqd);
+
+ *ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
+ m->cp_hqd_cntl_stack_offset;
+ *save_area_used_size = m->cp_hqd_wg_state_offset -
+ m->cp_hqd_cntl_stack_size;
+
+ /* Control stack is not copied to user mode for GFXv8 because
+ * it's part of the context save area that is already
+ * accessible to user mode
+ */
+
+ return 0;
+}
+
static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -436,6 +458,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+ mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 1092631765cb..c6080ed3b6a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -229,6 +229,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
pm->pmf = &kfd_vi_pm_funcs;
break;
case CHIP_VEGA10:
+ case CHIP_VEGA20:
case CHIP_RAVEN:
pm->pmf = &kfd_v9_pm_funcs;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b0064b08aa11..53ff86d45d91 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -176,6 +176,7 @@ struct kfd_device_info {
bool needs_iommu_device;
bool needs_pci_atomics;
unsigned int num_sdma_engines;
+ unsigned int num_sdma_queues_per_engine;
};
struct kfd_mem_obj {
@@ -247,6 +248,10 @@ struct kfd_dev {
/* Debug manager */
struct kfd_dbgmgr *dbgmgr;
+ /* Firmware versions */
+ uint16_t mec_fw_version;
+ uint16_t sdma_fw_version;
+
/* Maximum process number mapped to HW scheduler */
unsigned int max_proc_per_quantum;
@@ -257,6 +262,8 @@ struct kfd_dev {
/* xGMI */
uint64_t hive_id;
+
+ bool pci_atomic_requested;
};
/* KGD2KFD callbacks */
@@ -500,11 +507,11 @@ struct qcm_process_device {
* All the memory management data should be here too
*/
uint64_t gds_context_area;
+ uint64_t page_table_base;
uint32_t sh_mem_config;
uint32_t sh_mem_bases;
uint32_t sh_mem_ape1_base;
uint32_t sh_mem_ape1_limit;
- uint32_t page_table_base;
uint32_t gds_size;
uint32_t num_gws;
uint32_t num_oac;
@@ -856,6 +863,11 @@ int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
+int pqm_get_wave_state(struct process_queue_manager *pqm,
+ unsigned int qid,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index c8cad9c078ae..fcaaf93681ac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -408,6 +408,28 @@ struct kernel_queue *pqm_get_kernel_queue(
return NULL;
}
+int pqm_get_wave_state(struct process_queue_manager *pqm,
+ unsigned int qid,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+{
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("amdkfd: No queue %d exists for operation\n",
+ qid);
+ return -EFAULT;
+ }
+
+ return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
+ pqn->q,
+ ctl_stack,
+ ctl_stack_used_size,
+ save_area_used_size);
+}
+
#if defined(CONFIG_DEBUG_FS)
int pqm_debugfs_mqds(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 0dff66be8d7a..e3843c5929ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -482,11 +482,11 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
(unsigned long long int) 0);
sysfs_show_32bit_prop(buffer, "fw_version",
- dev->gpu->kfd2kgd->get_fw_version(
- dev->gpu->kgd,
- KGD_ENGINE_MEC1));
+ dev->gpu->mec_fw_version);
sysfs_show_32bit_prop(buffer, "capability",
dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, "sdma_fw_version",
+ dev->gpu->sdma_fw_version);
}
return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
@@ -1127,17 +1127,40 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
{
- struct kfd_iolink_properties *link;
+ struct kfd_iolink_properties *link, *cpu_link;
+ struct kfd_topology_device *cpu_dev;
+ uint32_t cap;
+ uint32_t cpu_flag = CRAT_IOLINK_FLAGS_ENABLED;
+ uint32_t flag = CRAT_IOLINK_FLAGS_ENABLED;
if (!dev || !dev->gpu)
return;
- /* GPU only creates direck links so apply flags setting to all */
- if (dev->gpu->device_info->asic_family == CHIP_HAWAII)
- list_for_each_entry(link, &dev->io_link_props, list)
- link->flags = CRAT_IOLINK_FLAGS_ENABLED |
- CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
- CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
+ pcie_capability_read_dword(dev->gpu->pdev,
+ PCI_EXP_DEVCAP2, &cap);
+
+ if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
+ cpu_flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
+
+ if (!dev->gpu->pci_atomic_requested ||
+ dev->gpu->device_info->asic_family == CHIP_HAWAII)
+ flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
+
+ /* GPU only creates direct links so apply flags setting to all */
+ list_for_each_entry(link, &dev->io_link_props, list) {
+ link->flags = flag;
+ cpu_dev = kfd_topology_device_by_proximity_domain(
+ link->node_to);
+ if (cpu_dev) {
+ list_for_each_entry(cpu_link,
+ &cpu_dev->io_link_props, list)
+ if (cpu_link->node_to == link->node_from)
+ cpu_link->flags = cpu_flag;
+ }
+ }
}
int kfd_topology_add_device(struct kfd_dev *gpu)
@@ -1255,6 +1278,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA20:
case CHIP_RAVEN:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
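The CPU-side iolink flag computation above reduces to a single capability probe. As a standalone sketch (real PCI core helpers; pdev stands for whichever device's completer capabilities are being checked):

#include <linux/pci.h>

/* Standalone sketch of the completer-capability probe used above. */
static bool pcie_completes_atomics(struct pci_dev *pdev)
{
	u32 cap = 0;

	pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);

	return !!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			 PCI_EXP_DEVCAP2_ATOMIC_COMP64));
}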
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2be1e3033ce4..e5294d1a3049 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -338,14 +338,6 @@ static int dm_set_powergating_state(void *handle,
/* Prototypes of private functions */
static int dm_early_init(void* handle);
-static void hotplug_notify_work_func(struct work_struct *work)
-{
- struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
- struct drm_device *dev = dm->ddev;
-
- drm_kms_helper_hotplug_event(dev);
-}
-
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
@@ -447,8 +439,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
- INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
-
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -728,6 +718,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
return NULL;
}
+static void emulated_link_detect(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+
+ link->type = dc_connection_none;
+ prev_sink = link->local_sink;
+
+ if (prev_sink != NULL)
+ dc_sink_retain(prev_sink);
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+ break;
+ }
+
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return;
+ }
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
+ return;
+ }
+
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ if (edid_status != EDID_OK)
+ DC_ERROR("Failed to read EDID");
+
+}
+
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -741,6 +812,7 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
+ enum dc_connection_type new_connection_type = dc_connection_none;
int ret;
int i;
@@ -771,7 +843,13 @@ static int dm_resume(void *handle)
continue;
mutex_lock(&aconnector->hpd_lock);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none)
+ emulated_link_detect(aconnector->dc_link);
+ else
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false;
@@ -1020,6 +1098,7 @@ static void handle_hpd_irq(void *param)
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
/*
* In case of failure or MST no need to update connector status or notify the OS
@@ -1030,7 +1109,21 @@ static void handle_hpd_irq(void *param)
if (aconnector->fake_enable)
aconnector->fake_enable = false;
- if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+
+ } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
amdgpu_dm_update_connector_after_detect(aconnector);
@@ -1130,6 +1223,7 @@ static void handle_hpd_rx_irq(void *param)
struct drm_device *dev = connector->dev;
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+ enum dc_connection_type new_connection_type = dc_connection_none;
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -1142,7 +1236,24 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
!is_mst_root_connector) {
/* Downstream Port status changed. */
- if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+ if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(dc_link);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -1214,7 +1325,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
- unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 ||
@@ -1539,6 +1650,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
int32_t total_overlay_planes, total_primary_planes;
+ enum dc_connection_type new_connection_type = dc_connection_none;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1605,7 +1717,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link = dc_get_link_at_index(dm->dc, i);
- if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+ if (!dc_link_detect_sink(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(link);
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
}
@@ -2648,7 +2767,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (dm_state && dm_state->freesync_capable)
stream->ignore_msa_timing_param = true;
finish:
- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+ if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
dc_sink_release(sink);
return stream;
@@ -4079,6 +4198,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
/* TODO eliminate or rename surface_update */
struct dc_surface_update surface_updates[1] = { {0} };
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+ struct dc_stream_status *stream_status;
/* Prepare wait for target vblank early - before the fence-waits */
@@ -4134,7 +4254,19 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
+ stream_status = dc_stream_get_status(acrtc_state->stream);
+ if (!stream_status) {
+ DRM_ERROR("No stream status for CRTC: id=%d\n",
+ acrtc->crtc_id);
+ return;
+ }
+
+ surface_updates->surface = stream_status->plane_states[0];
+ if (!surface_updates->surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc->crtc_id);
+ return;
+ }
surface_updates->flip_addr = &addr;
dc_commit_updates_for_stream(adev->dm.dc,
@@ -4608,12 +4740,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- /* Signal HW programming completion */
- drm_atomic_helper_commit_hw_done(state);
if (wait_for_vblank)
drm_atomic_helper_wait_for_flip_done(dev, state);
+ /*
+ * FIXME:
+ * Delay hw_done() until flip_done() is signaled. This is to block
+ * another commit from freeing the CRTC state while we're still
+ * waiting on flip_done.
+ */
+ drm_atomic_helper_commit_hw_done(state);
+
drm_atomic_helper_cleanup_planes(dev, state);
/*
@@ -4797,6 +4935,8 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm,
mod_freesync_build_vrr_infopacket(dm->freesync_module,
new_stream,
&vrr,
+ packet_type_fs1,
+ NULL,
&vrr_infopacket);
new_crtc_state->adjust = vrr.adjust;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d4f1bdf93207..978b34a5011c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -108,8 +108,6 @@ struct amdgpu_display_manager {
const struct dc_link *backlight_link;
- struct work_struct mst_hotplug_work;
-
struct mod_freesync *freesync_module;
/**
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0ef4a40d2247..9a7ac58eb18e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -705,7 +705,8 @@ int connector_debugfs_init(struct amdgpu_dm_connector *connector)
int i;
struct dentry *ent, *dir = connector->base.debugfs_entry;
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
ent = debugfs_create_file(dp_debugfs_entries[i].name,
0644,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index a910f01838ab..a212178f2edc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -36,17 +36,13 @@
* Private declarations.
*****************************************************************************/
-struct handler_common_data {
+struct amdgpu_dm_irq_handler_data {
struct list_head list;
interrupt_handler handler;
void *handler_arg;
/* DM which this handler belongs to */
struct amdgpu_display_manager *dm;
-};
-
-struct amdgpu_dm_irq_handler_data {
- struct handler_common_data hcd;
/* DAL irq source which registered for this interrupt. */
enum dc_irq_source irq_source;
};
@@ -61,7 +57,7 @@ struct amdgpu_dm_irq_handler_data {
* Private functions.
*****************************************************************************/
-static void init_handler_common_data(struct handler_common_data *hcd,
+static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
void (*ih)(void *),
void *args,
struct amdgpu_display_manager *dm)
@@ -85,11 +81,9 @@ static void dm_irq_work_func(struct work_struct *work)
struct amdgpu_dm_irq_handler_data *handler_data;
list_for_each(entry, handler_list) {
- handler_data =
- list_entry(
- entry,
- struct amdgpu_dm_irq_handler_data,
- hcd.list);
+ handler_data = list_entry(entry,
+ struct amdgpu_dm_irq_handler_data,
+ list);
DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
handler_data->irq_source);
@@ -97,7 +91,7 @@ static void dm_irq_work_func(struct work_struct *work)
DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
handler_data->irq_source);
- handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ handler_data->handler(handler_data->handler_arg);
}
/* Call a DAL subcomponent which registered for interrupt notification
@@ -137,11 +131,11 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
list_for_each_safe(entry, tmp, hnd_list) {
handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
- hcd.list);
+ list);
if (ih == handler) {
/* Found our handler. Remove it from the list. */
- list_del(&handler->hcd.list);
+ list_del(&handler->list);
handler_removed = true;
break;
}
@@ -230,8 +224,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
memset(handler_data, 0, sizeof(*handler_data));
- init_handler_common_data(&handler_data->hcd, ih, handler_args,
- &adev->dm);
+ init_handler_common_data(handler_data, ih, handler_args, &adev->dm);
irq_source = int_params->irq_source;
@@ -250,7 +243,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
break;
}
- list_add_tail(&handler_data->hcd.list, hnd_list);
+ list_add_tail(&handler_data->list, hnd_list);
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
@@ -462,15 +455,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
entry,
&adev->dm.irq_handler_list_high_tab[irq_source]) {
- handler_data =
- list_entry(
- entry,
- struct amdgpu_dm_irq_handler_data,
- hcd.list);
+ handler_data = list_entry(entry,
+ struct amdgpu_dm_irq_handler_data,
+ list);
/* Call a subcomponent which registered for immediate
* interrupt notification */
- handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ handler_data->handler(handler_data->handler_arg);
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 6d16b4a0353d..0fab64a2a915 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -105,6 +105,8 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
+
+ amdgpu_pm_compute_clocks(adev);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
index 5e2ea12fbb73..d0fc54f8fb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -1625,11 +1625,11 @@ void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performan
else {
v->dsty_after_scaler = 0.0;
}
- v->v_update_offset_pix =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->v_update_offset_pix[k] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk);
- v->v_update_width_pix = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
- v->v_ready_offset_pix =dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
- v->t_setup = (v->v_update_offset_pix + v->v_update_width_pix + v->v_ready_offset_pix) / v->pixel_clock[k];
+ v->v_update_width_pix[k] = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
+ v->v_ready_offset_pix[k] = dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
+ v->t_setup = (v->v_update_offset_pix[k] + v->v_update_width_pix[k] + v->v_ready_offset_pix[k]) / v->pixel_clock[k];
v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]);
if (v->prefetch_mode == 0.0) {
v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 80ec09eef44f..3208188b7ed4 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1096,9 +1096,9 @@ bool dcn_validate_bandwidth(
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
continue;
- pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
- pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
- pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx];
+ pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx];
+ pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx];
pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
@@ -1137,9 +1137,9 @@ bool dcn_validate_bandwidth(
TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
/* update previously split pipe */
- hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
- hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
- hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx];
+ hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx];
+ hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx];
hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1c438eedf77a..7c491c91465f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -60,6 +60,7 @@
#define DC_LOGGER \
dc->ctx->logger
+const static char DC_BUILD_ID[] = "production-build";
/*******************************************************************************
* Private functions
@@ -460,9 +461,25 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_link_settings *link_setting,
struct dc_link *link)
{
+ int i;
+ struct pipe_ctx *pipe;
+ struct dc_stream_state *link_stream;
struct dc_link_settings store_settings = *link_setting;
- struct dc_stream_state *link_stream =
- link->dc->current_state->res_ctx.pipe_ctx[0].stream;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->sink
+ && pipe->stream->sink->link) {
+ if (pipe->stream->sink->link == link)
+ break;
+ }
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return;
+
+ link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
link->preferred_link_setting = store_settings;
if (link_stream)
@@ -742,6 +759,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
dc->config = init_params->flags;
+ dc->build_id = DC_BUILD_ID;
+
DC_LOG_DC("Display Core initialized\n");
@@ -1094,32 +1113,6 @@ static bool is_surface_in_context(
return false;
}
-static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
-{
- switch (format) {
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
- return 12;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
- case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
- return 16;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
- return 32;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
- return 64;
- default:
- ASSERT_CRITICAL(false);
- return -1;
- }
-}
-
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
@@ -1153,16 +1146,13 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
|| u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
update_flags->bits.dcc_change = 1;
- if (pixel_format_to_bpp(u->plane_info->format) !=
- pixel_format_to_bpp(u->surface->format))
+ if (resource_pixel_format_to_bpp(u->plane_info->format) !=
+ resource_pixel_format_to_bpp(u->surface->format))
/* different bytes per element will require full bandwidth
* and DML calculation
*/
update_flags->bits.bpp_change = 1;
- if (u->gamma && dce_use_lut(u->plane_info->format))
- update_flags->bits.gamma_change = 1;
-
if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
sizeof(union dc_tiling_info)) != 0) {
update_flags->bits.swizzle_change = 1;
@@ -1179,7 +1169,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
if (update_flags->bits.rotation_change
|| update_flags->bits.stereo_format_change
|| update_flags->bits.pixel_format_change
- || update_flags->bits.gamma_change
|| update_flags->bits.bpp_change
|| update_flags->bits.bandwidth_change
|| update_flags->bits.output_tf_change)
@@ -1269,13 +1258,26 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->coeff_reduction_factor)
update_flags->bits.coeff_reduction_change = 1;
+ if (u->gamma) {
+ enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
+
+ if (u->plane_info)
+ format = u->plane_info->format;
+ else if (u->surface)
+ format = u->surface->format;
+
+ if (dce_use_lut(format))
+ update_flags->bits.gamma_change = 1;
+ }
+
if (update_flags->bits.in_transfer_func_change) {
type = UPDATE_TYPE_MED;
elevate_update_type(&overall_type, type);
}
if (update_flags->bits.input_csc_change
- || update_flags->bits.coeff_reduction_change) {
+ || update_flags->bits.coeff_reduction_change
+ || update_flags->bits.gamma_change) {
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}
@@ -1379,7 +1381,7 @@ static void notify_display_count_to_smu(
* sent as part of pplib_apply_display_requirements.
* So just return.
*/
- if (!pp_smu->set_display_count)
+ if (!pp_smu || !pp_smu->set_display_count)
return;
display_count = 0;
@@ -1834,3 +1836,16 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
}
}
}
+
+void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
+{
+ info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
+ info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
+ info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
+ info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
+ info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
+ info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
+ info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
+ info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
+ info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index bd58dbae7d3e..fb04a4ad141f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -198,7 +198,7 @@ static bool program_hpd_filter(
return result;
}
-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
{
uint32_t is_hpd_high = 0;
struct gpio *hpd_pin;
@@ -612,7 +612,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
return false;
- if (false == detect_sink(link, &new_connection_type)) {
+ if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -2559,23 +2559,24 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc,
&stream->timing);
- resource_build_info_frame(pipe_ctx);
- core_dc->hwss.update_info_frame(pipe_ctx);
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ resource_build_info_frame(pipe_ctx);
+ core_dc->hwss.update_info_frame(pipe_ctx);
- /* eDP lit up by bios already, no need to enable again. */
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
- pipe_ctx->stream->apply_edp_fast_boot_optimization) {
- pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
- pipe_ctx->stream->dpms_off = false;
- return;
- }
+ /* eDP lit up by bios already, no need to enable again. */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ pipe_ctx->stream->apply_edp_fast_boot_optimization) {
+ pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+ pipe_ctx->stream->dpms_off = false;
+ return;
+ }
- if (pipe_ctx->stream->dpms_off)
- return;
+ if (pipe_ctx->stream->dpms_off)
+ return;
- status = enable_link(state, pipe_ctx);
+ status = enable_link(state, pipe_ctx);
- if (status != DC_OK) {
+ if (status != DC_OK) {
DC_LOG_WARNING("enabling link %u failed: %d\n",
pipe_ctx->stream->sink->link->link_index,
status);
@@ -2590,23 +2591,26 @@ void core_link_enable_stream(
BREAK_TO_DEBUGGER();
return;
}
- }
+ }
- core_dc->hwss.enable_audio_stream(pipe_ctx);
+ core_dc->hwss.enable_audio_stream(pipe_ctx);
- /* turn off otg test pattern if enable */
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- COLOR_DEPTH_UNDEFINED);
+ /* turn off otg test pattern if enable */
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
- core_dc->hwss.enable_stream(pipe_ctx);
+ core_dc->hwss.enable_stream(pipe_ctx);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- allocate_mst_payload(pipe_ctx);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+
+ core_dc->hwss.unblank_stream(pipe_ctx,
+ &pipe_ctx->stream->sink->link->cur_link_settings);
+
+ }
- core_dc->hwss.unblank_stream(pipe_ctx,
- &pipe_ctx->stream->sink->link->cur_link_settings);
}
void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 2d6a4300bfa4..b6fe29b9fb65 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1975,6 +1975,9 @@ static void calculate_phy_pix_clks(struct dc_stream_state *stream)
else
stream->phy_pix_clk =
stream->timing.pix_clk_khz;
+
+ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ stream->phy_pix_clk *= 2;
}
enum dc_status resource_map_pool_resources(
@@ -2096,6 +2099,14 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
+ if (dc->res_pool->funcs->get_default_swizzle_mode &&
+ pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
+ result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
+ if (result != DC_OK)
+ return result;
+ }
+
/* Switch to dp clock source only if there is
* no non dp stream that shares the same timing
* with the dp stream.
@@ -2885,3 +2896,32 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
return res;
}
+
+unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
+{
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ return 8;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ return 12;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ return 16;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ return 32;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ return 64;
+ default:
+ ASSERT_CRITICAL(false);
+ return -1;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7691139363a9..199527171100 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,13 +38,12 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.66"
+#define DC_VER "3.1.68"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
#define MAX_SINKS_PER_LINK 4
-
/*******************************************************************************
* Display Core Interfaces
******************************************************************************/
@@ -208,6 +207,7 @@ struct dc_clocks {
int dcfclk_deep_sleep_khz;
int fclk_khz;
int phyclk_khz;
+ int dramclk_khz;
};
struct dc_debug_options {
@@ -315,6 +315,8 @@ struct dc {
struct compressor *fbc_compressor;
struct dc_debug_data debug_data;
+
+ const char *build_id;
};
enum frame_buffer_mode {
@@ -599,6 +601,8 @@ struct dc_validation_set {
enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
+void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
+
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 57f57cf0fe2a..7825e4b5e97c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -289,7 +289,8 @@ enum swizzle_mode_values {
DC_SW_VAR_S_X = 29,
DC_SW_VAR_D_X = 30,
DC_SW_VAR_R_X = 31,
- DC_SW_MAX
+ DC_SW_MAX = 32,
+ DC_SW_UNKNOWN = DC_SW_MAX
};
union dc_tiling_info {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 438fb35d87b8..3bfdccceb524 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -216,6 +216,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
bool dc_link_is_dp_sink_present(struct dc_link *link);
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
/*
* DPCD access interfaces
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 4fb62780a696..6e12d640d020 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -659,4 +659,16 @@ enum i2c_mot_mode {
I2C_MOT_FALSE
};
+struct AsicStateEx {
+ unsigned int memoryClock;
+ unsigned int displayClock;
+ unsigned int engineClock;
+ unsigned int maxSupportedDppClock;
+ unsigned int dppClock;
+ unsigned int socClock;
+ unsigned int dcfClockDeepSleep;
+ unsigned int fClock;
+ unsigned int phyClock;
+};
+
#endif /* DC_TYPES_H_ */
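Together, the AsicStateEx struct above and the get_clock_requirements_for_state() helper added in dc.c give callers a snapshot of the DCN clock requirements for a state. A hypothetical consumer (the dc pointer and the DRM_INFO use are illustrative, not from this patch):

/* Hypothetical consumer of the new helper. */
struct AsicStateEx clocks = {0};

get_clock_requirements_for_state(dc->current_state, &clocks);
DRM_INFO("dispclk %u dcfclk %u fclk %u dramclk %u (kHz)\n",
	 clocks.displayClock, clocks.engineClock,
	 clocks.fClock, clocks.memoryClock);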
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 3f5b2e6f7553..aaeb7faac0c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -312,7 +312,7 @@ static void process_channel_reply(
/* in case HPD is LOW, exit AUX transaction */
if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
- reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+ reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index bf6261a1584b..d89a097ba936 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -468,6 +468,9 @@ static void dce12_update_clocks(struct dccg *dccg,
{
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ /* TODO: Investigate why this is needed to fix display corruption. */
+ new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
+
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
@@ -661,6 +664,11 @@ static void dce_update_clocks(struct dccg *dccg,
bool safe_to_lower)
{
struct dm_pp_power_level_change_request level_change_req;
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
+
+ /* TODO: Investigate why this is needed to fix display corruption. */
+ if (!clk_dce->dfs_bypass_active)
+ new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
/* get max clock state from PPLIB */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 4942590e8b9c..366bc8c2c643 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -662,21 +662,10 @@ bool dce110_link_encoder_validate_dp_output(
const struct dce110_link_encoder *enc110,
const struct dc_crtc_timing *crtc_timing)
{
- /* default RGB only */
- if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
- return true;
-
- if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
- return true;
-
- /* for DCE 8.x or later DP Y-only feature,
- * we need ASIC cap + FeatureSupportDPYonly, not support 666 */
- if (crtc_timing->flags.Y_ONLY &&
- enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
- crtc_timing->display_color_depth != COLOR_DEPTH_666)
- return true;
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
- return false;
+ return true;
}
void dce110_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index b1cc38827f09..14754a87156c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -551,8 +551,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 300000,
.flags.bits.IS_HBR2_CAPABLE = true,
- .flags.bits.IS_TPS3_CAPABLE = true,
- .flags.bits.IS_YCBCR_CAPABLE = true
+ .flags.bits.IS_TPS3_CAPABLE = true
};
struct link_encoder *dce100_link_encoder_create(
@@ -690,7 +689,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+ }
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index dc1eed5ba996..b75ede5f84f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1377,26 +1377,13 @@ static enum dc_status apply_single_controller_ctx_to_hw(
/* */
dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
- /* FPGA does not program backend */
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
- pipe_ctx->stream_res.opp,
- COLOR_SPACE_YCBCR601,
- stream->timing.display_color_depth,
- pipe_ctx->stream->signal);
-
- pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
- pipe_ctx->stream_res.opp,
- &stream->bit_depth_params,
- &stream->clamping);
- return DC_OK;
- }
/* TODO: move to stream encoder */
if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
}
+
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
@@ -2550,7 +2537,7 @@ static void pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index e4c5db75c4c6..d6db3dbd9015 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
-void dce110_set_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed);
-
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index b44cc7042249..de190935f0a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -570,8 +570,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 594000,
.flags.bits.IS_HBR2_CAPABLE = true,
- .flags.bits.IS_TPS3_CAPABLE = true,
- .flags.bits.IS_YCBCR_CAPABLE = true
+ .flags.bits.IS_TPS3_CAPABLE = true
};
static struct link_encoder *dce110_link_encoder_create(
@@ -720,7 +719,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+ }
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 0f8332ea1160..3ce79c208ddf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -555,8 +555,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
- .flags.bits.IS_TPS4_CAPABLE = true,
- .flags.bits.IS_YCBCR_CAPABLE = true
+ .flags.bits.IS_TPS4_CAPABLE = true
};
struct link_encoder *dce112_link_encoder_create(
@@ -694,9 +693,6 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.opps[i] != NULL)
dce110_opp_destroy(&pool->base.opps[i]);
- if (pool->base.engines[i] != NULL)
- dce110_engine_destroy(&pool->base.engines[i]);
-
if (pool->base.transforms[i] != NULL)
dce112_transform_destroy(&pool->base.transforms[i]);
@@ -712,6 +708,11 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 5853522a6182..eb0f5f9a973b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
dh_data->dchub_info_valid = false;
}
-static void dce120_set_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed)
-{
- if (context->stream_count <= 0)
- return;
-
- dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
void dce120_hw_sequencer_construct(struct dc *dc)
{
/* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
- dc->hwss.set_bandwidth = dce120_set_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 59055801af44..79ab5f9f9115 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -533,7 +533,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+ }
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
@@ -609,7 +611,6 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true,
- .flags.bits.IS_YCBCR_CAPABLE = true
};
static struct link_encoder *dce120_link_encoder_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 1dc590ccc5f9..d68f951f9869 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -650,8 +650,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 297000,
.flags.bits.IS_HBR2_CAPABLE = true,
- .flags.bits.IS_TPS3_CAPABLE = true,
- .flags.bits.IS_YCBCR_CAPABLE = true
+ .flags.bits.IS_TPS3_CAPABLE = true
};
struct link_encoder *dce80_link_encoder_create(
@@ -739,7 +738,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+ }
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 1ea91e153d3a..4254e7e1a509 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -87,6 +87,23 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}
+void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub)
+{
+ REG_UPDATE(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, 0);
+}
+
+bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
+{
+ uint32_t enable = 0;
+
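+ /* Nonzero readback means the self-refresh force-enable bit is set. */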
+ REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
+
+ return enable ? true : false;
+}
+
bool hubbub1_verify_allow_pstate_change_high(
struct hubbub *hubbub)
{
@@ -116,7 +133,43 @@ bool hubbub1_verify_allow_pstate_change_high(
forced_pstate_allow = false;
}
- /* RV1:
+ /* RV2:
+ * dchubbubdebugind, at: 0xB
+ * description
+ * 0: Pipe0 Plane0 Allow Pstate Change
+ * 1: Pipe0 Plane1 Allow Pstate Change
+ * 2: Pipe0 Cursor0 Allow Pstate Change
+ * 3: Pipe0 Cursor1 Allow Pstate Change
+ * 4: Pipe1 Plane0 Allow Pstate Change
+ * 5: Pipe1 Plane1 Allow Pstate Change
+ * 6: Pipe1 Cursor0 Allow Pstate Change
+ * 7: Pipe1 Cursor1 Allow Pstate Change
+ * 8: Pipe2 Plane0 Allow Pstate Change
+ * 9: Pipe2 Plane1 Allow Pstate Change
+ * 10: Pipe2 Cursor0 Allow Pstate Change
+ * 11: Pipe2 Cursor1 Allow Pstate Change
+ * 12: Pipe3 Plane0 Allow Pstate Change
+ * 13: Pipe3 Plane1 Allow Pstate Change
+ * 14: Pipe3 Cursor0 Allow Pstate Change
+ * 15: Pipe3 Cursor1 Allow Pstate Change
+ * 16: Pipe4 Plane0 Allow Pstate Change
+ * 17: Pipe4 Plane1 Allow Pstate Change
+ * 18: Pipe4 Cursor0 Allow Pstate Change
+ * 19: Pipe4 Cursor1 Allow Pstate Change
+ * 20: Pipe5 Plane0 Allow Pstate Change
+ * 21: Pipe5 Plane1 Allow Pstate Change
+ * 22: Pipe5 Cursor0 Allow Pstate Change
+ * 23: Pipe5 Cursor1 Allow Pstate Change
+ * 24: Pipe6 Plane0 Allow Pstate Change
+ * 25: Pipe6 Plane1 Allow Pstate Change
+ * 26: Pipe6 Cursor0 Allow Pstate Change
+ * 27: Pipe6 Cursor1 Allow Pstate Change
+ * 28: WB0 Allow Pstate Change
+ * 29: WB1 Allow Pstate Change
+ * 30: Arbiter's allow_pstate_change
+ * 31: SOC pstate change request
+ *
+ * RV1:
* dchubbubdebugind, at: 0x7
* description "3-0: Pipe0 cursor0 QOS
* 7-4: Pipe1 cursor0 QOS
@@ -140,7 +193,6 @@ bool hubbub1_verify_allow_pstate_change_high(
* 31: SOC pstate change request
*/
-
REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);
for (i = 0; i < pstate_wait_timeout_us; i++) {
@@ -802,5 +854,9 @@ void hubbub1_construct(struct hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0x7;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+ if (ctx->dce_version == DCN_VERSION_1_01)
+ hubbub->debug_test_index_pstate = 0xB;
+#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index d6e596eef4c5..d0f03d152913 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -203,6 +203,10 @@ void hubbub1_program_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower);
+void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub);
+
+bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub);
+
void hubbub1_toggle_watermark_change_req(
struct hubbub *hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 6bd4ec39f869..193184affefb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -44,6 +44,7 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
+#include "dc_link_dp.h"
#define DC_LOGGER_INIT(logger)
@@ -996,7 +997,21 @@ static void dcn10_init_hw(struct dc *dc)
} else {
if (!dcb->funcs->is_accelerated_mode(dcb)) {
+ bool allow_self_refresh_force_enable =
+ hubbub1_is_allow_self_refresh_enabled(dc->res_pool->hubbub);
+
bios_golden_init(dc);
+
+ /* WA to let the DF sleep when idle after resume from S0i3.
+ * The VBIOS command table may set
+ * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE to 1; if the bit
+ * was 0 before the command table ran and is 1 afterwards, set it
+ * back to 0.
+ */
+ if (allow_self_refresh_force_enable == false &&
+ hubbub1_is_allow_self_refresh_enabled(dc->res_pool->hubbub))
+ hubbub1_disable_allow_self_refresh(dc->res_pool->hubbub);
+
disable_vga(dc->hwseq);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 6f675206a136..ba6a8686062f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -606,22 +606,10 @@ bool dcn10_link_encoder_validate_dp_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing)
{
- /* default RGB only */
- if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
- return true;
-
- if (enc10->base.features.flags.bits.IS_YCBCR_CAPABLE)
- return true;
-
- /* for DCE 8.x or later DP Y-only feature,
- * we need ASIC cap + FeatureSupportDPYonly, not support 666
- */
- if (crtc_timing->flags.Y_ONLY &&
- enc10->base.features.flags.bits.IS_YCBCR_CAPABLE &&
- crtc_timing->display_color_depth != COLOR_DEPTH_666)
- return true;
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
- return false;
+ return true;
}
void dcn10_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 411f89218e01..54626682bab2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -98,7 +98,6 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c
struct dc_crtc_timing patched_crtc_timing;
int vesa_sync_start;
int asic_blank_end;
- int interlace_factor;
int vertical_line_start;
patched_crtc_timing = *dc_crtc_timing;
@@ -112,16 +111,13 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c
vesa_sync_start -
patched_crtc_timing.h_border_left;
- interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
-
vesa_sync_start = patched_crtc_timing.v_addressable +
patched_crtc_timing.v_border_bottom +
patched_crtc_timing.v_front_porch;
asic_blank_end = (patched_crtc_timing.v_total -
vesa_sync_start -
- patched_crtc_timing.v_border_top)
- * interlace_factor;
+ patched_crtc_timing.v_border_top);
vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
if (vertical_line_start < 0) {
@@ -154,7 +150,7 @@ void optc1_program_vline_interrupt(
req_delta_lines--;
if (req_delta_lines > vsync_line)
- start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
+ start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) + 2;
else
start_line = vsync_line - req_delta_lines;
@@ -186,7 +182,6 @@ void optc1_program_timing(
uint32_t v_sync_end;
uint32_t v_init, v_fp2;
uint32_t h_sync_polarity, v_sync_polarity;
- uint32_t interlace_factor;
uint32_t start_point = 0;
uint32_t field_num = 0;
uint32_t h_div_2;
@@ -237,16 +232,8 @@ void optc1_program_timing(
REG_UPDATE(OTG_H_SYNC_A_CNTL,
OTG_H_SYNC_A_POL, h_sync_polarity);
- /* Load vertical timing */
+ v_total = patched_crtc_timing.v_total - 1;
- /* CRTC_V_TOTAL = v_total - 1 */
- if (patched_crtc_timing.flags.INTERLACE) {
- interlace_factor = 2;
- v_total = 2 * patched_crtc_timing.v_total;
- } else {
- interlace_factor = 1;
- v_total = patched_crtc_timing.v_total - 1;
- }
REG_SET(OTG_V_TOTAL, 0,
OTG_V_TOTAL, v_total);
@@ -259,7 +246,7 @@ void optc1_program_timing(
OTG_V_TOTAL_MIN, v_total);
/* v_sync_start = 0, v_sync_end = v_sync_width */
- v_sync_end = patched_crtc_timing.v_sync_width * interlace_factor;
+ v_sync_end = patched_crtc_timing.v_sync_width;
REG_UPDATE_2(OTG_V_SYNC_A,
OTG_V_SYNC_A_START, 0,
@@ -271,15 +258,13 @@ void optc1_program_timing(
asic_blank_end = (patched_crtc_timing.v_total -
vesa_sync_start -
- patched_crtc_timing.v_border_top)
- * interlace_factor;
+ patched_crtc_timing.v_border_top);
/* v_blank_start = v_blank_end + v_active */
asic_blank_start = asic_blank_end +
(patched_crtc_timing.v_border_top +
patched_crtc_timing.v_addressable +
- patched_crtc_timing.v_border_bottom)
- * interlace_factor;
+ patched_crtc_timing.v_border_bottom);
REG_UPDATE_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, asic_blank_start,
@@ -301,7 +286,7 @@ void optc1_program_timing(
0 : 1;
REG_UPDATE(OTG_V_SYNC_A_CNTL,
- OTG_V_SYNC_A_POL, v_sync_polarity);
+ OTG_V_SYNC_A_POL, v_sync_polarity);
v_init = asic_blank_start;
if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
@@ -532,7 +517,6 @@ bool optc1_validate_timing(
struct timing_generator *optc,
const struct dc_crtc_timing *timing)
{
- uint32_t interlace_factor;
uint32_t v_blank;
uint32_t h_blank;
uint32_t min_v_blank;
@@ -540,10 +524,8 @@ bool optc1_validate_timing(
ASSERT(timing != NULL);
- interlace_factor = timing->flags.INTERLACE ? 2 : 1;
v_blank = (timing->v_total - timing->v_addressable -
- timing->v_border_top - timing->v_border_bottom) *
- interlace_factor;
+ timing->v_border_top - timing->v_border_bottom);
h_blank = (timing->h_total - timing->h_addressable -
timing->h_border_right -
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index cb1b134b8fcb..a71453a15ae3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -507,6 +507,18 @@ static const struct resource_caps res_cap = {
.num_ddc = 4,
};
+#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
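+/* DCN 1.01 (Raven 2) exposes fewer pipes than DCN 1.0, so override the caps. */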
+static const struct resource_caps rv2_res_cap = {
+ .num_timing_generator = 3,
+ .num_opp = 3,
+ .num_video_plane = 3,
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 3,
+ .num_ddc = 3,
+};
+#endif
+
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
@@ -711,8 +723,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
- .flags.bits.IS_TPS4_CAPABLE = true,
- .flags.bits.IS_YCBCR_CAPABLE = true
+ .flags.bits.IS_TPS4_CAPABLE = true
};
struct link_encoder *dcn10_link_encoder_create(
@@ -897,7 +908,9 @@ static void destruct(struct dcn10_resource_pool *pool)
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+ }
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
@@ -1119,6 +1132,24 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
return DC_OK;
}
+static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+{
+ enum dc_status result = DC_OK;
+
+ enum surface_pixel_format surf_pix_format = plane_state->format;
+ unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
+
+ enum swizzle_mode_values swizzle = DC_SW_LINEAR;
+
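+ /* Default 64bpp surfaces to the 64KB display swizzle, others to standard. */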
+ if (bpp == 64)
+ swizzle = DC_SW_64KB_D;
+ else
+ swizzle = DC_SW_64KB_S;
+
+ plane_state->tiling_info.gfx9.swizzle = swizzle;
+ return result;
+}
+
static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
@@ -1129,7 +1160,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_bandwidth = dcn_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
- .add_stream_to_ctx = dcn10_add_stream_to_ctx
+ .add_stream_to_ctx = dcn10_add_stream_to_ctx,
+ .get_default_swizzle_mode = dcn10_get_default_swizzle_mode
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1152,7 +1184,12 @@ static bool construct(
ctx->dc_bios->regs = &bios_regs;
- pool->base.res_cap = &res_cap;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+ if (ctx->dce_version == DCN_VERSION_1_01)
+ pool->base.res_cap = &rv2_res_cap;
+ else
+#endif
+ pool->base.res_cap = &res_cap;
pool->base.funcs = &dcn10_res_pool_funcs;
/*
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
index 8eee8ace1259..59c3ed43d609 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -346,7 +346,7 @@ static void process_channel_reply(
/* in case HPD is LOW, exit AUX transaction */
if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
- reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+ reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 831a1bdf622c..c1976c175b57 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -120,6 +120,9 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream);
+ enum dc_status (*get_default_swizzle_mode)(
+ struct dc_plane_state *plane_state);
+
};
struct audio_support{
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index ddbb673caa08..e688eb9b975c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -504,10 +504,10 @@ struct dcn_bw_internal_vars {
float prefetch_mode;
float dstx_after_scaler;
float dsty_after_scaler;
- float v_update_offset_pix;
+ float v_update_offset_pix[number_of_planes_minus_one + 1];
float total_repeater_delay_time;
- float v_update_width_pix;
- float v_ready_offset_pix;
+ float v_update_width_pix[number_of_planes_minus_one + 1];
+ float v_ready_offset_pix[number_of_planes_minus_one + 1];
float t_setup;
float t_wait;
float bandwidth_available_for_immediate_flip;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 58818920ed41..e28e9770e0a3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -58,7 +58,6 @@ struct encoder_feature_support {
uint32_t IS_HBR3_CAPABLE:1;
uint32_t IS_TPS3_CAPABLE:1;
uint32_t IS_TPS4_CAPABLE:1;
- uint32_t IS_YCBCR_CAPABLE:1;
uint32_t HDMI_6GB_EN:1;
} bits;
uint32_t raw;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 76d00c6dbca9..33b99e3ab10d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -172,4 +172,7 @@ void update_audio_usage(
const struct resource_pool *pool,
struct audio *audio,
bool acquired);
+
+unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index 03476b142d8e..f56d2891475f 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -102,4 +102,9 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal)
dc_is_hdmi_signal(signal));
}
+static inline bool dc_is_virtual_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_VIRTUAL);
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 15427f4fc990..cdcefd087487 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1069,10 +1069,14 @@ static void build_evenly_distributed_points(
struct dividers dividers)
{
struct gamma_pixel *p = points;
- struct gamma_pixel *p_last = p + numberof_points - 1;
+ struct gamma_pixel *p_last;
uint32_t i = 0;
+ // This function should not be called with numberof_points == 0
+ ASSERT(numberof_points > 0);
+ p_last = p + numberof_points - 1;
+
do {
struct fixed31_32 value = dc_fixpt_from_fraction(i,
numberof_points - 1);
@@ -1083,7 +1087,7 @@ static void build_evenly_distributed_points(
++p;
++i;
- } while (i != numberof_points);
+ } while (i < numberof_points);
p->r = dc_fixpt_div(p_last->r, dividers.divider1);
p->g = dc_fixpt_div(p_last->g, dividers.divider1);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index e1688902a1b0..4018c7180d00 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -480,22 +480,11 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
return false;
}
-void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
- const struct dc_stream_state *stream,
- const struct mod_vrr_params *vrr,
- struct dc_info_packet *infopacket)
+static void build_vrr_infopacket_header_v1(enum signal_type signal,
+ struct dc_info_packet *infopacket,
+ unsigned int *payload_size)
{
- /* SPD info packet for FreeSync */
- unsigned char checksum = 0;
- unsigned int idx, payload_size = 0;
-
- /* Check if Freesync is supported. Return if false. If true,
- * set the corresponding bit in the info packet
- */
- if (!vrr->supported || !vrr->send_vsif)
- return;
-
- if (dc_is_hdmi_signal(stream->signal)) {
+ if (dc_is_hdmi_signal(signal)) {
/* HEADER */
@@ -510,9 +499,9 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
infopacket->hb2 = 0x08;
- payload_size = 0x08;
+ *payload_size = 0x08;
- } else if (dc_is_dp_signal(stream->signal)) {
+ } else if (dc_is_dp_signal(signal)) {
/* HEADER */
@@ -536,9 +525,62 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
*/
infopacket->hb3 = 0x04;
- payload_size = 0x1B;
+ *payload_size = 0x1B;
}
+}
+
+static void build_vrr_infopacket_header_v2(enum signal_type signal,
+ struct dc_info_packet *infopacket,
+ unsigned int *payload_size)
+{
+ if (dc_is_hdmi_signal(signal)) {
+
+ /* HEADER */
+
+ /* HB0 = Packet Type = 0x83 (Source Product
+ * Descriptor InfoFrame)
+ */
+ infopacket->hb0 = DC_HDMI_INFOFRAME_TYPE_SPD;
+
+ /* HB1 = Version = 0x02 */
+ infopacket->hb1 = 0x02;
+
+ /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
+ infopacket->hb2 = 0x09;
+
+ *payload_size = 0x0A;
+ } else if (dc_is_dp_signal(signal)) {
+
+ /* HEADER */
+
+ /* HB0 = Secondary-data Packet ID = 0 - Only non-zero
+ * when used to associate audio related info packets
+ */
+ infopacket->hb0 = 0x00;
+
+ /* HB1 = Packet Type = 0x83 (Source Product
+ * Descriptor InfoFrame)
+ */
+ infopacket->hb1 = DC_HDMI_INFOFRAME_TYPE_SPD;
+
+ /* HB2 = [Bits 7:0 = Least significant eight bits -
+ * For INFOFRAME, the value must be 1Bh]
+ */
+ infopacket->hb2 = 0x1B;
+
+ /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x2]
+ * [Bits 1:0 = Most significant two bits = 0x00]
+ */
+ infopacket->hb3 = 0x08;
+
+ *payload_size = 0x1B;
+ }
+}
+
+static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
+ struct dc_info_packet *infopacket)
+{
/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
infopacket->sb[1] = 0x1A;
@@ -576,15 +618,39 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
*/
infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
- /* PB9 - PB27 = Reserved */
+ // FreeSync HDR
+ infopacket->sb[9] = 0;
+ infopacket->sb[10] = 0;
+}
+
+static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
+ struct dc_info_packet *infopacket)
+{
+ if (app_tf != transfer_func_unknown) {
+ infopacket->valid = true;
+
+ infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
+
+ if (app_tf == transfer_func_gamma_22) {
+ infopacket->sb[9] |= 0x04; // PB9 = [Bit 2 = Gamma 2.2 EOTF Active]
+ }
+ }
+}
+
+static void build_vrr_infopacket_checksum(unsigned int *payload_size,
+ struct dc_info_packet *infopacket)
+{
/* Calculate checksum */
+ unsigned int idx = 0;
+ unsigned char checksum = 0;
+
checksum += infopacket->hb0;
checksum += infopacket->hb1;
checksum += infopacket->hb2;
checksum += infopacket->hb3;
- for (idx = 1; idx <= payload_size; idx++)
+ for (idx = 1; idx <= *payload_size; idx++)
checksum += infopacket->sb[idx];
/* PB0 = Checksum (one byte complement) */
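+ /* A one-byte complement means sb[0] is chosen so that the header
+ * plus payload bytes sum to 0 mod 256.
+ */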
@@ -593,6 +659,64 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
infopacket->valid = true;
}
+static void build_vrr_infopacket_v1(enum signal_type signal,
+ const struct mod_vrr_params *vrr,
+ struct dc_info_packet *infopacket)
+{
+ /* SPD info packet for FreeSync */
+ unsigned int payload_size = 0;
+
+ build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
+ build_vrr_infopacket_data(vrr, infopacket);
+ build_vrr_infopacket_checksum(&payload_size, infopacket);
+
+ infopacket->valid = true;
+}
+
+static void build_vrr_infopacket_v2(enum signal_type signal,
+ const struct mod_vrr_params *vrr,
+ const enum color_transfer_func *app_tf,
+ struct dc_info_packet *infopacket)
+{
+ unsigned int payload_size = 0;
+
+ build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
+ build_vrr_infopacket_data(vrr, infopacket);
+
+ if (app_tf != NULL)
+ build_vrr_infopacket_fs2_data(*app_tf, infopacket);
+
+ build_vrr_infopacket_checksum(&payload_size, infopacket);
+
+ infopacket->valid = true;
+}
+
+void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
+ const struct dc_stream_state *stream,
+ const struct mod_vrr_params *vrr,
+ enum vrr_packet_type packet_type,
+ const enum color_transfer_func *app_tf,
+ struct dc_info_packet *infopacket)
+{
+ /* SPD info packet for FreeSync */
+
+ /* Check if Freesync is supported. Return if false. If true,
+ * set the corresponding bit in the info packet
+ */
+ if (!vrr->supported || !vrr->send_vsif)
+ return;
+
+ switch (packet_type) {
+ case packet_type_fs2:
+ build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
+ break;
+ case packet_type_vrr:
+ case packet_type_fs1:
+ default:
+ build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
+ }
+}
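+/* Illustrative call (a sketch; real call sites live in the DM layer):
+ *
+ * mod_freesync_build_vrr_infopacket(fs, stream, &vrr,
+ * packet_type_fs2, &app_tf, &infopacket);
+ *
+ * packet_type_vrr and packet_type_fs1 both take the v1 path.
+ */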
+
void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
struct mod_freesync_config *in_config,
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index a0f32cde721c..949a8b62aa98 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -54,7 +54,7 @@
#ifndef MOD_FREESYNC_H_
#define MOD_FREESYNC_H_
-#include "dm_services.h"
+#include "mod_shared.h"
// Access structures
struct mod_freesync {
@@ -144,6 +144,8 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
+ enum vrr_packet_type packet_type,
+ const enum color_transfer_func *app_tf,
struct dc_info_packet *infopacket);
void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
new file mode 100644
index 000000000000..238c431ae483
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef MOD_SHARED_H_
+#define MOD_SHARED_H_
+
+enum color_transfer_func {
+ transfer_func_unknown,
+ transfer_func_srgb,
+ transfer_func_bt709,
+ transfer_func_pq2084,
+ transfer_func_pq2084_interim,
+ transfer_func_linear_0_1,
+ transfer_func_linear_0_125,
+ transfer_func_dolbyvision,
+ transfer_func_gamma_22,
+ transfer_func_gamma_26
+};
+
+enum vrr_packet_type {
+ packet_type_vrr,
+ packet_type_fs1,
+ packet_type_fs2
+};
+
+#endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 52378fc69079..ff8bfb9b43b0 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -48,9 +48,12 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
unsigned int i;
unsigned int pixelEncoding = 0;
unsigned int colorimetryFormat = 0;
+ bool stereo3dSupport = false;
- if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE)
+ if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
vscPacketRevision = 1;
+ stereo3dSupport = true;
+ }
/*VSC packet set to 2 when DP revision >= 1.2*/
if (stream->psr_version != 0)
@@ -94,12 +97,59 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->hb2 = 0x01; // 01h = Revision number. VSC SDP supporting 3D stereo only
info_packet->hb3 = 0x01; // 01h = VSC SDP supporting 3D stereo only (HB2 = 01h).
- if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_INBAND_FA)
- info_packet->sb[0] = 0x1;
-
info_packet->valid = true;
}
+ if (stereo3dSupport) {
+ /* ==============================================================================================================|
+ * A. STEREO 3D
+ * ==============================================================================================================|
+ * VSC Payload (1 byte) From DP1.2 spec
+ *
+ * Bits 3:0 (Stereo Interface Method Code) | Bits 7:4 (Stereo Interface Method Specific Parameter)
+ * -----------------------------------------------------------------------------------------------------
+ * 0 = Non Stereo Video | Must be set to 0x0
+ * -----------------------------------------------------------------------------------------------------
+ * 1 = Frame/Field Sequential | 0x0: L + R view indication based on MISC1 bit 2:1
+ * | 0x1: Right when Stereo Signal = 1
+ * | 0x2: Left when Stereo Signal = 1
+ * | (others reserved)
+ * -----------------------------------------------------------------------------------------------------
+ * 2 = Stacked Frame | 0x0: Left view is on top and right view on bottom
+ * | (others reserved)
+ * -----------------------------------------------------------------------------------------------------
+ * 3 = Pixel Interleaved | 0x0: horiz interleaved, right view pixels on even lines
+ * | 0x1: horiz interleaved, right view pixels on odd lines
+ * | 0x2: checker board, start with left view pixel
+ * | 0x3: vertical interleaved, start with left view pixels
+ * | 0x4: vertical interleaved, start with right view pixels
+ * | (others reserved)
+ * -----------------------------------------------------------------------------------------------------
+ * 4 = Side-by-side | 0x0: left half represents left eye view
+ * | 0x1: left half represents right eye view
+ */
+ switch (stream->timing.timing_3d_format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[0] = 0x02; // Stacked Frame, Left view is on top and right view on bottom.
+ break;
+ case TIMING_3D_FORMAT_DP_HDMI_INBAND_FA:
+ case TIMING_3D_FORMAT_INBAND_FA:
+ info_packet->sb[0] = 0x01; // Frame/Field Sequential, L + R view indication based on MISC1 bit 2:1
+ break;
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[0] = 0x04; // Side-by-side
+ break;
+ default:
+ info_packet->sb[0] = 0x00; // No Stereo Video, Shall be cleared to 0x0.
+ break;
+ }
+
+ }
+
/* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication.
* Added in DP1.3, a DP Source device is allowed to indicate the pixel encoding/colorimetry
* format to the DP Sink device with VSC SDP only when the DP Sink device supports it
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 3d4c1b1ab8c4..03121ca64fe4 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -186,12 +186,8 @@ void mod_stats_destroy(struct mod_stats *mod_stats)
if (mod_stats != NULL) {
struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
- if (core_stats->time != NULL)
- kfree(core_stats->time);
-
- if (core_stats->events != NULL)
- kfree(core_stats->events);
-
+ kfree(core_stats->time);
+ kfree(core_stats->events);
kfree(core_stats);
}
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 86b167ec9863..2083c308007c 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -109,6 +109,7 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
#define AMD_PG_SUPPORT_MMHUB (1 << 13)
#define AMD_PG_SUPPORT_VCN (1 << 14)
+#define AMD_PG_SUPPORT_VCN_DPG (1 << 15)
enum PP_FEATURE_MASK {
PP_SCLK_DPM_MASK = 0x1,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index 4ce090db7ef7..529b37db274c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -2449,6 +2449,8 @@
#define mmCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX 0
#define mmGB_EDC_MODE 0x107e
#define mmGB_EDC_MODE_BASE_IDX 0
+#define mmCP_DEBUG 0x107f
+#define mmCP_DEBUG_BASE_IDX 0
#define mmCP_CPF_DEBUG 0x1080
#define mmCP_PQ_WPTR_POLL_CNTL 0x1083
#define mmCP_PQ_WPTR_POLL_CNTL_BASE_IDX 0
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
index efd2704d0f8f..0d6891095f62 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
@@ -175,4 +175,7 @@
#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 0
#define mmSMUSVI0_PLANE0_CURRENTVID 0x0013
+#define mmSMUSVI0_TEL_PLANE0_BASE_IDX 0
+#define mmSMUSVI0_TEL_PLANE0 0x0004
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
index 2487ab9621e9..b1d9d8be1119 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
@@ -258,4 +258,7 @@
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
index 510ec3c70626..a9eb57a53e59 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
@@ -26,6 +26,18 @@
#define mmCG_MULT_THERMAL_STATUS 0x005f
#define mmCG_MULT_THERMAL_STATUS_BASE_IDX 0
+#define mmCG_FDO_CTRL0 0x0067
+#define mmCG_FDO_CTRL0_BASE_IDX 0
+
+#define mmCG_FDO_CTRL1 0x0068
+#define mmCG_FDO_CTRL1_BASE_IDX 0
+
+#define mmCG_FDO_CTRL2 0x0069
+#define mmCG_FDO_CTRL2_BASE_IDX 0
+
+#define mmCG_TACH_CTRL 0x006a
+#define mmCG_TACH_CTRL_BASE_IDX 0
+
#define mmTHM_THERMAL_INT_ENA 0x000a
#define mmTHM_THERMAL_INT_ENA_BASE_IDX 0
#define mmTHM_THERMAL_INT_CTRL 0x000b
diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
index f69533fa6abf..d130d92aee19 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
@@ -28,6 +28,16 @@
#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL
#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003FE00L
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x0
+#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L
//THM_THERMAL_INT_ENA
#define THM_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
index 216a401028de..442ca7c471a5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
@@ -33,6 +33,14 @@
#define mmUVD_POWER_STATUS_BASE_IDX 1
#define mmCC_UVD_HARVESTING 0x00c7
#define mmCC_UVD_HARVESTING_BASE_IDX 1
+#define mmUVD_DPG_LMA_CTL 0x00d1
+#define mmUVD_DPG_LMA_CTL_BASE_IDX 1
+#define mmUVD_DPG_LMA_DATA 0x00d2
+#define mmUVD_DPG_LMA_DATA_BASE_IDX 1
+#define mmUVD_DPG_LMA_MASK 0x00d3
+#define mmUVD_DPG_LMA_MASK_BASE_IDX 1
+#define mmUVD_DPG_PAUSE 0x00d4
+#define mmUVD_DPG_PAUSE_BASE_IDX 1
#define mmUVD_SCRATCH1 0x00d5
#define mmUVD_SCRATCH1_BASE_IDX 1
#define mmUVD_SCRATCH2 0x00d6
@@ -74,6 +82,18 @@
#define mmUVD_LCM_CGC_CNTRL 0x0123
#define mmUVD_LCM_CGC_CNTRL_BASE_IDX 1
+#define mmUVD_MIF_CURR_UV_ADDR_CONFIG 0x0184
+#define mmUVD_MIF_CURR_UV_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_REF_UV_ADDR_CONFIG 0x0185
+#define mmUVD_MIF_REF_UV_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG 0x0186
+#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_CURR_ADDR_CONFIG 0x0192
+#define mmUVD_MIF_CURR_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_REF_ADDR_CONFIG 0x0193
+#define mmUVD_MIF_REF_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x01c5
+#define mmUVD_MIF_RECON1_ADDR_CONFIG_BASE_IDX 1
// addressBlock: uvd_uvdnpdec
// base address: 0x20000
@@ -319,6 +339,8 @@
#define mmUVD_LMI_VM_CTRL_BASE_IDX 1
#define mmUVD_LMI_SWAP_CNTL 0x056d
#define mmUVD_LMI_SWAP_CNTL_BASE_IDX 1
+#define mmUVD_MPC_CNTL 0x0577
+#define mmUVD_MPC_CNTL_BASE_IDX 1
#define mmUVD_MPC_SET_MUXA0 0x0579
#define mmUVD_MPC_SET_MUXA0_BASE_IDX 1
#define mmUVD_MPC_SET_MUXA1 0x057a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
index 124383dac284..63457f9df4c5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
@@ -87,6 +87,26 @@
//CC_UVD_HARVESTING
#define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
#define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
+//UVD_DPG_LMA_CTL
+#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0
+#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2
+#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0x10
+#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L
+#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFF0000L
+//UVD_DPG_PAUSE
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L
//UVD_SCRATCH1
#define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0
#define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL
@@ -965,6 +985,7 @@
#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8
#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9
#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11
#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
@@ -973,6 +994,7 @@
#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L
//UVD_MASTINT_EN
#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1
@@ -983,6 +1005,7 @@
#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
//UVD_SYS_INT_EN
+#define UVD_SYS_INT_EN__UVD_JRBC_EN__SHIFT 0x4
#define UVD_SYS_INT_EN__UVD_JRBC_EN_MASK 0x00000010L
//JPEG_CGC_CTRL
#define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
@@ -1024,6 +1047,19 @@
#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
#define UVD_LMI_CTRL__RFU_MASK 0xF8000000L
+//UVD_LMI_STATUS
+#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0
+#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9
+#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
+#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
//UVD_LMI_SWAP_CNTL
#define UVD_LMI_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
#define UVD_LMI_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
@@ -1057,6 +1093,9 @@
#define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP_MASK 0x0C000000L
#define UVD_LMI_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
#define UVD_LMI_SWAP_CNTL__MP_MC_SWAP_MASK 0xC0000000L
+//UVD_MPC_CNTL
+#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x3
+#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L
//UVD_MPC_SET_MUXA0
#define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x0
#define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x6
@@ -1138,7 +1177,11 @@
#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL
//UVD_VCPU_CNTL
#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9
+#define UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP__SHIFT 0x11
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
+#define UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK 0x00020000L
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
//UVD_SOFT_RESET
#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0
#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 8ae7adb7329b..d2e7c0fa96c2 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1532,6 +1532,94 @@ struct atom_smc_dpm_info_v4_3
uint32_t boardreserved[10];
};
+struct smudpm_i2ccontrollerconfig_t {
+ uint32_t enabled;
+ uint32_t slaveaddress;
+ uint32_t controllerport;
+ uint32_t controllername;
+ uint32_t thermalthrottler;
+ uint32_t i2cprotocol;
+ uint32_t i2cspeed;
+};
+
+struct atom_smc_dpm_info_v4_4
+{
+ struct atom_common_table_header table_header;
+ uint32_t i2c_padding[3];
+
+ uint16_t maxvoltagestepgfx;
+ uint16_t maxvoltagestepsoc;
+
+ uint8_t vddgfxvrmapping;
+ uint8_t vddsocvrmapping;
+ uint8_t vddmem0vrmapping;
+ uint8_t vddmem1vrmapping;
+
+ uint8_t gfxulvphasesheddingmask;
+ uint8_t soculvphasesheddingmask;
+ uint8_t externalsensorpresent;
+ uint8_t padding8_v;
+
+ uint16_t gfxmaxcurrent;
+ uint8_t gfxoffset;
+ uint8_t padding_telemetrygfx;
+
+ uint16_t socmaxcurrent;
+ uint8_t socoffset;
+ uint8_t padding_telemetrysoc;
+
+ uint16_t mem0maxcurrent;
+ uint8_t mem0offset;
+ uint8_t padding_telemetrymem0;
+
+ uint16_t mem1maxcurrent;
+ uint8_t mem1offset;
+ uint8_t padding_telemetrymem1;
+
+
+ uint8_t acdcgpio;
+ uint8_t acdcpolarity;
+ uint8_t vr0hotgpio;
+ uint8_t vr0hotpolarity;
+
+ uint8_t vr1hotgpio;
+ uint8_t vr1hotpolarity;
+ uint8_t padding1;
+ uint8_t padding2;
+
+
+ uint8_t ledpin0;
+ uint8_t ledpin1;
+ uint8_t ledpin2;
+ uint8_t padding8_4;
+
+
+ uint8_t pllgfxclkspreadenabled;
+ uint8_t pllgfxclkspreadpercent;
+ uint16_t pllgfxclkspreadfreq;
+
+
+ uint8_t uclkspreadenabled;
+ uint8_t uclkspreadpercent;
+ uint16_t uclkspreadfreq;
+
+
+ uint8_t fclkspreadenabled;
+ uint8_t fclkspreadpercent;
+ uint16_t fclkspreadfreq;
+
+
+ uint8_t fllgfxclkspreadenabled;
+ uint8_t fllgfxclkspreadpercent;
+ uint16_t fllgfxclkspreadfreq;
+
+
+ struct smudpm_i2ccontrollerconfig_t i2ccontrollers[7];
+
+
+ uint32_t boardreserved[10];
+};
+
/*
***************************************************************************
Data Table asic_profiling_info structure
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index f43ed96cfa6c..64ecffd52126 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -146,10 +146,10 @@ struct kgd2kfd_shared_resources {
* is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
*
* KFD currently uses 1024 (= 0x3ff) doorbells per process. If
- * doorbells 0x0f0-0x0f7 and 0x2f-0x2f7 are reserved, that means
- * mask would be set to 0x1f8 and val set to 0x0f0.
+ * doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved, that means
+ * mask would be set to 0x1e0 and val set to 0x0e0.
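+ * For example, doorbell 0x2f3 is reserved, since (0x2f3 & 0x1e0) == 0x0e0.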
*/
- unsigned int sdma_doorbell[2][2];
+ unsigned int sdma_doorbell[2][8];
unsigned int reserved_doorbell_mask;
unsigned int reserved_doorbell_val;
@@ -409,9 +409,9 @@ struct kfd2kgd_calls {
struct dma_fence **ef);
void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
- uint32_t (*get_process_page_dir)(void *vm);
+ uint64_t (*get_process_page_dir)(void *vm);
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
- uint32_t vmid, uint32_t page_table_base);
+ uint32_t vmid, uint64_t page_table_base);
int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
uint64_t size, void *vm,
struct kgd_mem **mem, uint64_t *offset,
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 448dee481a38..980e696989b1 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -113,6 +113,9 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GPU_POWER,
AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
+ AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
+ AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+ AMDGPU_PP_SENSOR_MAX_FAN_RPM,
};
enum amd_pp_task {
@@ -227,6 +230,7 @@ struct amd_pm_funcs {
enum amd_dpm_forced_level (*get_performance_level)(void *handle);
enum amd_pm_state_type (*get_current_power_state)(void *handle);
int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
+ int (*set_fan_speed_rpm)(void *handle, uint32_t rpm);
int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
int (*get_pp_table)(void *handle, char **table);
int (*set_pp_table)(void *handle, const char *buf, size_t size);
@@ -271,6 +275,7 @@ struct amd_pm_funcs {
int (*get_display_mode_validation_clocks)(void *handle,
struct amd_pp_simple_clock_info *clocks);
int (*notify_smu_enable_pwe)(void *handle);
+ int (*enable_mgpu_fan_boost)(void *handle);
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index da4ebff5b74d..e8964cae6b93 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -109,11 +109,8 @@ static int pp_sw_fini(void *handle)
hwmgr_sw_fini(hwmgr);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- amdgpu_ucode_fini_bo(adev);
- }
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
return 0;
}
@@ -124,9 +121,6 @@ static int pp_hw_init(void *handle)
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_init_bo(adev);
-
ret = hwmgr_hw_init(hwmgr);
if (ret)
@@ -273,8 +267,23 @@ const struct amdgpu_ip_block_version pp_smu_ip_block =
.funcs = &pp_ip_funcs,
};
+/* This interface is only supported on VI, because only smu7/8 can
+ * help to load the gfx/sdma fw. The SMU needs to be enabled before
+ * the other IPs' fw can be loaded, so call start_smu here to load
+ * the smu7 fw first and then the other IPs' fw.
+ */
static int pp_dpm_load_fw(void *handle)
{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
+ return -EINVAL;
+
+ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
+ pr_err("fw load failed\n");
+ return -EINVAL;
+ }
+
return 0;
}
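
For context, a hedged sketch of how this hook is reached; the exact call site in amdgpu is not shown in this patch, so the surrounding code here is illustrative:

```c
/* Illustrative caller (assumed, not in this hunk): amdgpu reaches
 * pp_dpm_load_fw() through the .load_firmware member of
 * amd_pm_funcs, wired up at the bottom of this file. */
int r = 0;

if (adev->powerplay.pp_funcs->load_firmware) {
	r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r)
		pr_err("firmware loading failed\n");
}
```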
@@ -576,6 +585,24 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
return ret;
}
+static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
+ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+}
+
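A hedged sketch of the expected consumer: a hwmon fan1_target write path would parse the requested RPM and call the new hook roughly as follows (the surrounding variables are assumed):

```c
/* Assumed caller sketch: parse the user-requested RPM and forward
 * it to the new set_fan_speed_rpm hook. */
u32 rpm;
int err = kstrtou32(buf, 10, &rpm);

if (err)
	return err;

if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
	err = adev->powerplay.pp_funcs->set_fan_speed_rpm(
			adev->powerplay.pp_handle, rpm);
```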
static int pp_dpm_get_pp_num_states(void *handle,
struct pp_states_info *data)
{
@@ -813,6 +840,12 @@ static int pp_dpm_read_sensor(void *handle, int idx,
case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
*((uint32_t *)value) = hwmgr->pstate_mclk;
return 0;
+ case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
+ *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
+ return 0;
+ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
+ *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
+ return 0;
default:
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
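
The two new fan-range sensors can then be queried through the same read_sensor path; for illustration (both values are uint32_t, size is in bytes):

```c
/* Illustrative query of the new sensors added above. */
uint32_t min_rpm, max_rpm;
int size = sizeof(uint32_t);

if (!pp_dpm_read_sensor(handle, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
			&min_rpm, &size) &&
    !pp_dpm_read_sensor(handle, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
			&max_rpm, &size))
	pr_info("fan range: %u-%u RPM\n", min_rpm, max_rpm);
```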
@@ -861,9 +894,14 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
pr_info("%s was not implemented.\n", __func__);
return ret;
}
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_info("power profile setting is for manual dpm mode only.\n");
+ return ret;
+ }
+
mutex_lock(&hwmgr->smu_lock);
- if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
- ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
+ ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1196,6 +1234,21 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}
+static void pp_dpm_powergate_sdma(void *handle, bool gate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return;
+
+ if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return;
+ }
+
+ hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
+}
+
static int pp_set_powergating_by_smu(void *handle,
uint32_t block_type, bool gate)
{
@@ -1218,6 +1271,9 @@ static int pp_set_powergating_by_smu(void *handle,
case AMD_IP_BLOCK_TYPE_ACP:
pp_dpm_powergate_acp(handle, gate);
break;
+ case AMD_IP_BLOCK_TYPE_SDMA:
+ pp_dpm_powergate_sdma(handle, gate);
+ break;
default:
break;
}
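
Usage sketch, mirroring how the other IP block types are gated (the call site is assumed, not part of this hunk):

```c
/* Assumed usage: the SDMA IP block gates itself through the SMU. */
pp_set_powergating_by_smu(adev->powerplay.pp_handle,
			  AMD_IP_BLOCK_TYPE_SDMA, true);
```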
@@ -1243,6 +1299,24 @@ static int pp_notify_smu_enable_pwe(void *handle)
return 0;
}
+static int pp_enable_mgpu_fan_boost(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
+		return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
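A hedged sketch of the expected caller (not shown in this patch): amdgpu would invoke the new hook once per device when more than one dGPU is present:

```c
/* Assumed caller sketch for the new mGPU fan boost hook. */
if (adev->powerplay.pp_funcs->enable_mgpu_fan_boost)
	ret = adev->powerplay.pp_funcs->enable_mgpu_fan_boost(
			adev->powerplay.pp_handle);
```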
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1255,6 +1329,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
+ .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
.get_pp_num_states = pp_dpm_get_pp_num_states,
.get_pp_table = pp_dpm_get_pp_table,
.set_pp_table = pp_dpm_set_pp_table,
@@ -1287,4 +1362,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.display_clock_voltage_request = pp_display_clock_voltage_request,
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
+ .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 7500a3e61dba..47ac92369739 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -89,7 +89,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);
hwmgr->fan_ctrl_is_in_default_mode = true;
- hwmgr->reload_fw = 1;
hwmgr_init_workload_prority(hwmgr);
switch (hwmgr->chip_family) {
@@ -209,17 +208,6 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- if (!hwmgr || !hwmgr->smumgr_funcs)
- return -EINVAL;
-
- if (hwmgr->smumgr_funcs->start_smu) {
- ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
- if (ret) {
- pr_err("smc start failed\n");
- return -EINVAL;
- }
- }
-
if (!hwmgr->pm_en)
return 0;
@@ -320,13 +308,6 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
if (!hwmgr)
return -EINVAL;
- if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
- if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
- pr_err("smc start failed\n");
- return -EINVAL;
- }
- }
-
if (!hwmgr->pm_en)
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index d27c1c9df286..4588bddf8b33 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -488,7 +488,8 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
return 0;
}
-int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency)
+int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
+ uint8_t id, uint32_t *frequency)
{
struct amdgpu_device *adev = hwmgr->adev;
struct atom_get_smu_clock_info_parameters_v3_1 parameters;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 22e21668c93a..fe9e8ceef50e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -236,7 +236,7 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_smc_dpm_parameters *param);
int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
- BIOS_CLKID id, uint32_t *frequency);
+ uint8_t id, uint32_t *frequency);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 9808bd48b386..dd18cb710391 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -552,6 +552,8 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
{
struct smu10_hwmgr *data = hwmgr->backend;
struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
+ uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
@@ -563,6 +565,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
(adev->rev_id >= 8))
return 0;
+ if (min_sclk < data->gfx_min_freq_limit)
+ min_sclk = data->gfx_min_freq_limit;
+
+	min_sclk /= 100; /* convert from 10 kHz units to MHz */
+ if (min_mclk < data->clock_table.FClocks[0].Freq)
+ min_mclk = data->clock_table.FClocks[0].Freq;
+
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -595,18 +604,18 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- data->gfx_min_freq_limit/100);
+ min_sclk);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_min_freq_limit/100);
+ min_sclk);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- SMU10_UMD_PSTATE_MIN_FCLK);
+ min_mclk);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_MIN_FCLK);
+ min_mclk);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -638,12 +647,12 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_AUTO:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- data->gfx_min_freq_limit/100);
+ min_sclk);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
SMU10_UMD_PSTATE_PEAK_FCLK :
- SMU10_UMD_PSTATE_MIN_FCLK);
+ min_mclk);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
@@ -674,10 +683,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
data->gfx_min_freq_limit/100);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- SMU10_UMD_PSTATE_MIN_FCLK);
+ min_mclk);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_MIN_FCLK);
+ min_mclk);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1144,6 +1153,14 @@ static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}
+static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
+{
+ if (gate)
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
+ else
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
+}
+
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
if (bgate) {
@@ -1199,6 +1216,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.smus_notify_pwe = smu10_smus_notify_pwe,
.display_clock_voltage_request = smu10_display_clock_voltage_request,
.powergate_gfx = smu10_gfx_off_control,
+ .powergate_sdma = smu10_powergate_sdma,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 04b7da0e39a6..6c99cbf51c08 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4106,17 +4106,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
source->funcs = &smu7_irq_funcs;
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
- AMDGPU_IH_CLIENTID_LEGACY,
+ AMDGPU_IRQ_CLIENTID_LEGACY,
VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
- AMDGPU_IH_CLIENTID_LEGACY,
+ AMDGPU_IRQ_CLIENTID_LEGACY,
VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
source);
/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
- AMDGPU_IH_CLIENTID_LEGACY,
+ AMDGPU_IRQ_CLIENTID_LEGACY,
VISLANDS30_IV_SRCID_GPIO_19,
source);
@@ -5035,6 +5035,18 @@ static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw
return 0;
}
+static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ result = smu7_disable_dpm_tasks(hwmgr);
+	PP_ASSERT_WITH_CODE((result == 0),
+ "[disable_dpm_tasks] Failed to disable DPM!",
+ );
+
+ return result;
+}
+
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.backend_init = &smu7_hwmgr_backend_init,
.backend_fini = &smu7_hwmgr_backend_fini,
@@ -5092,6 +5104,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
+ .power_off_asic = smu7_power_off_asic,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 44527755e747..5bdc0df5a9f4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -260,6 +260,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0) ||
+ speed == 0 ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0;
@@ -272,7 +273,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_STATUS, TACH_PERIOD, tach_period);
+ CG_TACH_CTRL, TARGET_PERIOD, tach_period);
return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
}
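
A worked example of the tach-period formula above, with illustrative values (the crystal frequency is assumed to be reported in 10 kHz units):

```c
/* Worked example (illustrative values): a 27 MHz crystal reported
 * in 10 kHz units gives crystal_clock_freq = 2700; targeting
 * 1500 RPM with 8 tach pulses per revolution:
 *
 *   tach_period = 60 * 2700 * 10000 / (8 * 1500) = 135000
 *
 * which is the value written to CG_TACH_CTRL.TARGET_PERIOD.
 */
```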
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index b8637049198d..fef111ddb736 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -880,7 +880,7 @@ static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
smu8_update_low_mem_pstate(hwmgr, input);
return 0;
-};
+}
static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
@@ -934,14 +934,6 @@ static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
hw_data->cc6_settings.cpu_pstate_disable = false;
}
-static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
-{
- smu8_power_up_display_clock_sys_pll(hwmgr);
- smu8_clear_nb_dpm_flag(hwmgr);
- smu8_reset_cc6_data(hwmgr);
- return 0;
-};
-
static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
{
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
@@ -1011,6 +1003,17 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
data->acp_boot_level = 0xff;
}
+static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ smu8_program_voting_clients(hwmgr);
+ if (smu8_start_dpm(hwmgr))
+ return -EINVAL;
+ smu8_program_bootup_state(hwmgr);
+ smu8_reset_acp_boot_level(hwmgr);
+
+ return 0;
+}
+
static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
smu8_disable_nb_dpm(hwmgr);
@@ -1020,18 +1023,16 @@ static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
return -EINVAL;
return 0;
-};
+}
-static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
{
- smu8_program_voting_clients(hwmgr);
- if (smu8_start_dpm(hwmgr))
- return -EINVAL;
- smu8_program_bootup_state(hwmgr);
- smu8_reset_acp_boot_level(hwmgr);
-
+ smu8_disable_dpm_tasks(hwmgr);
+ smu8_power_up_display_clock_sys_pll(hwmgr);
+ smu8_clear_nb_dpm_flag(hwmgr);
+ smu8_reset_cc6_data(hwmgr);
return 0;
-};
+}
static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
@@ -1227,14 +1228,17 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
- if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
+ smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
+ }
return 0;
}
static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
+ smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
return smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_UVDPowerON,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 2aab1b475945..4714b5b59825 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -39,6 +39,50 @@ uint16_t convert_to_vddc(uint8_t vid)
return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
}
+int phm_copy_clock_limits_array(
+ struct pp_hwmgr *hwmgr,
+ uint32_t **pptable_info_array,
+ const uint32_t *pptable_array,
+ uint32_t power_saving_clock_count)
+{
+ uint32_t array_size, i;
+ uint32_t *table;
+
+ array_size = sizeof(uint32_t) * power_saving_clock_count;
+ table = kzalloc(array_size, GFP_KERNEL);
+	if (!table)
+ return -ENOMEM;
+
+ for (i = 0; i < power_saving_clock_count; i++)
+ table[i] = le32_to_cpu(pptable_array[i]);
+
+ *pptable_info_array = table;
+
+ return 0;
+}
+
+int phm_copy_overdrive_settings_limits_array(
+ struct pp_hwmgr *hwmgr,
+ uint32_t **pptable_info_array,
+ const uint32_t *pptable_array,
+ uint32_t od_setting_count)
+{
+ uint32_t array_size, i;
+ uint32_t *table;
+
+ array_size = sizeof(uint32_t) * od_setting_count;
+ table = kzalloc(array_size, GFP_KERNEL);
+	if (!table)
+ return -ENOMEM;
+
+ for (i = 0; i < od_setting_count; i++)
+ table[i] = le32_to_cpu(pptable_array[i]);
+
+ *pptable_info_array = table;
+
+ return 0;
+}
+
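These shared helpers replace the per-ASIC copies removed later in this patch; a representative call, as used by the vega12 changes below:

```c
/* Example call (from vega12_processpptables.c later in this patch):
 * copy the power-saving clock limits, byte-swapping each entry. */
phm_copy_clock_limits_array(hwmgr,
		&pptable_information->power_saving_clock_max,
		powerplay_table->PowerSavingClockMax,
		ATOM_VEGA12_PPCLOCK_COUNT);
```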
uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
u32 mask = 0;
@@ -545,7 +589,7 @@ int phm_irq_process(struct amdgpu_device *adev,
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
- if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
+ if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index 5454289d5226..ad33983a8064 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -47,6 +47,18 @@ struct watermarks {
uint32_t padding[7];
};
+int phm_copy_clock_limits_array(
+ struct pp_hwmgr *hwmgr,
+ uint32_t **pptable_info_array,
+ const uint32_t *pptable_array,
+ uint32_t power_saving_clock_count);
+
+int phm_copy_overdrive_settings_limits_array(
+ struct pp_hwmgr *hwmgr,
+ uint32_t **pptable_info_array,
+ const uint32_t *pptable_array,
+ uint32_t od_setting_count);
+
extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
uint32_t index,
uint32_t value, uint32_t mask);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index ca9be583fb62..419a1d77d661 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -39,6 +39,7 @@
#include "soc15_common.h"
#include "pppcielanes.h"
#include "vega10_hwmgr.h"
+#include "vega10_smumgr.h"
#include "vega10_processpptables.h"
#include "vega10_pptable.h"
#include "vega10_thermal.h"
@@ -3713,6 +3714,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
return 0;
+ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+ ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
+ if (!ret)
+ *size = 8;
+ break;
default:
ret = -EINVAL;
break;
@@ -4940,16 +4946,6 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_performance_level = vega10_get_performance_level,
};
-int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
- bool enable, uint32_t feature_mask)
-{
- int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
- PPSMC_MSG_DisableSmuFeatures;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- msg, feature_mask);
-}
-
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 339820da9e6a..89870556de1b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -441,7 +441,5 @@ int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
- bool enable, uint32_t feature_mask);
#endif /* _VEGA10_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 22364875a943..2d88abf97e7b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -23,6 +23,7 @@
#include "hwmgr.h"
#include "vega10_hwmgr.h"
+#include "vega10_smumgr.h"
#include "vega10_powertune.h"
#include "vega10_ppsmc.h"
#include "vega10_inc.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 16b1a9cf6cf0..b8747a5c9204 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -451,23 +451,23 @@ static int get_tdp_table(
le16_to_cpu(power_tune_table_v2->usLoadLineResistance);
} else {
power_tune_table_v3 = (ATOM_Vega10_PowerTune_Table_V3 *)table;
- tdp_table->usMaximumPowerDeliveryLimit = power_tune_table_v3->usSocketPowerLimit;
- tdp_table->usTDC = power_tune_table_v3->usTdcLimit;
- tdp_table->usEDCLimit = power_tune_table_v3->usEdcLimit;
- tdp_table->usSoftwareShutdownTemp = power_tune_table_v3->usSoftwareShutdownTemp;
- tdp_table->usTemperatureLimitTedge = power_tune_table_v3->usTemperatureLimitTedge;
- tdp_table->usTemperatureLimitHotspot = power_tune_table_v3->usTemperatureLimitHotSpot;
- tdp_table->usTemperatureLimitLiquid1 = power_tune_table_v3->usTemperatureLimitLiquid1;
- tdp_table->usTemperatureLimitLiquid2 = power_tune_table_v3->usTemperatureLimitLiquid2;
- tdp_table->usTemperatureLimitHBM = power_tune_table_v3->usTemperatureLimitHBM;
- tdp_table->usTemperatureLimitVrVddc = power_tune_table_v3->usTemperatureLimitVrSoc;
- tdp_table->usTemperatureLimitVrMvdd = power_tune_table_v3->usTemperatureLimitVrMem;
- tdp_table->usTemperatureLimitPlx = power_tune_table_v3->usTemperatureLimitPlx;
+ tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v3->usSocketPowerLimit);
+ tdp_table->usTDC = le16_to_cpu(power_tune_table_v3->usTdcLimit);
+ tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v3->usEdcLimit);
+ tdp_table->usSoftwareShutdownTemp = le16_to_cpu(power_tune_table_v3->usSoftwareShutdownTemp);
+ tdp_table->usTemperatureLimitTedge = le16_to_cpu(power_tune_table_v3->usTemperatureLimitTedge);
+ tdp_table->usTemperatureLimitHotspot = le16_to_cpu(power_tune_table_v3->usTemperatureLimitHotSpot);
+ tdp_table->usTemperatureLimitLiquid1 = le16_to_cpu(power_tune_table_v3->usTemperatureLimitLiquid1);
+ tdp_table->usTemperatureLimitLiquid2 = le16_to_cpu(power_tune_table_v3->usTemperatureLimitLiquid2);
+ tdp_table->usTemperatureLimitHBM = le16_to_cpu(power_tune_table_v3->usTemperatureLimitHBM);
+ tdp_table->usTemperatureLimitVrVddc = le16_to_cpu(power_tune_table_v3->usTemperatureLimitVrSoc);
+ tdp_table->usTemperatureLimitVrMvdd = le16_to_cpu(power_tune_table_v3->usTemperatureLimitVrMem);
+ tdp_table->usTemperatureLimitPlx = le16_to_cpu(power_tune_table_v3->usTemperatureLimitPlx);
tdp_table->ucLiquid1_I2C_address = power_tune_table_v3->ucLiquid1_I2C_address;
tdp_table->ucLiquid2_I2C_address = power_tune_table_v3->ucLiquid2_I2C_address;
- tdp_table->usBoostStartTemperature = power_tune_table_v3->usBoostStartTemperature;
- tdp_table->usBoostStopTemperature = power_tune_table_v3->usBoostStopTemperature;
- tdp_table->ulBoostClock = power_tune_table_v3->ulBoostClock;
+ tdp_table->usBoostStartTemperature = le16_to_cpu(power_tune_table_v3->usBoostStartTemperature);
+ tdp_table->usBoostStopTemperature = le16_to_cpu(power_tune_table_v3->usBoostStopTemperature);
+ tdp_table->ulBoostClock = le32_to_cpu(power_tune_table_v3->ulBoostClock);
get_scl_sda_value(power_tune_table_v3->ucLiquid_I2C_Line, &scl, &sda);
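
The motivation for these wrappers, sketched briefly: the vBIOS powerplay tables are stored little-endian, so raw fields must be converted before use on big-endian hosts:

```c
/* le16_to_cpu()/le32_to_cpu() are no-ops on little-endian kernels
 * and byte swaps on big-endian ones, so the parsed limits are
 * correct either way, e.g.: */
tdp_table->usTDC = le16_to_cpu(power_tune_table_v3->usTdcLimit);
```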
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index aa044c1955fe..3f807d6c95ce 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -23,6 +23,7 @@
#include "vega10_thermal.h"
#include "vega10_hwmgr.h"
+#include "vega10_smumgr.h"
#include "vega10_ppsmc.h"
#include "vega10_inc.h"
#include "soc15_common.h"
@@ -311,6 +312,7 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
int result = 0;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+ speed == 0 ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return -1;
@@ -321,9 +323,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (!result) {
crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
- WREG32_SOC15(THM, 0, mmCG_TACH_STATUS,
- REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
- CG_TACH_STATUS, TACH_PERIOD,
+ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+ CG_TACH_CTRL, TARGET_PERIOD,
tach_period));
}
return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 0789d64246ca..9600e2f226e9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -745,8 +745,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
- result = vega12_copy_table_to_smc(hwmgr,
- (uint8_t *)pp_table, TABLE_PPTABLE);
+ result = smum_smc_table_manager(hwmgr,
+ (uint8_t *)pp_table, TABLE_PPTABLE, false);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -1317,7 +1317,11 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
-
+ break;
+ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+ ret = vega12_get_enabled_smc_features(hwmgr, (uint64_t *)value);
+ if (!ret)
+ *size = 8;
break;
default:
ret = -EINVAL;
@@ -2103,8 +2107,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
- result = vega12_copy_table_to_smc(hwmgr,
- (uint8_t *)wm_table, TABLE_WATERMARKS);
+ result = smum_smc_table_manager(hwmgr,
+ (uint8_t *)wm_table, TABLE_WATERMARKS, false);
PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index cb3a5b1737c8..9817f7a5ed29 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -99,50 +99,6 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
return 0;
}
-static int copy_clock_limits_array(
- struct pp_hwmgr *hwmgr,
- uint32_t **pptable_info_array,
- const uint32_t *pptable_array)
-{
- uint32_t array_size, i;
- uint32_t *table;
-
- array_size = sizeof(uint32_t) * ATOM_VEGA12_PPCLOCK_COUNT;
-
- table = kzalloc(array_size, GFP_KERNEL);
- if (NULL == table)
- return -ENOMEM;
-
- for (i = 0; i < ATOM_VEGA12_PPCLOCK_COUNT; i++)
- table[i] = pptable_array[i];
-
- *pptable_info_array = table;
-
- return 0;
-}
-
-static int copy_overdrive_settings_limits_array(
- struct pp_hwmgr *hwmgr,
- uint32_t **pptable_info_array,
- const uint32_t *pptable_array)
-{
- uint32_t array_size, i;
- uint32_t *table;
-
- array_size = sizeof(uint32_t) * ATOM_VEGA12_ODSETTING_COUNT;
-
- table = kzalloc(array_size, GFP_KERNEL);
- if (NULL == table)
- return -ENOMEM;
-
- for (i = 0; i < ATOM_VEGA12_ODSETTING_COUNT; i++)
- table[i] = pptable_array[i];
-
- *pptable_info_array = table;
-
- return 0;
-}
-
static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable)
{
struct pp_atomfwctrl_smc_dpm_parameters smc_dpm_table;
@@ -250,14 +206,22 @@ static int init_powerplay_table_information(
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
- if (powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX] > VEGA12_ENGINECLOCK_HARDMAX)
+ if (le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]) > VEGA12_ENGINECLOCK_HARDMAX)
hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA12_ENGINECLOCK_HARDMAX;
else
- hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX];
- hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX];
-
- copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->ODSettingsMax);
- copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->ODSettingsMin);
+ hwmgr->platform_descriptor.overdriveLimit.engineClock =
+ le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]);
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock =
+ le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX]);
+
+ phm_copy_overdrive_settings_limits_array(hwmgr,
+ &pptable_information->od_settings_max,
+ powerplay_table->ODSettingsMax,
+ ATOM_VEGA12_ODSETTING_COUNT);
+ phm_copy_overdrive_settings_limits_array(hwmgr,
+ &pptable_information->od_settings_min,
+ powerplay_table->ODSettingsMin,
+ ATOM_VEGA12_ODSETTING_COUNT);
/* hwmgr->platformDescriptor.minOverdriveVDDC = 0;
hwmgr->platformDescriptor.maxOverdriveVDDC = 0;
@@ -267,15 +231,15 @@ static int init_powerplay_table_information(
&& hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport);
- pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
- pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
- pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
- pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
- pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
+ pptable_information->us_small_power_limit1 = le16_to_cpu(powerplay_table->usSmallPowerLimit1);
+ pptable_information->us_small_power_limit2 = le16_to_cpu(powerplay_table->usSmallPowerLimit2);
+ pptable_information->us_boost_power_limit = le16_to_cpu(powerplay_table->usBoostPowerLimit);
+ pptable_information->us_od_turbo_power_limit = le16_to_cpu(powerplay_table->usODTurboPowerLimit);
+ pptable_information->us_od_powersave_power_limit = le16_to_cpu(powerplay_table->usODPowerSavePowerLimit);
- pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
+ pptable_information->us_software_shutdown_temp = le16_to_cpu(powerplay_table->usSoftwareShutdownTemp);
- hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE];
+ hwmgr->platform_descriptor.TDPODLimit = le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE]);
disable_power_control = 0;
if (!disable_power_control) {
@@ -285,8 +249,8 @@ static int init_powerplay_table_information(
PHM_PlatformCaps_PowerControl);
}
- copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax);
- copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin);
+ phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax, ATOM_VEGA12_PPCLOCK_COUNT);
+ phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin, ATOM_VEGA12_PPCLOCK_COUNT);
pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL);
if (pptable_information->smc_pptable == NULL)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index d45cbfe8e184..b4dbbb7c334c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -46,6 +46,9 @@
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
+#include "soc15_common.h"
+#include "smuio/smuio_9_0_offset.h"
+#include "smuio/smuio_9_0_sh_mask.h"
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
@@ -461,7 +464,7 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
"[GetNumOfDpmLevel] failed to get dpm levels!",
return ret);
- vega20_read_arg_from_smc(hwmgr, num_of_levels);
+ *num_of_levels = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(*num_of_levels > 0,
"[GetNumOfDpmLevel] number of clk levels is invalid!",
return -EINVAL);
@@ -481,7 +484,7 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
"[GetDpmFreqByIndex] failed to get dpm freq by index!",
return ret);
- vega20_read_arg_from_smc(hwmgr, clk);
+ *clk = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(*clk,
"[GetDpmFreqByIndex] clk value is invalid!",
return -EINVAL);
@@ -743,8 +746,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
- result = vega20_copy_table_to_smc(hwmgr,
- (uint8_t *)pp_table, TABLE_PPTABLE);
+ result = smum_smc_table_manager(hwmgr,
+ (uint8_t *)pp_table, TABLE_PPTABLE, false);
PP_ASSERT_WITH_CODE(!result,
"[InitSMCTable] Failed to upload PPtable!",
return result);
@@ -1044,7 +1047,7 @@ static int vega20_od8_get_gfx_clock_base_voltage(
"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
return ret);
- vega20_read_arg_from_smc(hwmgr, voltage);
+ *voltage = smum_get_argument(hwmgr);
*voltage = *voltage / VOLTAGE_SCALE;
return 0;
@@ -1067,7 +1070,7 @@ static int vega20_od8_initialize_default_settings(
vega20_od8_set_feature_id(hwmgr);
/* Set default values */
- ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE);
+ ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
PP_ASSERT_WITH_CODE(!ret,
"Failed to export over drive table!",
return ret);
@@ -1195,7 +1198,7 @@ static int vega20_od8_initialize_default_settings(
}
}
- ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE);
+ ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
PP_ASSERT_WITH_CODE(!ret,
"Failed to import over drive table!",
return ret);
@@ -1214,7 +1217,7 @@ static int vega20_od8_set_settings(
struct vega20_od8_single_setting *od8_settings =
data->od8_settings.od8_settings_array;
- ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
+ ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
PP_ASSERT_WITH_CODE(!ret,
"Failed to export over drive table!",
return ret);
@@ -1271,7 +1274,7 @@ static int vega20_od8_set_settings(
break;
}
- ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
+ ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
PP_ASSERT_WITH_CODE(!ret,
"Failed to import over drive table!",
return ret);
@@ -1401,7 +1404,7 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
(clock_select << 16))) == 0,
"[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
return ret);
- vega20_read_arg_from_smc(hwmgr, clock);
+ *clock = smum_get_argument(hwmgr);
/* if DC limit is zero, return AC limit */
if (*clock == 0) {
@@ -1410,7 +1413,7 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
(clock_select << 16))) == 0,
"[GetMaxSustainableClock] failed to get max AC clock from SMC!",
return ret);
- vega20_read_arg_from_smc(hwmgr, clock);
+ *clock = smum_get_argument(hwmgr);
}
return 0;
@@ -1474,6 +1477,19 @@ static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ result = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_SetMGpuFanBoostLimitRpm);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableMgpuFan] Failed to enable mgpu fan boost!",
+ return result);
+
+ return 0;
+}
+
static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
@@ -1544,6 +1560,14 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"[EnableDPMTasks] Failed to populate umdpstate clocks!",
return result);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
+ POWER_SOURCE_AC << 16);
+ PP_ASSERT_WITH_CODE(!result,
+ "[GetPptLimit] get default PPT limit failed!",
+ return result);
+ hwmgr->power_limit =
+ hwmgr->default_power_limit = smum_get_argument(hwmgr);
+
return 0;
}
@@ -1770,14 +1794,14 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
"[GetClockRanges] Failed to get max clock from SMC!",
return ret);
- vega20_read_arg_from_smc(hwmgr, clock);
+ *clock = smum_get_argument(hwmgr);
} else {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetMinDpmFreq,
(clock_select << 16))) == 0,
"[GetClockRanges] Failed to get min clock from SMC!",
return ret);
- vega20_read_arg_from_smc(hwmgr, clock);
+ *clock = smum_get_argument(hwmgr);
}
return 0;
@@ -1841,7 +1865,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
int ret = 0;
SmuMetrics_t metrics_table;
- ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS);
+ ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true);
PP_ASSERT_WITH_CODE(!ret,
"Failed to export SMU METRICS table!",
return ret);
@@ -1862,7 +1886,7 @@ static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
return ret);
- vega20_read_arg_from_smc(hwmgr, &gfx_clk);
+ gfx_clk = smum_get_argument(hwmgr);
*gfx_freq = gfx_clk * 100;
@@ -1880,7 +1904,7 @@ static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0,
"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
return ret);
- vega20_read_arg_from_smc(hwmgr, &mem_clk);
+ mem_clk = smum_get_argument(hwmgr);
*mclk_freq = mem_clk * 100;
@@ -1893,7 +1917,7 @@ static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
int ret = 0;
SmuMetrics_t metrics_table;
- ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS);
+ ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true);
PP_ASSERT_WITH_CODE(!ret,
"Failed to export SMU METRICS table!",
return ret);
@@ -1907,6 +1931,8 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t val_vid;
int ret = 0;
switch (idx) {
@@ -1941,6 +1967,18 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 16;
ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
+ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
+ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
+ *((uint32_t *)value) =
+ (uint32_t)convert_to_vddc((uint8_t)val_vid);
+ break;
+ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+ ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
+ if (!ret)
+ *size = 8;
+ break;
default:
ret = -EINVAL;
break;
@@ -2264,6 +2302,25 @@ static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
return AMD_FAN_CTRL_AUTO;
}
+static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+ switch (mode) {
+ case AMD_FAN_CTRL_NONE:
+ vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ break;
+ case AMD_FAN_CTRL_MANUAL:
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
+ break;
+ case AMD_FAN_CTRL_AUTO:
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega20_fan_ctrl_start_smc_fan_control(hwmgr);
+ break;
+ default:
+ break;
+ }
+}
+
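For reference, the mode argument follows the existing hwmon pwm1_enable convention; the values are defined alongside the other pp interfaces (summarized here, hedged):

```c
/* Mode mapping handled above (values from the pp interface headers):
 *   AMD_FAN_CTRL_NONE   - fan forced to 100%
 *   AMD_FAN_CTRL_MANUAL - SMC fan control stopped; user sets speed
 *   AMD_FAN_CTRL_AUTO   - SMC firmware controls the fan
 */
```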
static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
struct amd_pp_simple_clock_info *info)
{
@@ -2612,18 +2669,18 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
data->gfxclk_overdrive = false;
data->memclk_overdrive = false;
- ret = vega20_copy_table_from_smc(hwmgr,
- (uint8_t *)od_table,
- TABLE_OVERDRIVE);
+ ret = smum_smc_table_manager(hwmgr,
+ (uint8_t *)od_table,
+ TABLE_OVERDRIVE, true);
PP_ASSERT_WITH_CODE(!ret,
"Failed to export overdrive table!",
return ret);
break;
case PP_OD_COMMIT_DPM_TABLE:
- ret = vega20_copy_table_to_smc(hwmgr,
- (uint8_t *)od_table,
- TABLE_OVERDRIVE);
+ ret = smum_smc_table_manager(hwmgr,
+ (uint8_t *)od_table,
+ TABLE_OVERDRIVE, false);
PP_ASSERT_WITH_CODE(!ret,
"Failed to import overdrive table!",
return ret);
@@ -2847,8 +2904,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
- result = vega20_copy_table_to_smc(hwmgr,
- (uint8_t *)wm_table, TABLE_WATERMARKS);
+ result = smum_smc_table_manager(hwmgr,
+ (uint8_t *)wm_table, TABLE_WATERMARKS, false);
PP_ASSERT_WITH_CODE(!result,
"Failed to update WMTABLE!",
return result);
@@ -3118,6 +3175,34 @@ static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
return result;
}
+static int conv_power_profile_to_pplib_workload(int power_profile)
+{
+ int pplib_workload = 0;
+
+ switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_POWERSAVING:
+ pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+ break;
+ }
+
+ return pplib_workload;
+}
+
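With the mapping helper in place, selecting a workload becomes explicit; for example (using names from this patch):

```c
/* Example: forcing the VIDEO profile sets only the matching
 * PPLIB workload bit in the mask sent to the SMU. */
workload_type = conv_power_profile_to_pplib_workload(
			PP_SMC_POWER_PROFILE_VIDEO);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
		1 << workload_type);
```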
static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
@@ -3153,14 +3238,14 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = i + 1;
+ workload_type = conv_power_profile_to_pplib_workload(i);
result = vega20_get_activity_monitor_coeff(hwmgr,
(uint8_t *)(&activity_monitor), workload_type);
PP_ASSERT_WITH_CODE(!result,
"[GetPowerProfile] Failed to get activity monitor!",
return result);
- size += sprintf(buf + size, "%2d(%14s%s)\n",
+ size += sprintf(buf + size, "%2d %14s%s:\n",
i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
@@ -3226,10 +3311,15 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
- int result = 0;
+ int workload_type, result = 0;
hwmgr->power_profile_mode = input[size];
+ if (hwmgr->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", hwmgr->power_profile_mode);
+ return -EINVAL;
+ }
+
if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (size < 10)
return -EINVAL;
@@ -3296,8 +3386,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
return result);
}
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type =
+ conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1 << hwmgr->power_profile_mode);
+ 1 << workload_type);
return 0;
}
@@ -3427,15 +3520,25 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.disable_smc_firmware_ctf =
vega20_thermal_disable_alert,
/* fan control related */
+ .get_fan_speed_percent =
+ vega20_fan_ctrl_get_fan_speed_percent,
+ .set_fan_speed_percent =
+ vega20_fan_ctrl_set_fan_speed_percent,
.get_fan_speed_info =
vega20_fan_ctrl_get_fan_speed_info,
.get_fan_speed_rpm =
vega20_fan_ctrl_get_fan_speed_rpm,
+ .set_fan_speed_rpm =
+ vega20_fan_ctrl_set_fan_speed_rpm,
.get_fan_control_mode =
vega20_get_fan_control_mode,
+ .set_fan_control_mode =
+ vega20_set_fan_control_mode,
/* smu memory related */
.notify_cac_buffer_info =
vega20_notify_cac_buffer_info,
+ .enable_mgpu_fan_boost =
+ vega20_enable_mgpu_fan_boost,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
index 5f1f7a32ac24..e5f7f8230065 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
@@ -100,9 +100,8 @@ static void dump_pptable(PPTable_t *pptable)
pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage);
- pr_info("padding8_limits[0] = 0x%02x\n", pptable->padding8_limits[0]);
- pr_info("padding8_limits[1] = 0x%02x\n", pptable->padding8_limits[1]);
- pr_info("padding8_limits[2] = 0x%02x\n", pptable->padding8_limits[2]);
+ pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits);
+ pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit);
pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc);
pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
@@ -417,8 +416,8 @@ static void dump_pptable(PPTable_t *pptable)
pr_info("FanGainEdge = %d\n", pptable->FanGainEdge);
pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot);
pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid);
- pr_info("FanGainVrVddc = %d\n", pptable->FanGainVrVddc);
- pr_info("FanGainVrMvdd = %d\n", pptable->FanGainVrMvdd);
+ pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx);
+ pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc);
pr_info("FanGainPlx = %d\n", pptable->FanGainPlx);
pr_info("FanGainHbm = %d\n", pptable->FanGainHbm);
pr_info("FanPwmMin = %d\n", pptable->FanPwmMin);
@@ -533,23 +532,20 @@ static void dump_pptable(PPTable_t *pptable)
pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc);
- for (i = 0; i < 14; i++)
- pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]);
+ pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm);
+ pr_info("padding16_Fan = %d\n", pptable->padding16_Fan);
+
+ pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
+ pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
- pr_info("Liquid1_I2C_address = 0x%x\n", pptable->Liquid1_I2C_address);
- pr_info("Liquid2_I2C_address = 0x%x\n", pptable->Liquid2_I2C_address);
- pr_info("Vr_I2C_address = 0x%x\n", pptable->Vr_I2C_address);
- pr_info("Plx_I2C_address = 0x%x\n", pptable->Plx_I2C_address);
+ pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
+ pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
- pr_info("Liquid_I2C_LineSCL = 0x%x\n", pptable->Liquid_I2C_LineSCL);
- pr_info("Liquid_I2C_LineSDA = 0x%x\n", pptable->Liquid_I2C_LineSDA);
- pr_info("Vr_I2C_LineSCL = 0x%x\n", pptable->Vr_I2C_LineSCL);
- pr_info("Vr_I2C_LineSDA = 0x%x\n", pptable->Vr_I2C_LineSDA);
+ for (i = 0; i < 11; i++)
+ pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]);
- pr_info("Plx_I2C_LineSCL = 0x%x\n", pptable->Plx_I2C_LineSCL);
- pr_info("Plx_I2C_LineSDA = 0x%x\n", pptable->Plx_I2C_LineSDA);
- pr_info("VrSensorPresent = 0x%x\n", pptable->VrSensorPresent);
- pr_info("LiquidSensorPresent = 0x%x\n", pptable->LiquidSensorPresent);
+ for (i = 0; i < 3; i++)
+ pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]);
pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
@@ -611,6 +607,24 @@ static void dump_pptable(PPTable_t *pptable)
pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
+ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+ pr_info("I2cControllers[%d]:\n", i);
+ pr_info(" .Enabled = %d\n",
+ pptable->I2cControllers[i].Enabled);
+ pr_info(" .SlaveAddress = 0x%x\n",
+ pptable->I2cControllers[i].SlaveAddress);
+ pr_info(" .ControllerPort = %d\n",
+ pptable->I2cControllers[i].ControllerPort);
+ pr_info(" .ControllerName = %d\n",
+ pptable->I2cControllers[i].ControllerName);
+ pr_info(" .ThermalThrottler = %d\n",
+ pptable->I2cControllers[i].ThermalThrottler);
+ pr_info(" .I2cProtocol = %d\n",
+ pptable->I2cControllers[i].I2cProtocol);
+ pr_info(" .I2cSpeed = %d\n",
+ pptable->I2cControllers[i].I2cSpeed);
+ }
+
for (i = 0; i < 10; i++)
pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]);
@@ -661,50 +675,6 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
return 0;
}
-static int copy_clock_limits_array(
- struct pp_hwmgr *hwmgr,
- uint32_t **pptable_info_array,
- const uint32_t *pptable_array,
- uint32_t power_saving_clock_count)
-{
- uint32_t array_size, i;
- uint32_t *table;
-
- array_size = sizeof(uint32_t) * power_saving_clock_count;
- table = kzalloc(array_size, GFP_KERNEL);
- if (NULL == table)
- return -ENOMEM;
-
- for (i = 0; i < power_saving_clock_count; i++)
- table[i] = pptable_array[i];
-
- *pptable_info_array = table;
-
- return 0;
-}
-
-static int copy_overdrive_settings_limits_array(
- struct pp_hwmgr *hwmgr,
- uint32_t **pptable_info_array,
- const uint32_t *pptable_array,
- uint32_t od_setting_count)
-{
- uint32_t array_size, i;
- uint32_t *table;
-
- array_size = sizeof(uint32_t) * od_setting_count;
- table = kzalloc(array_size, GFP_KERNEL);
- if (NULL == table)
- return -ENOMEM;
-
- for (i = 0; i < od_setting_count; i++)
- table[i] = pptable_array[i];
-
- *pptable_info_array = table;
-
- return 0;
-}
-
static int copy_overdrive_feature_capabilities_array(
struct pp_hwmgr *hwmgr,
uint8_t **pptable_info_array,
@@ -721,7 +691,7 @@ static int copy_overdrive_feature_capabilities_array(
return -ENOMEM;
for (i = 0; i < od_feature_count; i++) {
- table[i] = pptable_array[i];
+ table[i] = le32_to_cpu(pptable_array[i]);
if (table[i])
od_supported = true;
}
@@ -737,29 +707,19 @@ static int copy_overdrive_feature_capabilities_array(
static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable)
{
- struct atom_smc_dpm_info_v4_3 *smc_dpm_table;
+ struct atom_smc_dpm_info_v4_4 *smc_dpm_table;
int index = GetIndexIntoMasterDataTable(smc_dpm_info);
+ int i;
PP_ASSERT_WITH_CODE(
smc_dpm_table = smu_atom_get_data_table(hwmgr->adev, index, NULL, NULL, NULL),
"[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
return -1);
- ppsmc_pptable->Liquid1_I2C_address = smc_dpm_table->liquid1_i2c_address;
- ppsmc_pptable->Liquid2_I2C_address = smc_dpm_table->liquid2_i2c_address;
- ppsmc_pptable->Vr_I2C_address = smc_dpm_table->vr_i2c_address;
- ppsmc_pptable->Plx_I2C_address = smc_dpm_table->plx_i2c_address;
-
- ppsmc_pptable->Liquid_I2C_LineSCL = smc_dpm_table->liquid_i2c_linescl;
- ppsmc_pptable->Liquid_I2C_LineSDA = smc_dpm_table->liquid_i2c_linesda;
- ppsmc_pptable->Vr_I2C_LineSCL = smc_dpm_table->vr_i2c_linescl;
- ppsmc_pptable->Vr_I2C_LineSDA = smc_dpm_table->vr_i2c_linesda;
-
- ppsmc_pptable->Plx_I2C_LineSCL = smc_dpm_table->plx_i2c_linescl;
- ppsmc_pptable->Plx_I2C_LineSDA = smc_dpm_table->plx_i2c_linesda;
- ppsmc_pptable->VrSensorPresent = smc_dpm_table->vrsensorpresent;
- ppsmc_pptable->LiquidSensorPresent = smc_dpm_table->liquidsensorpresent;
-
+ memset(ppsmc_pptable->Padding32,
+ 0,
+ sizeof(struct atom_smc_dpm_info_v4_4) -
+ sizeof(struct atom_common_table_header));
ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
@@ -818,6 +778,24 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
+ if ((smc_dpm_table->table_header.format_revision == 4) &&
+ (smc_dpm_table->table_header.content_revision == 4)) {
+ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+ ppsmc_pptable->I2cControllers[i].Enabled =
+ smc_dpm_table->i2ccontrollers[i].enabled;
+ ppsmc_pptable->I2cControllers[i].SlaveAddress =
+ smc_dpm_table->i2ccontrollers[i].slaveaddress;
+ ppsmc_pptable->I2cControllers[i].ControllerPort =
+ smc_dpm_table->i2ccontrollers[i].controllerport;
+ ppsmc_pptable->I2cControllers[i].ThermalThrottler =
+ smc_dpm_table->i2ccontrollers[i].thermalthrottler;
+ ppsmc_pptable->I2cControllers[i].I2cProtocol =
+ smc_dpm_table->i2ccontrollers[i].i2cprotocol;
+ ppsmc_pptable->I2cControllers[i].I2cSpeed =
+ smc_dpm_table->i2ccontrollers[i].i2cspeed;
+ }
+ }
+
return 0;
}
@@ -834,6 +812,8 @@ static int init_powerplay_table_information(
hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
+ hwmgr->thermal_controller.fanInfo.ulMinRPM = 0;
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM = powerplay_table->smcPPTable.FanMaximumRpm;
set_hw_cap(hwmgr,
ATOM_VEGA20_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
@@ -842,34 +822,40 @@ static int init_powerplay_table_information(
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) {
- od_feature_count = (powerplay_table->OverDrive8Table.ODFeatureCount > ATOM_VEGA20_ODFEATURE_COUNT) ?
- ATOM_VEGA20_ODFEATURE_COUNT : powerplay_table->OverDrive8Table.ODFeatureCount;
- od_setting_count = (powerplay_table->OverDrive8Table.ODSettingCount > ATOM_VEGA20_ODSETTING_COUNT) ?
- ATOM_VEGA20_ODSETTING_COUNT : powerplay_table->OverDrive8Table.ODSettingCount;
+ od_feature_count =
+ (le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount) >
+ ATOM_VEGA20_ODFEATURE_COUNT) ?
+ ATOM_VEGA20_ODFEATURE_COUNT :
+ le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount);
+ od_setting_count =
+ (le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount) >
+ ATOM_VEGA20_ODSETTING_COUNT) ?
+ ATOM_VEGA20_ODSETTING_COUNT :
+ le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount);
copy_overdrive_feature_capabilities_array(hwmgr,
&pptable_information->od_feature_capabilities,
powerplay_table->OverDrive8Table.ODFeatureCapabilities,
od_feature_count);
- copy_overdrive_settings_limits_array(hwmgr,
+ phm_copy_overdrive_settings_limits_array(hwmgr,
&pptable_information->od_settings_max,
powerplay_table->OverDrive8Table.ODSettingsMax,
od_setting_count);
- copy_overdrive_settings_limits_array(hwmgr,
+ phm_copy_overdrive_settings_limits_array(hwmgr,
&pptable_information->od_settings_min,
powerplay_table->OverDrive8Table.ODSettingsMin,
od_setting_count);
}
- pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
- pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
- pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
- pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
- pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
+ pptable_information->us_small_power_limit1 = le16_to_cpu(powerplay_table->usSmallPowerLimit1);
+ pptable_information->us_small_power_limit2 = le16_to_cpu(powerplay_table->usSmallPowerLimit2);
+ pptable_information->us_boost_power_limit = le16_to_cpu(powerplay_table->usBoostPowerLimit);
+ pptable_information->us_od_turbo_power_limit = le16_to_cpu(powerplay_table->usODTurboPowerLimit);
+ pptable_information->us_od_powersave_power_limit = le16_to_cpu(powerplay_table->usODPowerSavePowerLimit);
- pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
+ pptable_information->us_software_shutdown_temp = le16_to_cpu(powerplay_table->usSoftwareShutdownTemp);
- hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE];
+ hwmgr->platform_descriptor.TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
disable_power_control = 0;
if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit)
@@ -877,13 +863,16 @@ static int init_powerplay_table_information(
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerControl);
if (powerplay_table->PowerSavingClockTable.ucTableRevision == 1) {
- power_saving_clock_count = (powerplay_table->PowerSavingClockTable.PowerSavingClockCount >= ATOM_VEGA20_PPCLOCK_COUNT) ?
- ATOM_VEGA20_PPCLOCK_COUNT : powerplay_table->PowerSavingClockTable.PowerSavingClockCount;
- copy_clock_limits_array(hwmgr,
+ power_saving_clock_count =
+ (le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount) >=
+ ATOM_VEGA20_PPCLOCK_COUNT) ?
+ ATOM_VEGA20_PPCLOCK_COUNT :
+ le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount);
+ phm_copy_clock_limits_array(hwmgr,
&pptable_information->power_saving_clock_max,
powerplay_table->PowerSavingClockTable.PowerSavingClockMax,
power_saving_clock_count);
- copy_clock_limits_array(hwmgr,
+ phm_copy_clock_limits_array(hwmgr,
&pptable_information->power_saving_clock_min,
powerplay_table->PowerSavingClockTable.PowerSavingClockMin,
power_saving_clock_count);
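
The clamp-after-byteswap pattern above repeats three times in this hunk (OD feature count, OD setting count, power-saving clock count). Below is a minimal user-space sketch of that pattern; le32_to_cpu() is modelled as a no-op for a little-endian host and the helper name clamp_le32_count() is illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* no-op stand-in; assumes a little-endian host */
static uint32_t le32_to_cpu(uint32_t v) { return v; }

/* byteswap the firmware-provided count, then cap it at the ATOM_* limit */
static uint32_t clamp_le32_count(uint32_t le_count, uint32_t max)
{
	uint32_t count = le32_to_cpu(le_count);

	return count > max ? max : count;
}

int main(void)
{
	printf("%u\n", clamp_le32_count(50, 32)); /* capped to 32 */
	printf("%u\n", clamp_le32_count(20, 32)); /* passes through as 20 */
	return 0;
}
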
@@ -893,7 +882,15 @@ static int init_powerplay_table_information(
if (pptable_information->smc_pptable == NULL)
return -ENOMEM;
- memcpy(pptable_information->smc_pptable, &(powerplay_table->smcPPTable), sizeof(PPTable_t));
+ if (powerplay_table->smcPPTable.Version <= 2)
+ memcpy(pptable_information->smc_pptable,
+ &(powerplay_table->smcPPTable),
+ sizeof(PPTable_t) -
+ sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT);
+ else
+ memcpy(pptable_information->smc_pptable,
+ &(powerplay_table->smcPPTable),
+ sizeof(PPTable_t));
result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
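
The memcpy() above is now sized by table version: pptables reporting Version <= 2 predate the trailing I2cControllers[] array appended to the SMU pptable later in this diff, so only the leading portion is copied. A standalone sketch of the size computation, using a stand-in layout for PPTable_t:

#include <stdint.h>
#include <stdio.h>

#define I2C_CONTROLLER_NAME_COUNT 7	/* per I2cControllerName_e below */

/* 7 x uint32_t = 28 bytes, matching the I2cControllerConfig_t below */
typedef struct { uint32_t fields[7]; } I2cControllerConfig_t;

/* stand-in: the real PPTable_t ends with the I2C controller array */
typedef struct {
	uint32_t legacy_fields[4];	/* placeholder for the older layout */
	I2cControllerConfig_t I2cControllers[I2C_CONTROLLER_NAME_COUNT];
} PPTable_t;

static size_t pptable_copy_size(uint32_t version)
{
	if (version <= 2)
		return sizeof(PPTable_t) -
		       sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT;
	return sizeof(PPTable_t);
}

int main(void)
{
	printf("v2: %zu bytes, v3: %zu bytes\n",
	       pptable_copy_size(2), pptable_copy_size(3));
	return 0;
}
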
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
index 2984ddd5428c..ede54e87e287 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
@@ -29,6 +29,78 @@
#include "soc15_common.h"
#include "pp_debug.h"
+static int vega20_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = hwmgr->backend;
+ int ret = 0;
+
+ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
+ ret = vega20_enable_smc_features(
+ hwmgr, false,
+ data->smu_features[GNLD_FAN_CONTROL].
+ smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Disable FAN CONTROL feature Failed!",
+ return ret);
+ data->smu_features[GNLD_FAN_CONTROL].enabled = false;
+ }
+
+ return ret;
+}
+
+int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = hwmgr->backend;
+
+ if (data->smu_features[GNLD_FAN_CONTROL].supported)
+ return vega20_disable_fan_control_feature(hwmgr);
+
+ return 0;
+}
+
+static int vega20_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = hwmgr->backend;
+ int ret = 0;
+
+ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
+ ret = vega20_enable_smc_features(
+ hwmgr, true,
+ data->smu_features[GNLD_FAN_CONTROL].
+ smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Enable FAN CONTROL feature Failed!",
+ return ret);
+ data->smu_features[GNLD_FAN_CONTROL].enabled = true;
+ }
+
+ return ret;
+}
+
+int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = hwmgr->backend;
+
+ if (data->smu_features[GNLD_FAN_CONTROL].supported)
+ return vega20_enable_fan_control_feature(hwmgr);
+
+ return 0;
+}
+
+static int vega20_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+ CG_FDO_CTRL2, TMIN, 0));
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
+
+ return 0;
+}
+
static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
int ret = 0;
@@ -37,20 +109,67 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
PPSMC_MSG_GetCurrentRpm)) == 0,
"Attempt to get current RPM from SMC Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
- current_rpm)) == 0,
- "Attempt to read current RPM from SMC Failed!",
- return ret);
+ *current_rpm = smum_get_argument(hwmgr);
return 0;
}
+int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+ uint32_t *speed)
+{
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ uint32_t current_rpm, percent = 0;
+ int ret = 0;
+
+ ret = vega20_get_current_rpm(hwmgr, &current_rpm);
+ if (ret)
+ return ret;
+
+ percent = current_rpm * 100 / pp_table->FanMaximumRpm;
+
+ *speed = percent > 100 ? 100 : percent;
+
+ return 0;
+}
+
+int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+ uint32_t speed)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t duty100;
+ uint32_t duty;
+ uint64_t tmp64;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
memset(fan_speed_info, 0, sizeof(*fan_speed_info));
- fan_speed_info->supports_percent_read = false;
- fan_speed_info->supports_percent_write = false;
+ fan_speed_info->supports_percent_read = true;
+ fan_speed_info->supports_percent_write = true;
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
@@ -64,6 +183,31 @@ int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
return vega20_get_current_rpm(hwmgr, speed);
}
+int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t tach_period, crystal_clock_freq;
+ int result = 0;
+
+ if (!speed)
+ return -EINVAL;
+
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
+ result = vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
+ if (result)
+ return result;
+ }
+
+ crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+ CG_TACH_CTRL, TARGET_PERIOD,
+ tach_period));
+
+ return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
+}
+
/**
* Reads the remote temperature from the SIslands thermal controller.
*
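
Two fixed-point conversions drive the new percent and RPM handlers above: percent to PWM duty scales against the hardware's FMAX_DUTY100 field, and RPM to tachometer period uses the crystal clock, which the *10000 factor suggests is reported in 10 kHz units. A standalone check of both computations, with the register access stripped out:

#include <stdint.h>
#include <stdio.h>

/* speed in percent -> static PWM duty, mirroring the do_div() sequence */
static uint32_t percent_to_duty(uint32_t speed, uint32_t duty100)
{
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;
	tmp64 = (uint64_t)speed * duty100;
	return (uint32_t)(tmp64 / 100);
}

/* target RPM -> tach period; xclk_10khz assumed to be in 10 kHz units */
static uint32_t rpm_to_tach_period(uint32_t xclk_10khz, uint32_t rpm)
{
	return 60 * xclk_10khz * 10000 / (8 * rpm);
}

int main(void)
{
	printf("duty = %u\n", percent_to_duty(50, 255));	/* -> 127 */
	printf("tach = %u\n", rpm_to_tach_period(100, 3000));	/* -> 2500 */
	return 0;
}
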
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
index 2a6d49fec4e0..2d1769bbd24e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
@@ -50,15 +50,22 @@ struct vega20_temperature {
#define FDO_PWM_MODE_STATIC_RPM 5
extern int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info);
-extern int vega20_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
+extern int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
+ uint32_t speed);
+extern int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+ uint32_t *speed);
+extern int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+ uint32_t speed);
+extern int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
extern int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr);
extern int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range);
+extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index a6d92128b19c..e5a60aa44b5d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -328,6 +328,8 @@ struct pp_hwmgr_func {
int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
+ int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
+ int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
};
struct pp_table_func {
@@ -732,7 +734,6 @@ struct pp_hwmgr {
void *smu_backend;
const struct pp_smumgr_func *smumgr_funcs;
bool is_kicker;
- bool reload_fw;
enum PP_DAL_POWERLEVEL dal_power_level;
struct phm_dynamic_state_info dyn_state;
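
The two hooks added above are optional per-ASIC callbacks, so callers are expected to test the function pointer before dispatching, as elsewhere in powerplay; the smu10 change further down, which drops the unconditional SDMA/VCN power messaging from the smumgr paths, presumably routes through hooks like these. A toy model of that convention, with the struct definitions reduced to stand-ins and the wrapper name purely illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct pp_hwmgr;	/* toy stand-ins, not the real definitions */

struct pp_hwmgr_func {
	int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool gate);
};

struct pp_hwmgr {
	const struct pp_hwmgr_func *hwmgr_func;
};

static int pp_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
	if (!hwmgr->hwmgr_func->powergate_sdma)
		return -EOPNOTSUPP;	/* hook not implemented on this ASIC */
	return hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}

int main(void)
{
	struct pp_hwmgr_func funcs = { .powergate_sdma = NULL };
	struct pp_hwmgr hwmgr = { .hwmgr_func = &funcs };

	printf("%d\n", pp_powergate_sdma(&hwmgr, true)); /* -95 on Linux */
	return 0;
}
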
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 71191deb4e76..2998a49960ed 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x11
+#define SMU11_DRIVER_IF_VERSION 0x12
#define PPTABLE_V20_SMU_VERSION 2
@@ -165,7 +165,7 @@
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
-
+#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT )
#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
@@ -186,6 +186,9 @@
#define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000
#define DPM_OVERRIDE_ENABLE_GFXOFF_FCLK_SWITCH 0x00020000
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
#define VR_MAPPING_VR_SELECT_MASK 0x01
#define VR_MAPPING_VR_SELECT_SHIFT 0x00
@@ -208,15 +211,17 @@
#define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2
#define THROTTLER_STATUS_TEMP_HBM_BIT 3
#define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4
-#define THROTTLER_STATUS_TEMP_VR_MEM_BIT 5
-#define THROTTLER_STATUS_TEMP_LIQUID_BIT 6
-#define THROTTLER_STATUS_TEMP_PLX_BIT 7
-#define THROTTLER_STATUS_TEMP_SKIN_BIT 8
-#define THROTTLER_STATUS_TDC_GFX_BIT 9
-#define THROTTLER_STATUS_TDC_SOC_BIT 10
-#define THROTTLER_STATUS_PPT_BIT 11
-#define THROTTLER_STATUS_FIT_BIT 12
-#define THROTTLER_STATUS_PPM_BIT 13
+#define THROTTLER_STATUS_TEMP_VR_SOC_BIT 5
+#define THROTTLER_STATUS_TEMP_VR_MEM0_BIT 6
+#define THROTTLER_STATUS_TEMP_VR_MEM1_BIT 7
+#define THROTTLER_STATUS_TEMP_LIQUID_BIT 8
+#define THROTTLER_STATUS_TEMP_PLX_BIT 9
+#define THROTTLER_STATUS_TEMP_SKIN_BIT 10
+#define THROTTLER_STATUS_TDC_GFX_BIT 11
+#define THROTTLER_STATUS_TDC_SOC_BIT 12
+#define THROTTLER_STATUS_PPT_BIT 13
+#define THROTTLER_STATUS_FIT_BIT 14
+#define THROTTLER_STATUS_PPM_BIT 15
#define TABLE_TRANSFER_OK 0x0
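
Inserting the VR_SOC/VR_MEM0/VR_MEM1 bits renumbers every later throttler bit, so anything decoding ThrottlerStatus against the old layout would misattribute throttle reasons; that incompatibility is what the SMU11_DRIVER_IF_VERSION bump above guards. A minimal decode sketch using the new numbering:

#include <stdint.h>
#include <stdio.h>

#define THROTTLER_STATUS_TEMP_VR_MEM0_BIT	6
#define THROTTLER_STATUS_PPT_BIT		13

int main(void)
{
	/* example status word with two throttlers asserted */
	uint32_t status = (1u << THROTTLER_STATUS_TEMP_VR_MEM0_BIT) |
			  (1u << THROTTLER_STATUS_PPT_BIT);

	if (status & (1u << THROTTLER_STATUS_TEMP_VR_MEM0_BIT))
		printf("VR_MEM0 thermal throttling\n");
	if (status & (1u << THROTTLER_STATUS_PPT_BIT))
		printf("package power limit throttling\n");
	return 0;
}
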
@@ -236,6 +241,58 @@
#define XGMI_STATE_D0 1
#define XGMI_STATE_D3 0
+typedef enum {
+ I2C_CONTROLLER_PORT_0 = 0,
+ I2C_CONTROLLER_PORT_1 = 1,
+} I2cControllerPort_e;
+
+typedef enum {
+ I2C_CONTROLLER_NAME_VR_GFX = 0,
+ I2C_CONTROLLER_NAME_VR_SOC,
+ I2C_CONTROLLER_NAME_VR_VDDCI,
+ I2C_CONTROLLER_NAME_VR_HBM,
+ I2C_CONTROLLER_NAME_LIQUID_0,
+ I2C_CONTROLLER_NAME_LIQUID_1,
+ I2C_CONTROLLER_NAME_PLX,
+ I2C_CONTROLLER_NAME_COUNT,
+} I2cControllerName_e;
+
+typedef enum {
+ I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0,
+ I2C_CONTROLLER_THROTTLER_VR_GFX,
+ I2C_CONTROLLER_THROTTLER_VR_SOC,
+ I2C_CONTROLLER_THROTTLER_VR_VDDCI,
+ I2C_CONTROLLER_THROTTLER_VR_HBM,
+ I2C_CONTROLLER_THROTTLER_LIQUID_0,
+ I2C_CONTROLLER_THROTTLER_LIQUID_1,
+ I2C_CONTROLLER_THROTTLER_PLX,
+} I2cControllerThrottler_e;
+
+typedef enum {
+ I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
+ I2C_CONTROLLER_PROTOCOL_VR_IR35217,
+ I2C_CONTROLLER_PROTOCOL_TMP_TMP102A,
+ I2C_CONTROLLER_PROTOCOL_SPARE_0,
+ I2C_CONTROLLER_PROTOCOL_SPARE_1,
+ I2C_CONTROLLER_PROTOCOL_SPARE_2,
+} I2cControllerProtocol_e;
+
+typedef enum {
+ I2C_CONTROLLER_SPEED_SLOW = 0,
+ I2C_CONTROLLER_SPEED_FAST = 1,
+} I2cControllerSpeed_e;
+
+typedef struct {
+ uint32_t Enabled;
+ uint32_t SlaveAddress;
+ uint32_t ControllerPort;
+ uint32_t ControllerName;
+
+ uint32_t ThermalThrottler;
+ uint32_t I2cProtocol;
+ uint32_t I2cSpeed;
+} I2cControllerConfig_t;
+
typedef struct {
uint32_t a;
uint32_t b;
@@ -269,6 +326,12 @@ typedef enum {
} PPCLK_e;
typedef enum {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_COUNT,
+} POWER_SOURCE_e;
+
+typedef enum {
VOLTAGE_MODE_AVFS = 0,
VOLTAGE_MODE_AVFS_SS,
VOLTAGE_MODE_SS,
@@ -328,8 +391,8 @@ typedef struct {
uint16_t PpmTemperatureThreshold;
uint8_t MemoryOnPackage;
- uint8_t padding8_limits[3];
-
+ uint8_t padding8_limits;
+ uint16_t Tvr_SocLimit;
uint16_t UlvVoltageOffsetSoc;
uint16_t UlvVoltageOffsetGfx;
@@ -400,8 +463,8 @@ typedef struct {
uint16_t FanGainEdge;
uint16_t FanGainHotspot;
uint16_t FanGainLiquid;
- uint16_t FanGainVrVddc;
- uint16_t FanGainVrMvdd;
+ uint16_t FanGainVrGfx;
+ uint16_t FanGainVrSoc;
uint16_t FanGainPlx;
uint16_t FanGainHbm;
uint16_t FanPwmMin;
@@ -438,7 +501,7 @@ typedef struct {
uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT];
uint8_t Padding8_GfxBtc[2];
- uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT];
+ int16_t DcBtcMin[AVFS_VOLTAGE_COUNT];
uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT];
@@ -461,24 +524,14 @@ typedef struct {
uint16_t MGpuFanBoostLimitRpm;
uint16_t padding16_Fan;
- uint32_t Reserved[13];
-
+ uint16_t FanGainVrMem0;
+ uint16_t FanGainVrMem1;
+ uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT];
- uint8_t Liquid1_I2C_address;
- uint8_t Liquid2_I2C_address;
- uint8_t Vr_I2C_address;
- uint8_t Plx_I2C_address;
+ uint32_t Reserved[11];
- uint8_t Liquid_I2C_LineSCL;
- uint8_t Liquid_I2C_LineSDA;
- uint8_t Vr_I2C_LineSCL;
- uint8_t Vr_I2C_LineSDA;
-
- uint8_t Plx_I2C_LineSCL;
- uint8_t Plx_I2C_LineSDA;
- uint8_t VrSensorPresent;
- uint8_t LiquidSensorPresent;
+ uint32_t Padding32[3];
uint16_t MaxVoltageStepGfx;
uint16_t MaxVoltageStepSoc;
@@ -545,6 +598,8 @@ typedef struct {
uint8_t FllGfxclkSpreadPercent;
uint16_t FllGfxclkSpreadFreq;
+ I2cControllerConfig_t I2cControllers[I2C_CONTROLLER_NAME_COUNT];
+
uint32_t BoardReserved[10];
@@ -601,7 +656,9 @@ typedef struct {
uint16_t TemperatureHotspot ;
uint16_t TemperatureHBM ;
uint16_t TemperatureVrGfx ;
- uint16_t TemperatureVrMem ;
+ uint16_t TemperatureVrSoc ;
+ uint16_t TemperatureVrMem0 ;
+ uint16_t TemperatureVrMem1 ;
uint16_t TemperatureLiquid ;
uint16_t TemperaturePlx ;
uint32_t ThrottlerStatus ;
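
For illustration, a populated I2cControllerConfig_t entry for the GFX voltage regulator, using the enums defined earlier in this header; every value here is hypothetical rather than taken from a real board, with the matching enum constant named in each comment.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint32_t Enabled;
	uint32_t SlaveAddress;
	uint32_t ControllerPort;
	uint32_t ControllerName;
	uint32_t ThermalThrottler;
	uint32_t I2cProtocol;
	uint32_t I2cSpeed;
} I2cControllerConfig_t;

static const I2cControllerConfig_t vr_gfx_i2c = {
	.Enabled          = 1,		/* I2C_CONTROLLER_ENABLED */
	.SlaveAddress     = 0x40,	/* hypothetical 7-bit address */
	.ControllerPort   = 0,		/* I2C_CONTROLLER_PORT_0 */
	.ControllerName   = 0,		/* I2C_CONTROLLER_NAME_VR_GFX */
	.ThermalThrottler = 1,		/* I2C_CONTROLLER_THROTTLER_VR_GFX */
	.I2cProtocol      = 1,		/* I2C_CONTROLLER_PROTOCOL_VR_IR35217 */
	.I2cSpeed         = 1,		/* I2C_CONTROLLER_SPEED_FAST */
};

int main(void)
{
	printf("%zu bytes per entry\n", sizeof(vr_gfx_i2c)); /* 28 */
	return 0;
}
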
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 18643e06bc6f..669bd0c2a16c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2269,11 +2269,13 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
case DRAM_LOG_BUFF_SIZE:
return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
+ break;
case SMU_Discrete_DpmTable:
switch (member) {
case LowSclkInterruptThreshold:
return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
}
+ break;
}
pr_debug("can't get the offset of type %x member %x\n", type, member);
return 0;
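
The added break statements are the substance of this hunk: in C, a case without break falls through, so a SMU_SoftRegisters lookup with an unmatched member previously fell into the SMU_Discrete_DpmTable case and could return a wrong offset instead of reaching the error path. A compact demonstration with invented type/member/offset values:

#include <stdio.h>

static int lookup(int type, int member)
{
	switch (type) {
	case 0:				/* SMU_SoftRegisters */
		switch (member) {
		case 1: return 100;
		}
		break;	/* without this, control falls into case 1 below */
	case 1:				/* SMU_Discrete_DpmTable */
		switch (member) {
		case 2: return 200;
		}
		break;
	}
	return 0;			/* "can't get the offset" path */
}

int main(void)
{
	/* type 0 with unmatched member 2: with the breaks this is 0,
	 * not the 200 a fall-through would have produced */
	printf("%d\n", lookup(0, 2));
	return 0;
}

The same fix is repeated below for the fiji, iceland, tonga and vegam variants of get_offsetof().
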
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index ec14798e87b6..bc8375cbf297 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -302,16 +302,6 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
hwmgr->avfs_supported = false;
}
- /* To initialize all clock gating before RLC loaded and running.*/
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
-
/* Setup SoftRegsStart here for register lookup in case
* DummyBackEnd is used and ProcessFirmwareHeader is not executed
*/
@@ -2331,6 +2321,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
case DRAM_LOG_BUFF_SIZE:
return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
+ break;
case SMU_Discrete_DpmTable:
switch (member) {
case UvdBootLevel:
@@ -2340,6 +2331,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
case LowSclkInterruptThreshold:
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
+ break;
}
pr_warn("can't get the offset of type %x member %x\n", type, member);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 73aa368a454e..375ccf6ff5f2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -232,26 +232,25 @@ static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr,
static int iceland_start_smu(struct pp_hwmgr *hwmgr)
{
+ struct iceland_smumgr *priv = hwmgr->smu_backend;
int result;
- result = iceland_smu_upload_firmware_image(hwmgr);
- if (result)
- return result;
- result = iceland_smu_start_smc(hwmgr);
- if (result)
- return result;
-
if (!smu7_is_smc_ram_running(hwmgr)) {
- pr_info("smu not running, upload firmware again \n");
result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(hwmgr);
- if (result)
- return result;
+ iceland_smu_start_smc(hwmgr);
}
+ /* Setup SoftRegsStart here so that the UcodeLoadStatus register
+ * can be read to check the firmware loading state
+ */
+ smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, SoftRegisters),
+ &(priv->smu7_data.soft_regs_start), 0x40000);
+
result = smu7_request_smu_load_fw(hwmgr);
return result;
@@ -2237,11 +2236,13 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
case DRAM_LOG_BUFF_SIZE:
return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
+ break;
case SMU_Discrete_DpmTable:
switch (member) {
case LowSclkInterruptThreshold:
return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
}
+ break;
}
pr_warn("can't get the offset of type %x member %x\n", type, member);
return 0;
@@ -2662,7 +2663,7 @@ const struct pp_smumgr_func iceland_smu_funcs = {
.smu_fini = &smu7_smu_fini,
.start_smu = &iceland_start_smu,
.check_fw_load_finish = &smu7_check_fw_load_finish,
- .request_smu_load_fw = &smu7_reload_firmware,
+ .request_smu_load_fw = &smu7_request_smu_load_fw,
.request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 6f961dec2088..d0eb8ab50148 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -186,40 +186,12 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
return 0;
}
-/* sdma is disabled by default in vbios, need to re-enable in driver */
-static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr)
-{
- smu10_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PowerUpSdma);
-}
-
-static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr)
-{
- smu10_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PowerDownSdma);
-}
-
-/* vcn is disabled by default in vbios, need to re-enable in driver */
-static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
-{
- smu10_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerUpVcn, 0);
-}
-
-static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr)
-{
- smu10_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerDownVcn, 0);
-}
-
static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
{
struct smu10_smumgr *priv =
(struct smu10_smumgr *)(hwmgr->smu_backend);
if (priv) {
- smu10_smc_disable_sdma(hwmgr);
- smu10_smc_disable_vcn(hwmgr);
amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
&priv->smu_tables.entry[SMU10_WMTABLE].table);
@@ -243,8 +215,7 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
if (smu10_verify_smc_interface(hwmgr))
return -EINVAL;
- smu10_smc_enable_sdma(hwmgr);
- smu10_smc_enable_vcn(hwmgr);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 186dafc7f166..3f51d545e8ff 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -302,44 +302,6 @@ int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_
return 0;
}
-/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
-
-static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
-{
- uint32_t result = 0;
-
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- result = UCODE_ID_SDMA0_MASK;
- break;
- case UCODE_ID_SDMA1:
- result = UCODE_ID_SDMA1_MASK;
- break;
- case UCODE_ID_CP_CE:
- result = UCODE_ID_CP_CE_MASK;
- break;
- case UCODE_ID_CP_PFP:
- result = UCODE_ID_CP_PFP_MASK;
- break;
- case UCODE_ID_CP_ME:
- result = UCODE_ID_CP_ME_MASK;
- break;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- result = UCODE_ID_CP_MEC_MASK;
- break;
- case UCODE_ID_RLC_G:
- result = UCODE_ID_RLC_G_MASK;
- break;
- default:
- pr_info("UCode type is out of range! \n");
- result = 0;
- }
-
- return result;
-}
-
static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
uint32_t fw_type,
struct SMU_Entry *entry)
@@ -381,10 +343,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
uint32_t fw_to_load;
int r = 0;
- if (!hwmgr->reload_fw) {
- pr_info("skip reloading...\n");
- return 0;
- }
+ amdgpu_ucode_init_bo(hwmgr->adev);
if (smu_data->soft_regs_start)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
@@ -467,10 +426,13 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
- if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
- pr_err("Fail to Request SMU Load uCode");
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
- return r;
+ r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
+ if (!r)
+ return 0;
+
+ pr_err("SMU load firmware failed\n");
failed:
kfree(smu_data->toc);
@@ -482,13 +444,12 @@ failed:
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
uint32_t ret;
ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
- fw_mask, fw_mask);
+ fw_type, fw_type);
return ret;
}
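
With the per-type mask translation gone, smu7_check_fw_load_finish() polls UcodeLoadStatus against the caller-supplied mask directly, which lets smu7_request_smu_load_fw() pass the whole fw_to_load word and block until every requested firmware reports loaded. A user-space sketch of the completion condition, assuming the indirect-register wait succeeds once (status & mask) == mask; the mask values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define UCODE_ID_SDMA0_MASK	(1u << 1)	/* illustrative values */
#define UCODE_ID_CP_CE_MASK	(1u << 2)
#define UCODE_ID_RLC_G_MASK	(1u << 3)

/* condition the wait loop resolves: all requested firmwares done */
static int all_fw_loaded(uint32_t ucode_load_status, uint32_t fw_to_load)
{
	return (ucode_load_status & fw_to_load) == fw_to_load;
}

int main(void)
{
	uint32_t want = UCODE_ID_SDMA0_MASK | UCODE_ID_CP_CE_MASK |
			UCODE_ID_RLC_G_MASK;

	printf("%d\n", all_fw_loaded(UCODE_ID_SDMA0_MASK, want)); /* 0 */
	printf("%d\n", all_fw_loaded(want, want));                /* 1 */
	return 0;
}
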
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index f7e3bc22bb93..f836d30fdd44 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -658,11 +658,10 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
uint32_t smc_address;
+ uint32_t fw_to_check = 0;
+ int ret;
- if (!hwmgr->reload_fw) {
- pr_info("skip reloading...\n");
- return 0;
- }
+ amdgpu_ucode_init_bo(hwmgr->adev);
smu8_smu_populate_firmware_entries(hwmgr);
@@ -689,28 +688,9 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
smu8_smu->toc_entry_power_profiling_index);
- return smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
smu8_smu->toc_entry_initialize_index);
-}
-
-static int smu8_start_smu(struct pp_hwmgr *hwmgr)
-{
- int ret = 0;
- uint32_t fw_to_check = 0;
- struct amdgpu_device *adev = hwmgr->adev;
-
- uint32_t index = SMN_MP1_SRAM_START_ADDR +
- SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, Version);
-
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
- hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
- adev->pm.fw_version = hwmgr->smu_version >> 8;
fw_to_check = UCODE_ID_RLC_G_MASK |
UCODE_ID_SDMA0_MASK |
@@ -724,17 +704,38 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id == CHIP_STONEY)
fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
- ret = smu8_request_smu_load_fw(hwmgr);
- if (ret)
+ ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
+ if (ret) {
pr_err("SMU firmware load failed\n");
-
- smu8_check_fw_load_finish(hwmgr, fw_to_check);
+ return ret;
+ }
ret = smu8_load_mec_firmware(hwmgr);
- if (ret)
+ if (ret) {
pr_err("Mec Firmware load failed\n");
+ return ret;
+ }
- return ret;
+ return 0;
+}
+
+static int smu8_start_smu(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ uint32_t index = SMN_MP1_SRAM_START_ADDR +
+ SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, Version);
+
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+ hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+ adev->pm.fw_version = hwmgr->smu_version >> 8;
+
+ return smu8_request_smu_load_fw(hwmgr);
}
static int smu8_smu_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index ae8378ed32ee..3ed6c5f1e5cf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -192,6 +192,7 @@ static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
static int tonga_start_smu(struct pp_hwmgr *hwmgr)
{
+ struct tonga_smumgr *priv = hwmgr->smu_backend;
int result;
/* Only start SMC if SMC RAM is not running */
@@ -209,6 +210,14 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
}
}
+ /* Setup SoftRegsStart here so that the UcodeLoadStatus register
+ * can be read to check the firmware loading state
+ */
+ smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, SoftRegisters),
+ &(priv->smu7_data.soft_regs_start), 0x40000);
+
result = smu7_request_smu_load_fw(hwmgr);
return result;
@@ -2619,6 +2628,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
case DRAM_LOG_BUFF_SIZE:
return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
+ break;
case SMU_Discrete_DpmTable:
switch (member) {
case UvdBootLevel:
@@ -2628,6 +2638,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
case LowSclkInterruptThreshold:
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
+ break;
}
pr_warn("can't get the offset of type %x member %x\n", type, member);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 5d19115f410c..c81acc3192ad 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -88,8 +88,18 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
return 0;
}
-static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
- uint32_t *features_enabled)
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint32_t feature_mask)
+{
+ int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
+ PPSMC_MSG_DisableSmuFeatures;
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ msg, feature_mask);
+}
+
+int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ uint64_t *features_enabled)
{
if (features_enabled == NULL)
return -EINVAL;
@@ -102,9 +112,9 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- uint32_t features_enabled = 0;
+ uint64_t features_enabled = 0;
- vega10_get_smc_features(hwmgr, &features_enabled);
+ vega10_get_enabled_smc_features(hwmgr, &features_enabled);
if (features_enabled & SMC_DPM_FEATURES)
return true;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 424e868bc768..bad760f22624 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -42,6 +42,10 @@ struct vega10_smumgr {
struct smu_table_array smu_tables;
};
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint32_t feature_mask);
+int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ uint64_t *features_enabled);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 7f0e2109f40d..ddb801517667 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -37,8 +37,8 @@
* @param hwmgr the address of the HW manager
* @param table_id the driver's table ID to copy from
*/
-int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id)
+static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
{
struct vega12_smumgr *priv =
(struct vega12_smumgr *)(hwmgr->smu_backend);
@@ -75,8 +75,8 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
* @param hwmgr the address of the HW manager
* @param table_id the table to copy from
*/
-int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id)
+static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
{
struct vega12_smumgr *priv =
(struct vega12_smumgr *)(hwmgr->smu_backend);
@@ -351,6 +351,19 @@ static int vega12_start_smu(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega12_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
+ uint16_t table_id, bool rw)
+{
+ int ret;
+
+ if (rw)
+ ret = vega12_copy_table_from_smc(hwmgr, table, table_id);
+ else
+ ret = vega12_copy_table_to_smc(hwmgr, table, table_id);
+
+ return ret;
+}
+
const struct pp_smumgr_func vega12_smu_funcs = {
.smu_init = &vega12_smu_init,
.smu_fini = &vega12_smu_fini,
@@ -362,4 +375,5 @@ const struct pp_smumgr_func vega12_smu_funcs = {
.upload_pptable_settings = NULL,
.is_dpm_running = vega12_is_dpm_running,
.get_argument = smu9_get_argument,
+ .smc_table_manager = vega12_smc_table_manager,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
index b285cbc04019..aeec965ce81f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
@@ -48,10 +48,6 @@ struct vega12_smumgr {
#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
#define SMU_FEATURES_HIGH_SHIFT 32
-int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
-int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint64_t feature_mask);
int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index fe7f71079e0e..b7ff7d4d6f44 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -148,19 +148,11 @@ static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}
-/*
- * Retrieve an argument from SMC.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param arg pointer to store the argument from SMC.
- * @return Always return 0.
- */
-int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
+static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-
- return 0;
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
/*
@@ -168,8 +160,8 @@ int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
* @param hwmgr the address of the HW manager
* @param table_id the driver's table ID to copy from
*/
-int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id)
+static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
@@ -208,8 +200,8 @@ int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
* @param hwmgr the address of the HW manager
* @param table_id the table to copy from
*/
-int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id)
+static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
@@ -345,18 +337,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
- &smc_features_low)) == 0,
- "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
- return ret);
+ smc_features_low = vega20_get_argument(hwmgr);
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
- &smc_features_high)) == 0,
- "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
- return ret);
+ smc_features_high = vega20_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -574,6 +560,19 @@ static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
return false;
}
+static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
+ uint16_t table_id, bool rw)
+{
+ int ret;
+
+ if (rw)
+ ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
+ else
+ ret = vega20_copy_table_to_smc(hwmgr, table, table_id);
+
+ return ret;
+}
+
const struct pp_smumgr_func vega20_smu_funcs = {
.smu_init = &vega20_smu_init,
.smu_fini = &vega20_smu_fini,
@@ -584,4 +583,6 @@ const struct pp_smumgr_func vega20_smu_funcs = {
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.is_dpm_running = vega20_is_dpm_running,
+ .get_argument = vega20_get_argument,
+ .smc_table_manager = vega20_smc_table_manager,
};
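
vega20_get_enabled_smc_features() above stitches the SMU's two 32-bit replies into one 64-bit feature word via the SMU_FEATURES_* shift/mask pairs kept in vega20_smumgr.h below. A standalone check of the combination:

#include <stdint.h>
#include <stdio.h>

#define SMU_FEATURES_LOW_SHIFT	0
#define SMU_FEATURES_LOW_MASK	0x00000000FFFFFFFFull
#define SMU_FEATURES_HIGH_SHIFT	32
#define SMU_FEATURES_HIGH_MASK	0xFFFFFFFF00000000ull

int main(void)
{
	uint32_t lo = 0x89ABCDEF, hi = 0x01234567;
	uint64_t features =
		(((uint64_t)lo << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
		(((uint64_t)hi << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK);

	printf("0x%016llx\n", (unsigned long long)features);
	/* prints 0x0123456789abcdef */
	return 0;
}
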
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
index 505eb0d82e3b..77349c3f0162 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
@@ -47,11 +47,6 @@ struct vega20_smumgr {
#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
#define SMU_FEATURES_HIGH_SHIFT 32
-int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
-int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
-int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint64_t feature_mask);
int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 3d415fabbd93..9f71512b2510 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -2185,6 +2185,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
case DRAM_LOG_BUFF_SIZE:
return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
+ break;
case SMU_Discrete_DpmTable:
switch (member) {
case UvdBootLevel:
@@ -2194,6 +2195,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
case LowSclkInterruptThreshold:
return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
}
+ break;
}
pr_warn("can't get the offset of type %x member %x\n", type, member);
return 0;
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index ef44202fb43f..e1b72782848c 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -348,19 +348,20 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
/*
* check if there is enough rotation memory available for planes
- * that need 90° and 270° rotation. Each plane has set its required
- * memory size in the ->plane_check() callback, here we only make
- * sure that the sums are less that the total usable memory.
+ * that need 90° and 270° rotation or planes that are compressed.
+ * Each plane has set its required memory size in the ->plane_check()
+ * callback, here we only make sure that the sums are less than the
+ * total usable memory.
*
* The rotation memory allocation algorithm (for each plane):
- * a. If no more rotated planes exist, all remaining rotate
- * memory in the bank is available for use by the plane.
- * b. If other rotated planes exist, and plane's layer ID is
- * DE_VIDEO1, it can use all the memory from first bank if
- * secondary rotation memory bank is available, otherwise it can
+ * a. If no more rotated or compressed planes exist, all remaining
+ * rotate memory in the bank is available for use by the plane.
+ * b. If other rotated or compressed planes exist, and plane's
+ * layer ID is DE_VIDEO1, it can use all the memory from first bank
+ * if secondary rotation memory bank is available, otherwise it can
* use up to half the bank's memory.
- * c. If other rotated planes exist, and plane's layer ID is not
- * DE_VIDEO1, it can use half of the available memory
+ * c. If other rotated or compressed planes exist, and plane's layer ID
+ * is not DE_VIDEO1, it can use half of the available memory.
*
* Note: this algorithm assumes that the order in which the planes are
* checked always has DE_VIDEO1 plane first in the list if it is
@@ -372,7 +373,9 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
/* first count the number of rotated planes */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
- if (pstate->rotation & MALIDP_ROTATED_MASK)
+ struct drm_framebuffer *fb = pstate->fb;
+
+ if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier)
rotated_planes++;
}
@@ -388,8 +391,9 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
+ struct drm_framebuffer *fb = pstate->fb;
- if (pstate->rotation & MALIDP_ROTATED_MASK) {
+ if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier) {
/* process current plane */
rotated_planes--;
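
A toy model of allocation rules (a)-(c) from the comment above; the sizes, the bool flags and the helper name are all illustrative, and the real code additionally tracks the per-plane requirement computed in ->plane_check():

#include <stdbool.h>
#include <stdio.h>

/* how much rotation memory one rotated/compressed plane may claim */
static unsigned int rot_mem_for_plane(int other_rotated_planes_left,
				      bool is_de_video1,
				      bool have_second_bank,
				      unsigned int bank_size,
				      unsigned int available)
{
	if (other_rotated_planes_left == 0)
		return available;			/* rule (a) */
	if (is_de_video1)
		return have_second_bank ? bank_size	/* rule (b) */
					: bank_size / 2;
	return available / 2;				/* rule (c) */
}

int main(void)
{
	/* DE_VIDEO1 with another rotated plane behind it, two banks */
	printf("%u\n", rot_mem_for_plane(1, true, true, 1024, 2048)); /* 1024 */
	/* last rotated plane takes whatever remains */
	printf("%u\n", rot_mem_for_plane(0, false, true, 1024, 640)); /* 640 */
	return 0;
}
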
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 3171ffaadd77..505f316a192e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -37,6 +37,8 @@
#include "malidp_hw.h"
#define MALIDP_CONF_VALID_TIMEOUT 250
+#define AFBC_HEADER_SIZE 16
+#define AFBC_SUPERBLK_ALIGNMENT 128
static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
@@ -258,8 +260,134 @@ static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
.atomic_commit_tail = malidp_atomic_commit_tail,
};
+static bool
+malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct drm_format_info *info;
+
+ if ((mode_cmd->modifier[0] >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
+ DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
+ return false;
+ }
+
+ if (mode_cmd->modifier[0] &
+ ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
+ DRM_DEBUG_KMS("Unsupported modifiers\n");
+ return false;
+ }
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info) {
+ DRM_DEBUG_KMS("Unable to get the format information\n");
+ return false;
+ }
+
+ if (info->num_planes != 1) {
+ DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
+ return false;
+ }
+
+ if (mode_cmd->offsets[0] != 0) {
+ DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
+ return false;
+ }
+
+ switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
+ DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
+ return false;
+ }
+ break;
+ default:
+ DRM_DEBUG_KMS("Unsupported AFBC block size\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int n_superblocks = 0;
+ const struct drm_format_info *info;
+ struct drm_gem_object *objs = NULL;
+ u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
+ u32 afbc_superblock_width = 0, afbc_size = 0;
+
+ switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ afbc_superblock_height = 16;
+ afbc_superblock_width = 16;
+ break;
+ default:
+ DRM_DEBUG_KMS("AFBC superblock size is not supported\n");
+ return false;
+ }
+
+ info = drm_get_format_info(dev, mode_cmd);
+
+ n_superblocks = (mode_cmd->width / afbc_superblock_width) *
+ (mode_cmd->height / afbc_superblock_height);
+
+ afbc_superblock_size = info->cpp[0] * afbc_superblock_width *
+ afbc_superblock_height;
+
+ afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
+ afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);
+
+ if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
+ DRM_DEBUG_KMS("Invalid value of pitch (=%u) should be same as width (=%u) * cpp (=%u)\n",
+ mode_cmd->pitches[0], mode_cmd->width, info->cpp[0]);
+ return false;
+ }
+
+ objs = drm_gem_object_lookup(file, mode_cmd->handles[0]);
+ if (!objs) {
+ DRM_DEBUG_KMS("Failed to lookup GEM object\n");
+ return false;
+ }
+
+ if (objs->size < afbc_size) {
+ DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n",
+ objs->size, afbc_size);
+ drm_gem_object_put_unlocked(objs);
+ return false;
+ }
+
+ drm_gem_object_put_unlocked(objs);
+
+ return true;
+}
+
+static bool
+malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
+ return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);
+
+ return false;
+}
+
+struct drm_framebuffer *
+malidp_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ if (mode_cmd->modifier[0]) {
+ if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
+ return ERR_PTR(-EINVAL);
+ }
+
+ return drm_gem_fb_create(dev, file, mode_cmd);
+}
+
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
+ .fb_create = malidp_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -752,6 +880,7 @@ static int malidp_bind(struct device *dev)
drm->irq_enabled = true;
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ drm_crtc_vblank_reset(&malidp->crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;
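
To make the size check in malidp_verify_afbc_framebuffer_size() concrete, here is the same arithmetic for a hypothetical 1920x1088 ARGB8888 buffer (16x16 superblocks; both dimensions must already be 16-aligned per the caps check):

#include <stdint.h>
#include <stdio.h>

#define AFBC_HEADER_SIZE	16
#define AFBC_SUPERBLK_ALIGNMENT	128
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t w = 1920, h = 1088, cpp = 4;
	uint32_t n_superblocks = (w / 16) * (h / 16);		/* 8160 */
	uint32_t sb_size = cpp * 16 * 16;			/* 1024 */
	uint32_t afbc_size =
		ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT) +
		n_superblocks * ALIGN(sb_size, AFBC_SUPERBLK_ALIGNMENT);

	/* 130560 header bytes + 8355840 payload bytes = 8486400 */
	printf("min GEM object size: %u bytes\n", afbc_size);
	return 0;
}
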
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index e3eb0cb1f385..b76c86f18a56 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -55,6 +55,12 @@ struct malidp_plane {
const struct malidp_layer *layer;
};
+enum mmu_prefetch_mode {
+ MALIDP_PREFETCH_MODE_NONE,
+ MALIDP_PREFETCH_MODE_PARTIAL,
+ MALIDP_PREFETCH_MODE_FULL,
+};
+
struct malidp_plane_state {
struct drm_plane_state base;
@@ -63,6 +69,8 @@ struct malidp_plane_state {
/* internal format ID */
u8 format;
u8 n_planes;
+ enum mmu_prefetch_mode mmu_prefetch_mode;
+ u32 mmu_prefetch_pgsize;
};
#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index c94a4422e0e9..7aad7dd80d8c 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -84,16 +84,48 @@ static const struct malidp_format_id malidp550_de_formats[] = {
};
static const struct malidp_layer malidp500_layers[] = {
- { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB },
- { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
- { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
+ /* id, base address, fb pointer address base, stride offset,
+ * yuv2rgb matrix offset, mmu control register offset, rotation_features
+ */
+ { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE,
+ MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY },
+ { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE,
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE,
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
};
static const struct malidp_layer malidp550_layers[] = {
- { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB },
- { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
- { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB },
- { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE, 0 },
+ /* id, base address, fb pointer address base, stride offset,
+ * yuv2rgb matrix offset, mmu control register offset, rotation_features
+ */
+ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+ { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
+ MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE },
+};
+
+static const struct malidp_layer malidp650_layers[] = {
+ /* id, base address, fb pointer address base, stride offset,
+ * yuv2rgb matrix offset, mmu control register offset,
+ * rotation_features
+ */
+ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
+ MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+ { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
+ MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL,
+ ROTATE_COMPRESSED },
+ { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
+ MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
+ MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL,
+ ROTATE_NONE },
};
#define SE_N_SCALING_COEFFS 96
@@ -288,10 +320,6 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
{
- /* RGB888 or BGR888 can't be rotated */
- if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
- return -EINVAL;
-
/*
* Each layer needs enough rotation memory to fit 8 lines
* worth of pixel data. Required size is then:
@@ -384,7 +412,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
dma_addr_t *addrs, s32 *pitches,
- int num_planes, u16 w, u16 h, u32 fmt_id)
+ int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs)
{
u32 base = MALIDP500_SE_MEMWRITE_BASE;
u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +445,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
MALIDP500_SE_MEMWRITE_OUT_SIZE);
+
+ if (rgb2yuv_coeffs) {
+ int i;
+
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+ malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+ MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+ }
+ }
+
malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
return 0;
@@ -568,10 +607,6 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
{
u32 bytes_per_col;
- /* raw RGB888 or BGR888 can't be rotated */
- if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
- return -EINVAL;
-
switch (fmt) {
/* 8 lines at 4 bytes per pixel */
case DRM_FORMAT_ARGB2101010:
@@ -658,7 +693,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
dma_addr_t *addrs, s32 *pitches,
- int num_planes, u16 w, u16 h, u32 fmt_id)
+ int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs)
{
u32 base = MALIDP550_SE_MEMWRITE_BASE;
u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +725,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
MALIDP550_SE_CONTROL);
+ if (rgb2yuv_coeffs) {
+ int i;
+
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+ malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+ MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+ }
+ }
+
return 0;
}
@@ -832,8 +877,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
.features = MALIDP_REGMAP_HAS_CLEARIRQ,
- .n_layers = ARRAY_SIZE(malidp550_layers),
- .layers = malidp550_layers,
+ .n_layers = ARRAY_SIZE(malidp650_layers),
+ .layers = malidp650_layers,
.de_irq_map = {
.irq_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP650_DE_IRQ_DRIFT |
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index ad2e96915d44..40155e2ea9d9 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -36,6 +36,12 @@ enum {
SE_MEMWRITE = BIT(5),
};
+enum rotation_features {
+ ROTATE_NONE, /* does not support rotation at all */
+ ROTATE_ANY, /* supports rotation on any buffers */
+ ROTATE_COMPRESSED, /* supports rotation only on compressed buffers */
+};
+
struct malidp_format_id {
u32 format; /* DRM fourcc */
u8 layer; /* bitmask of layers supporting it */
@@ -62,6 +68,8 @@ struct malidp_layer {
u16 ptr; /* address offset for the pointer register */
u16 stride_offset; /* offset to the first stride register. */
s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */
+ u16 mmu_ctrl_offset; /* offset to the MMU control register */
+ enum rotation_features rot; /* type of rotation supported */
};
enum malidp_scaling_coeff_set {
@@ -191,7 +199,8 @@ struct malidp_hw {
* @param fmt_id - internal format ID of output buffer
*/
int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
- s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+ s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs);
/*
* Disable the writing to memory of the next frame's content.
@@ -379,4 +388,9 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
#define MALIDP_GAMMA_LUT_SIZE 4096
+#define AFBC_MOD_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_MASK | \
+ AFBC_FORMAT_MOD_YTR | AFBC_FORMAT_MOD_SPLIT | \
+ AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_CBR | \
+ AFBC_FORMAT_MOD_TILED | AFBC_FORMAT_MOD_SC)
+
#endif /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index ba6ae66387c9..91472e5e0c8b 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
s32 pitches[2];
u8 format;
u8 n_planes;
+ bool rgb2yuv_initialized;
+ const s16 *rgb2yuv_coeffs;
};
static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
static struct drm_connector_state *
malidp_mw_connector_duplicate_state(struct drm_connector *connector)
{
- struct malidp_mw_connector_state *mw_state;
+ struct malidp_mw_connector_state *mw_state, *mw_current_state;
if (WARN_ON(!connector->state))
return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
if (!mw_state)
return NULL;
- /* No need to preserve any of our driver-local data */
+ mw_current_state = to_mw_state(connector->state);
+ mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+ mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
__drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+ 47, 157, 16,
+ -26, -87, 112,
+ 112, -102, -10,
+ 16, 128, 128
+};
+
static int
malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
}
mw_state->n_planes = n_planes;
+ if (fb->format->is_yuv)
+ mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
return 0;
}
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
conn_state->writeback_job = NULL;
-
hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
mw_state->pitches, mw_state->n_planes,
- fb->width, fb->height, mw_state->format);
+ fb->width, fb->height, mw_state->format,
+ !mw_state->rgb2yuv_initialized ?
+ mw_state->rgb2yuv_coeffs : NULL);
+ mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
} else {
DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
hwdev->hw->disable_memwrite(hwdev);
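
The coefficient table is the BT.709 limited-range RGB to YCbCr matrix in what looks like /256 fixed point, followed by the (16, 128, 128) offsets. Assuming the hardware evaluates out = (row . RGB) / 256 + offset, a quick standalone check reproduces limited-range white:

#include <stdint.h>
#include <stdio.h>

static const int16_t m[12] = {
	 47,  157,   16,
	-26,  -87,  112,
	112, -102,  -10,
	 16,  128,  128,	/* offsets for Y, Cb, Cr */
};

static int convert(int r, int g, int b, int row)
{
	return (m[row * 3] * r + m[row * 3 + 1] * g + m[row * 3 + 2] * b) / 256
		+ m[9 + row];
}

int main(void)
{
	/* full-range white lands on limited-range white: Y=235 Cb=128 Cr=128 */
	printf("Y=%d Cb=%d Cr=%d\n",
	       convert(255, 255, 255, 0),
	       convert(255, 255, 255, 1),
	       convert(255, 255, 255, 2));
	return 0;
}
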
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 49c37f6dd63e..837a24d56675 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -10,11 +10,14 @@
* ARM Mali DP plane manipulation routines.
*/
+#include <linux/iommu.h>
+
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
@@ -36,6 +39,7 @@
#define LAYER_COMP_MASK (0x3 << 12)
#define LAYER_COMP_PIXEL (0x3 << 12)
#define LAYER_COMP_PLANE (0x2 << 12)
+#define LAYER_PMUL_ENABLE (0x1 << 14)
#define LAYER_ALPHA_OFFSET (16)
#define LAYER_ALPHA_MASK (0xff)
#define LAYER_ALPHA(x) (((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET)
@@ -56,6 +60,13 @@
*/
#define MALIDP_ALPHA_LUT 0xffaa5500
+/* page sizes the MMU prefetcher can support */
+#define MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES (SZ_4K | SZ_64K)
+#define MALIDP_MMU_PREFETCH_FULL_PGSIZES (SZ_1M | SZ_2M)
+
+/* readahead for partial-frame prefetch */
+#define MALIDP_MMU_PREFETCH_READAHEAD 8
+
static void malidp_de_plane_destroy(struct drm_plane *plane)
{
struct malidp_plane *mp = to_malidp_plane(plane);
@@ -100,6 +111,9 @@ drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
state->format = m_state->format;
state->n_planes = m_state->n_planes;
+ state->mmu_prefetch_mode = m_state->mmu_prefetch_mode;
+ state->mmu_prefetch_pgsize = m_state->mmu_prefetch_pgsize;
+
return &state->base;
}
@@ -112,6 +126,12 @@ static void malidp_destroy_plane_state(struct drm_plane *plane,
kfree(m_state);
}
+static const char * const prefetch_mode_names[] = {
+ [MALIDP_PREFETCH_MODE_NONE] = "MMU_PREFETCH_NONE",
+ [MALIDP_PREFETCH_MODE_PARTIAL] = "MMU_PREFETCH_PARTIAL",
+ [MALIDP_PREFETCH_MODE_FULL] = "MMU_PREFETCH_FULL",
+};
+
static void malidp_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
@@ -120,6 +140,9 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
drm_printf(p, "\tformat_id=%u\n", ms->format);
drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
+ drm_printf(p, "\tmmu_prefetch_mode=%s\n",
+ prefetch_mode_names[ms->mmu_prefetch_mode]);
+ drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}
static const struct drm_plane_funcs malidp_de_plane_funcs = {
@@ -173,6 +196,199 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
return 0;
}
+static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
+{
+ u32 pgsize_bitmap = 0;
+
+ if (iommu_present(&platform_bus_type)) {
+ struct iommu_domain *mmu_dom =
+ iommu_get_domain_for_dev(mp->base.dev->dev);
+
+ if (mmu_dom)
+ pgsize_bitmap = mmu_dom->pgsize_bitmap;
+ }
+
+ return pgsize_bitmap;
+}
+
+/*
+ * Check if the framebuffer is entirely made up of pages at least pgsize in
+ * size. Only a heuristic: assumes that each scatterlist entry has been aligned
+ * to the largest page size smaller than its length and that the MMU maps to
+ * the largest page size possible.
+ */
+static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
+ u32 pgsize)
+{
+ int i;
+
+ for (i = 0; i < ms->n_planes; i++) {
+ struct drm_gem_object *obj;
+ struct drm_gem_cma_object *cma_obj;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+
+ obj = drm_gem_fb_get_obj(ms->base.fb, i);
+ cma_obj = to_drm_gem_cma_obj(obj);
+
+ if (cma_obj->sgt)
+ sgt = cma_obj->sgt;
+ else
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+ if (!sgt)
+ return false;
+
+ sgl = sgt->sgl;
+
+ while (sgl) {
+ if (sgl->length < pgsize) {
+ if (!cma_obj->sgt)
+ kfree(sgt);
+ return false;
+ }
+
+ sgl = sg_next(sgl);
+ }
+ if (!cma_obj->sgt)
+ kfree(sgt);
+ }
+
+ return true;
+}
+
+/*
+ * Check if it is possible to enable partial-frame MMU prefetch given the
+ * current format, AFBC state and rotation.
+ */
+static bool malidp_partial_prefetch_supported(u32 format, u64 modifier,
+ unsigned int rotation)
+{
+ bool afbc, sparse;
+
+ /* rotation and horizontal flip not supported for partial prefetch */
+ if (rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X))
+ return false;
+
+ afbc = modifier & DRM_FORMAT_MOD_ARM_AFBC(0);
+ sparse = modifier & AFBC_FORMAT_MOD_SPARSE;
+
+ switch (format) {
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_RGB565:
+ /* always supported */
+ return true;
+
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_BGR565:
+ /* supported, but if AFBC then must be sparse mode */
+ return (!afbc) || (afbc && sparse);
+
+ case DRM_FORMAT_BGR888:
+ /* supported, but not for AFBC */
+ return !afbc;
+
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_YUV420:
+ /* not supported */
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * Select the preferred MMU prefetch mode. Full-frame prefetch is preferred as
+ * long as the framebuffer is all large pages. Otherwise partial-frame prefetch
+ * is selected as long as it is supported for the current format. The selected
+ * page size for prefetch is returned in pgsize_bitmap.
+ */
+static enum mmu_prefetch_mode malidp_mmu_prefetch_select_mode
+ (struct malidp_plane_state *ms, u32 *pgsize_bitmap)
+{
+ u32 pgsizes;
+
+ /* get the full-frame prefetch page size(s) supported by the MMU */
+ pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_FULL_PGSIZES;
+
+ while (pgsizes) {
+ u32 largest_pgsize = 1 << __fls(pgsizes);
+
+ if (malidp_check_pages_threshold(ms, largest_pgsize)) {
+ *pgsize_bitmap = largest_pgsize;
+ return MALIDP_PREFETCH_MODE_FULL;
+ }
+
+ pgsizes -= largest_pgsize;
+ }
+
+ /* get the partial-frame prefetch page size(s) supported by the MMU */
+ pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES;
+
+ if (malidp_partial_prefetch_supported(ms->base.fb->format->format,
+ ms->base.fb->modifier,
+ ms->base.rotation)) {
+ /* partial prefetch using the smallest page size */
+ *pgsize_bitmap = 1 << __ffs(pgsizes);
+ return MALIDP_PREFETCH_MODE_PARTIAL;
+ }
+ *pgsize_bitmap = 0;
+ return MALIDP_PREFETCH_MODE_NONE;
+}
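/*
 * A minimal, standalone sketch (not driver code) of the bitmap walk that
 * malidp_mmu_prefetch_select_mode() performs above: __fls() picks the
 * largest remaining page size, __ffs() the smallest. The user-space main()
 * below uses __builtin_clz() as a stand-in for the kernel's __fls().
 */
#include <stdio.h>

int main(void)
{
	unsigned int pgsizes = (1u << 20) | (1u << 21);	/* SZ_1M | SZ_2M */

	while (pgsizes) {
		/* largest power of two still set in the bitmap */
		unsigned int largest = 1u << (31 - __builtin_clz(pgsizes));

		printf("try full-frame prefetch with %u KiB pages\n",
		       largest / 1024);	/* prints 2048, then 1024 */
		pgsizes -= largest;
	}
	return 0;
}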
+
+static u32 malidp_calc_mmu_control_value(enum mmu_prefetch_mode mode,
+ u8 readahead, u8 n_planes, u32 pgsize)
+{
+ u32 mmu_ctrl = 0;
+
+ if (mode != MALIDP_PREFETCH_MODE_NONE) {
+ mmu_ctrl |= MALIDP_MMU_CTRL_EN;
+
+ if (mode == MALIDP_PREFETCH_MODE_PARTIAL) {
+ mmu_ctrl |= MALIDP_MMU_CTRL_MODE;
+ mmu_ctrl |= MALIDP_MMU_CTRL_PP_NUM_REQ(readahead);
+ }
+
+ if (pgsize == SZ_64K || pgsize == SZ_2M) {
+ int i;
+
+ for (i = 0; i < n_planes; i++)
+ mmu_ctrl |= MALIDP_MMU_CTRL_PX_PS(i);
+ }
+ }
+
+ return mmu_ctrl;
+}
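/*
 * A worked example, assuming the MALIDP_MMU_CTRL_* masks added to
 * malidp_regs.h later in this patch: partial-frame prefetch with the
 * default readahead of 8 and 64K pages on a 2-plane framebuffer gives
 *
 *   mmu_ctrl = MALIDP_MMU_CTRL_EN | MALIDP_MMU_CTRL_MODE |
 *              MALIDP_MMU_CTRL_PP_NUM_REQ(8) |
 *              MALIDP_MMU_CTRL_PX_PS(0) | MALIDP_MMU_CTRL_PX_PS(1)
 *            = 0x1 | 0x10 | 0x8000 | 0x100 | 0x200 = 0x8311.
 */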
+
+static void malidp_de_prefetch_settings(struct malidp_plane *mp,
+ struct malidp_plane_state *ms)
+{
+ if (!mp->layer->mmu_ctrl_offset)
+ return;
+
+ /* get the page sizes supported by the MMU */
+ ms->mmu_prefetch_pgsize = malidp_get_pgsize_bitmap(mp);
+ ms->mmu_prefetch_mode =
+ malidp_mmu_prefetch_select_mode(ms, &ms->mmu_prefetch_pgsize);
+}
+
static int malidp_de_plane_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -180,6 +396,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
struct malidp_plane_state *ms = to_malidp_plane_state(state);
bool rotated = state->rotation & MALIDP_ROTATED_MASK;
struct drm_framebuffer *fb;
+ u16 pixel_alpha = state->pixel_blend_mode;
int i, ret;
if (!state->crtc || !state->fb)
@@ -223,11 +440,20 @@ static int malidp_de_plane_check(struct drm_plane *plane,
if (ret)
return ret;
- /* packed RGB888 / BGR888 can't be rotated or flipped */
- if (state->rotation != DRM_MODE_ROTATE_0 &&
- (fb->format->format == DRM_FORMAT_RGB888 ||
- fb->format->format == DRM_FORMAT_BGR888))
- return -EINVAL;
+ /* validate the rotation constraints for each layer */
+ if (state->rotation != DRM_MODE_ROTATE_0) {
+ if (mp->layer->rot == ROTATE_NONE)
+ return -EINVAL;
+ if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier))
+ return -EINVAL;
+ /*
+ * packed RGB888 / BGR888 can't be rotated or flipped
+ * unless they are stored in a compressed way
+ */
+ if ((fb->format->format == DRM_FORMAT_RGB888 ||
+ fb->format->format == DRM_FORMAT_BGR888) && !(fb->modifier))
+ return -EINVAL;
+ }
ms->rotmem_size = 0;
if (state->rotation & MALIDP_ROTATED_MASK) {
@@ -242,6 +468,14 @@ static int malidp_de_plane_check(struct drm_plane *plane,
ms->rotmem_size = val;
}
+ /* HW can't support plane + pixel blending */
+ if ((state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
+ (pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) &&
+ fb->format->has_alpha)
+ return -EINVAL;
+
+ malidp_de_prefetch_settings(mp, ms);
+
return 0;
}
@@ -318,22 +552,42 @@ static void malidp_de_set_color_encoding(struct malidp_plane *plane,
}
}
+static void malidp_de_set_mmu_control(struct malidp_plane *mp,
+ struct malidp_plane_state *ms)
+{
+ u32 mmu_ctrl;
+
+ /* check hardware supports MMU prefetch */
+ if (!mp->layer->mmu_ctrl_offset)
+ return;
+
+ mmu_ctrl = malidp_calc_mmu_control_value(ms->mmu_prefetch_mode,
+ MALIDP_MMU_PREFETCH_READAHEAD,
+ ms->n_planes,
+ ms->mmu_prefetch_pgsize);
+
+ malidp_hw_write(mp->hwdev, mmu_ctrl,
+ mp->layer->base + mp->layer->mmu_ctrl_offset);
+}
+
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct malidp_plane *mp;
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
+ struct drm_plane_state *state = plane->state;
+ u16 pixel_alpha = state->pixel_blend_mode;
+ u8 plane_alpha = state->alpha >> 8;
u32 src_w, src_h, dest_w, dest_h, val;
int i;
- bool format_has_alpha = plane->state->fb->format->has_alpha;
mp = to_malidp_plane(plane);
/* convert src values from Q16 fixed point to integer */
- src_w = plane->state->src_w >> 16;
- src_h = plane->state->src_h >> 16;
- dest_w = plane->state->crtc_w;
- dest_h = plane->state->crtc_h;
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+ dest_w = state->crtc_w;
+ dest_h = state->crtc_h;
val = malidp_hw_read(mp->hwdev, mp->layer->base);
val = (val & ~LAYER_FORMAT_MASK) | ms->format;
@@ -342,14 +596,17 @@ static void malidp_de_plane_update(struct drm_plane *plane,
for (i = 0; i < ms->n_planes; i++) {
/* calculate the offset for the layer's plane registers */
u16 ptr = mp->layer->ptr + (i << 4);
- dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(plane->state->fb,
- plane->state, i);
+ dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(state->fb,
+ state, i);
malidp_hw_write(mp->hwdev, lower_32_bits(fb_addr), ptr);
malidp_hw_write(mp->hwdev, upper_32_bits(fb_addr), ptr + 4);
}
+
+ malidp_de_set_mmu_control(mp, ms);
+
malidp_de_set_plane_pitches(mp, ms->n_planes,
- plane->state->fb->pitches);
+ state->fb->pitches);
if ((plane->state->color_encoding != old_state->color_encoding) ||
(plane->state->color_range != old_state->color_range))
@@ -362,52 +619,56 @@ static void malidp_de_plane_update(struct drm_plane *plane,
malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
mp->layer->base + MALIDP_LAYER_COMP_SIZE);
- malidp_hw_write(mp->hwdev, LAYER_H_VAL(plane->state->crtc_x) |
- LAYER_V_VAL(plane->state->crtc_y),
+ malidp_hw_write(mp->hwdev, LAYER_H_VAL(state->crtc_x) |
+ LAYER_V_VAL(state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
- if (mp->layer->id == DE_SMART)
+ if (mp->layer->id == DE_SMART) {
+ /*
+ * Enable the first rectangle in the SMART layer to be
+ * able to use it as a drm plane.
+ */
+ malidp_hw_write(mp->hwdev, 1,
+ mp->layer->base + MALIDP550_LS_ENABLE);
malidp_hw_write(mp->hwdev,
LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+ }
/* first clear the rotation bits */
val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
- if (plane->state->rotation & DRM_MODE_ROTATE_MASK)
+ if (state->rotation & DRM_MODE_ROTATE_MASK)
val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
LAYER_ROT_OFFSET;
- if (plane->state->rotation & DRM_MODE_REFLECT_X)
+ if (state->rotation & DRM_MODE_REFLECT_X)
val |= LAYER_H_FLIP;
- if (plane->state->rotation & DRM_MODE_REFLECT_Y)
+ if (state->rotation & DRM_MODE_REFLECT_Y)
val |= LAYER_V_FLIP;
- val &= ~LAYER_COMP_MASK;
- if (format_has_alpha) {
-
- /*
- * always enable pixel alpha blending until we have a way
- * to change blend modes
- */
- val |= LAYER_COMP_PIXEL;
- } else {
+ val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));
- /*
- * do not enable pixel alpha blending as the color channel
- * does not have any alpha information
- */
+ if (state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
val |= LAYER_COMP_PLANE;
-
- /* Set layer alpha coefficient to 0xff ie fully opaque */
- val |= LAYER_ALPHA(0xff);
+ } else if (state->fb->format->has_alpha) {
+ /* We only care about blend mode if the format has alpha */
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PREMULTI:
+ val |= LAYER_COMP_PIXEL | LAYER_PMUL_ENABLE;
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ val |= LAYER_COMP_PIXEL;
+ break;
+ }
}
+ val |= LAYER_ALPHA(plane_alpha);
val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
- if (plane->state->crtc) {
+ if (state->crtc) {
struct malidp_crtc_state *m =
- to_malidp_crtc_state(plane->state->crtc->state);
+ to_malidp_crtc_state(state->crtc->state);
if (m->scaler_config.scale_enable &&
m->scaler_config.plane_src_id == mp->layer->id)
@@ -446,6 +707,9 @@ int malidp_de_planes_init(struct drm_device *drm)
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
u32 *formats;
int ret, i, j, n;
@@ -483,13 +747,10 @@ int malidp_de_planes_init(struct drm_device *drm)
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
+ drm_plane_create_alpha_property(&plane->base);
+ drm_plane_create_blend_mode_property(&plane->base, blend_caps);
+
if (id == DE_SMART) {
- /*
- * Enable the first rectangle in the SMART layer to be
- * able to use it as a drm plane.
- */
- malidp_hw_write(malidp->dev, 1,
- plane->layer->base + MALIDP550_LS_ENABLE);
/* Skip the features which the SMART layer doesn't have. */
continue;
}
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 3579d36b2a71..7ce3e141464d 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -205,6 +205,7 @@
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74
#define MALIDP500_SE_MEMWRITE_BASE 0x00e00
#define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00
@@ -238,6 +239,7 @@
#define MALIDP550_SE_CONTROL 0x08010
#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS 0x08078
#define MALIDP550_SE_MEMWRITE_BASE 0x08100
#define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010
@@ -245,6 +247,17 @@
#define MALIDP550_CONFIG_VALID 0x0c014
#define MALIDP550_CONFIG_ID 0x0ffd4
+/* register offsets specific to DP650 */
+#define MALIDP650_DE_LV_MMU_CTRL 0x000D0
+#define MALIDP650_DE_LG_MMU_CTRL 0x00048
+#define MALIDP650_DE_LS_MMU_CTRL 0x00078
+
+/* bit masks to set the MMU control register */
+#define MALIDP_MMU_CTRL_EN (1 << 0)
+#define MALIDP_MMU_CTRL_MODE (1 << 4)
+#define MALIDP_MMU_CTRL_PX_PS(x) (1 << (8 + (x)))
+#define MALIDP_MMU_CTRL_PP_NUM_REQ(x) (((x) & 0x7f) << 12)
+
/*
* Starting with DP550 the register map blocks have been standardised to the
* following layout:
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a504a5e05676..9b111e846847 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,6 +56,25 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
"Overallocation of the fbdev buffer (%) [default="
__MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]");
+/*
+ * In order to keep user-space compatibility, we want, in certain
+ * use-cases, to keep leaking the fbdev physical address to the user-space
+ * program handling the fbdev buffer.
+ * This is a bad habit, essentially kept alive by closed-source OpenGL
+ * drivers that should really be moved into open-source upstream projects
+ * instead of using legacy physical addresses in user space to communicate
+ * with other out-of-tree kernel modules.
+ *
+ * This module_param *should* be removed as soon as possible; it is
+ * broken, legacy behaviour that a modern fbdev device should not expose.
+ */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+static bool drm_leak_fbdev_smem = false;
+module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
+ "Allow unsafe leaking fbdev physical smem address [default=false]");
+#endif
+
static LIST_HEAD(kernel_fb_helper_list);
static DEFINE_MUTEX(kernel_fb_helper_lock);
@@ -2670,8 +2689,12 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
info->var.pixclock = 0;
- /* don't leak any physical addresses to userspace */
- info->flags |= FBINFO_HIDE_SMEM_START;
+ /* Shamelessly allow physical address leaking to userspace */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+ if (!drm_leak_fbdev_smem)
+#endif
+ /* don't leak any physical addresses to userspace */
+ info->flags |= FBINFO_HIDE_SMEM_START;
/* Need to drop locks to avoid recursive deadlock in
* register_framebuffer. This is ok because the only thing left to do is
@@ -3081,6 +3104,12 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->screen_size = fb->height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
fbi->screen_buffer = buffer->vaddr;
+ /* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+ if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+ fbi->fix.smem_start =
+ page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
strcpy(fbi->fix.id, "DRM emulated");
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
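/*
 * For context, user space observes the effect of FBINFO_HIDE_SMEM_START
 * through the fbdev fixed-info ioctl; when the flag is set, smem_start is
 * expected to read back as zero for unprivileged processes. A minimal
 * sketch:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	int fd = open("/dev/fb0", O_RDONLY);

	if (fd >= 0 && ioctl(fd, FBIOGET_FSCREENINFO, &fix) == 0)
		printf("smem_start = %#lx\n", fix.smem_start);
	if (fd >= 0)
		close(fd);
	return 0;
}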
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 0db486d10d1c..c33f95e08e1b 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <drm/drm_device.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
if (panel->connector)
return -EBUSY;
- panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
- if (!panel->link) {
- dev_err(panel->dev, "failed to link panel to %s\n",
- dev_name(connector->dev->dev));
- return -EINVAL;
- }
-
panel->connector = connector;
panel->drm = connector->dev;
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
*/
int drm_panel_detach(struct drm_panel *panel)
{
- device_link_del(panel->link);
-
panel->connector = NULL;
panel->drm = NULL;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 280a41d33081..ab4e70e63f6e 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -30,6 +30,12 @@ struct drm_dmi_panel_orientation_data {
int orientation;
};
+static const struct drm_dmi_panel_orientation_data acer_s1003 = {
+ .width = 800,
+ .height = 1280,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data asus_t100ha = {
.width = 800,
.height = 1280,
@@ -75,7 +81,13 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
};
static const struct dmi_system_id orientation_data[] = {
- { /* Asus T100HA */
+ { /* Acer One 10 (S1003) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
+ },
+ .driver_data = (void *)&acer_s1003,
+ }, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index b7eaa603f368..c8252cd4c02d 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -157,6 +157,8 @@ static void drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
{
u64 pt_value = 0;
+ WARN_ON(*fence);
+
if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
/* BINARY syncobj always waits on the last pt */
pt_value = syncobj->signal_point;
@@ -935,6 +937,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i) {
+ if (entries[i].fence)
+ continue;
+
drm_syncobj_fence_get_or_add_callback(syncobjs[i],
&entries[i].fence,
&entries[i].syncobj_cb,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 9b2720b41571..83c1f46670bf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct component_match *match = NULL;
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
if (!dev->platform_data) {
struct device_node *core_node;
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
for_each_compatible_node(np, NULL, "vivante,gc") {
if (!of_device_is_available(np))
continue;
- pdev = platform_device_register_simple("etnaviv", -1,
- NULL, 0);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
+
+ pdev = platform_device_alloc("etnaviv", -1);
+ if (!pdev) {
+ ret = -ENOMEM;
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ /*
+ * Apply the same DMA configuration to the virtual etnaviv
+ * device as the GPU we found. This assumes that all Vivante
+ * GPUs in the system share the same DMA constraints.
+ */
+ of_dma_configure(&pdev->dev, np, true);
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
of_node_put(np);
goto unregister_platform_driver;
}
+
etnaviv_drm = pdev;
of_node_put(np);
break;
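/*
 * The switch from platform_device_register_simple() to the alloc/add pair
 * above exists so the DMA configuration can be applied before the device is
 * registered. A generic sketch of the pattern (device name hypothetical,
 * error handling trimmed):
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static struct platform_device *register_virtual_dev(struct device_node *np)
{
	struct platform_device *pdev = platform_device_alloc("example-virt", -1);

	if (!pdev)
		return ERR_PTR(-ENOMEM);

	/* configure DMA while the device is still unregistered */
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	of_dma_configure(&pdev->dev, np, true);

	if (platform_device_add(pdev)) {
		platform_device_put(pdev);	/* undoes the alloc */
		return ERR_PTR(-ENODEV);
	}
	return pdev;
}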
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 69e9b431bf1f..e7c3ed6c9a2e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -105,7 +105,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
change = dma_addr - gpu->hangcheck_dma_addr;
if (change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr;
- schedule_delayed_work(&sched_job->work_tdr,
+ schedule_delayed_work(&sched_job->sched->work_tdr,
sched_job->sched->timeout);
return;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b599f74692e5..6f76baf4550a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -149,37 +149,15 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct exynos_drm_private *private;
-
- if (!drm_dev)
- return 0;
-
- private = drm_dev->dev_private;
-
- drm_kms_helper_poll_disable(drm_dev);
- exynos_drm_fbdev_suspend(drm_dev);
- private->suspend_state = drm_atomic_helper_suspend(drm_dev);
- if (IS_ERR(private->suspend_state)) {
- exynos_drm_fbdev_resume(drm_dev);
- drm_kms_helper_poll_enable(drm_dev);
- return PTR_ERR(private->suspend_state);
- }
- return 0;
+ return drm_mode_config_helper_suspend(drm_dev);
}
static void exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct exynos_drm_private *private;
-
- if (!drm_dev)
- return;
- private = drm_dev->dev_private;
- drm_atomic_helper_resume(drm_dev, private->suspend_state);
- exynos_drm_fbdev_resume(drm_dev);
- drm_kms_helper_poll_enable(drm_dev);
+ drm_mode_config_helper_resume(drm_dev);
}
static const struct dev_pm_ops exynos_drm_pm_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c737c4bd2c19..ec9604f1272b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -92,6 +92,8 @@ struct exynos_drm_plane {
#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1)
#define EXYNOS_DRM_PLANE_CAP_ZPOS (1 << 2)
#define EXYNOS_DRM_PLANE_CAP_TILE (1 << 3)
+#define EXYNOS_DRM_PLANE_CAP_PIX_BLEND (1 << 4)
+#define EXYNOS_DRM_PLANE_CAP_WIN_BLEND (1 << 5)
/*
* Exynos DRM plane configuration structure.
@@ -195,7 +197,6 @@ struct drm_exynos_file_private {
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
- struct drm_atomic_state *suspend_state;
struct device *g2d_dev;
struct device *dma_dev;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 781b82c2c579..07af7758066d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -255,6 +255,7 @@ struct exynos_dsi {
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
struct drm_panel *panel;
+ struct drm_bridge *out_bridge;
struct device *dev;
void __iomem *reg_base;
@@ -279,7 +280,7 @@ struct exynos_dsi {
struct list_head transfer_list;
const struct exynos_dsi_driver_data *driver_data;
- struct device_node *bridge_node;
+ struct device_node *in_bridge_node;
};
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
@@ -1382,29 +1383,37 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
return;
pm_runtime_get_sync(dsi->dev);
-
dsi->state |= DSIM_STATE_ENABLED;
- ret = drm_panel_prepare(dsi->panel);
- if (ret < 0) {
- dsi->state &= ~DSIM_STATE_ENABLED;
- pm_runtime_put_sync(dsi->dev);
- return;
+ if (dsi->panel) {
+ ret = drm_panel_prepare(dsi->panel);
+ if (ret < 0)
+ goto err_put_sync;
+ } else {
+ drm_bridge_pre_enable(dsi->out_bridge);
}
exynos_dsi_set_display_mode(dsi);
exynos_dsi_set_display_enable(dsi, true);
- ret = drm_panel_enable(dsi->panel);
- if (ret < 0) {
- dsi->state &= ~DSIM_STATE_ENABLED;
- exynos_dsi_set_display_enable(dsi, false);
- drm_panel_unprepare(dsi->panel);
- pm_runtime_put_sync(dsi->dev);
- return;
+ if (dsi->panel) {
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0)
+ goto err_display_disable;
+ } else {
+ drm_bridge_enable(dsi->out_bridge);
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
+ return;
+
+err_display_disable:
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
+
+err_put_sync:
+ dsi->state &= ~DSIM_STATE_ENABLED;
+ pm_runtime_put(dsi->dev);
}
static void exynos_dsi_disable(struct drm_encoder *encoder)
@@ -1417,11 +1426,11 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
drm_panel_disable(dsi->panel);
+ drm_bridge_disable(dsi->out_bridge);
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
-
+ drm_bridge_post_disable(dsi->out_bridge);
dsi->state &= ~DSIM_STATE_ENABLED;
-
pm_runtime_put_sync(dsi->dev);
}
@@ -1499,7 +1508,30 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct exynos_dsi *dsi = host_to_dsi(host);
- struct drm_device *drm = dsi->connector.dev;
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_device *drm = encoder->dev;
+ struct drm_bridge *out_bridge;
+
+ out_bridge = of_drm_find_bridge(device->dev.of_node);
+ if (out_bridge) {
+ drm_bridge_attach(encoder, out_bridge, NULL);
+ dsi->out_bridge = out_bridge;
+ encoder->bridge = NULL;
+ } else {
+ int ret = exynos_dsi_create_connector(encoder);
+
+ if (ret) {
+ DRM_ERROR("failed to create connector ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (dsi->panel) {
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ dsi->connector.status = connector_status_connected;
+ }
+ }
/*
* This is a temporary solution and should be replaced by a more generic way.
@@ -1518,14 +1550,6 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
- dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (IS_ERR(dsi->panel))
- dsi->panel = NULL;
-
- if (dsi->panel) {
- drm_panel_attach(dsi->panel, &dsi->connector);
- dsi->connector.status = connector_status_connected;
- }
exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
@@ -1541,19 +1565,21 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct exynos_dsi *dsi = host_to_dsi(host);
- struct drm_device *drm = dsi->connector.dev;
-
- mutex_lock(&drm->mode_config.mutex);
+ struct drm_device *drm = dsi->encoder.dev;
if (dsi->panel) {
+ mutex_lock(&drm->mode_config.mutex);
exynos_dsi_disable(&dsi->encoder);
drm_panel_detach(dsi->panel);
dsi->panel = NULL;
dsi->connector.status = connector_status_disconnected;
+ mutex_unlock(&drm->mode_config.mutex);
+ } else {
+ if (dsi->out_bridge->funcs->detach)
+ dsi->out_bridge->funcs->detach(dsi->out_bridge);
+ dsi->out_bridge = NULL;
}
- mutex_unlock(&drm->mode_config.mutex);
-
if (drm->mode_config.poll_enabled)
drm_kms_helper_hotplug_event(drm);
@@ -1634,7 +1660,7 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
- dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
+ dsi->in_bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
return 0;
}
@@ -1645,7 +1671,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct drm_encoder *encoder = dev_get_drvdata(dev);
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_device *drm_dev = data;
- struct drm_bridge *bridge;
+ struct drm_bridge *in_bridge;
int ret;
drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
@@ -1657,17 +1683,10 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
if (ret < 0)
return ret;
- ret = exynos_dsi_create_connector(encoder);
- if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
- drm_encoder_cleanup(encoder);
- return ret;
- }
-
- if (dsi->bridge_node) {
- bridge = of_drm_find_bridge(dsi->bridge_node);
- if (bridge)
- drm_bridge_attach(encoder, bridge, NULL);
+ if (dsi->in_bridge_node) {
+ in_bridge = of_drm_find_bridge(dsi->in_bridge_node);
+ if (in_bridge)
+ drm_bridge_attach(encoder, in_bridge, NULL);
}
return mipi_dsi_host_register(&dsi->dsi_host);
@@ -1786,7 +1805,7 @@ static int exynos_dsi_remove(struct platform_device *pdev)
{
struct exynos_dsi *dsi = platform_get_drvdata(pdev);
- of_node_put(dsi->bridge_node);
+ of_node_put(dsi->in_bridge_node);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 132dd52d0ac7..918dd2c82209 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -270,20 +270,3 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
private->fb_helper = NULL;
}
-void exynos_drm_fbdev_suspend(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
-
- console_lock();
- drm_fb_helper_set_suspend(private->fb_helper, 1);
- console_unlock();
-}
-
-void exynos_drm_fbdev_resume(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
-
- console_lock();
- drm_fb_helper_set_suspend(private->fb_helper, 0);
- console_unlock();
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index b33847223a85..6840b6aadbc0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -19,8 +19,6 @@
int exynos_drm_fbdev_init(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
-void exynos_drm_fbdev_suspend(struct drm_device *drm);
-void exynos_drm_fbdev_resume(struct drm_device *drm);
#else
@@ -39,14 +37,6 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
#define exynos_drm_output_poll_changed (NULL)
-static inline void exynos_drm_fbdev_suspend(struct drm_device *drm)
-{
-}
-
-static inline void exynos_drm_fbdev_resume(struct drm_device *drm)
-{
-}
-
#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7ba414b52faa..ce15d46bfce8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -448,7 +448,7 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
}
-static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
+static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
@@ -514,6 +514,9 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
break;
}
+ if (tiled)
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+
gsc_write(cfg, GSC_IN_CON);
}
@@ -632,7 +635,7 @@ static void gsc_src_set_addr(struct gsc_context *ctx, u32 buf_id,
gsc_src_set_buf_seq(ctx, buf_id, true);
}
-static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
+static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
@@ -698,6 +701,9 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
break;
}
+ if (tiled)
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+
gsc_write(cfg, GSC_OUT_CON);
}
@@ -1122,11 +1128,11 @@ static int gsc_commit(struct exynos_drm_ipp *ipp,
return ret;
}
- gsc_src_set_fmt(ctx, task->src.buf.fourcc);
+ gsc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
gsc_src_set_transf(ctx, task->transform.rotation);
gsc_src_set_size(ctx, &task->src);
gsc_src_set_addr(ctx, 0, &task->src);
- gsc_dst_set_fmt(ctx, task->dst.buf.fourcc);
+ gsc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
gsc_dst_set_size(ctx, &task->dst);
gsc_dst_set_addr(ctx, 0, &task->dst);
gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
@@ -1200,6 +1206,10 @@ static const unsigned int gsc_formats[] = {
DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422,
};
+static const unsigned int gsc_tiled_formats[] = {
+ DRM_FORMAT_NV12, DRM_FORMAT_NV21,
+};
+
static int gsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1207,23 +1217,24 @@ static int gsc_probe(struct platform_device *pdev)
struct exynos_drm_ipp_formats *formats;
struct gsc_context *ctx;
struct resource *res;
- int ret, i;
+ int num_formats, ret, i, j;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- formats = devm_kcalloc(dev,
- ARRAY_SIZE(gsc_formats), sizeof(*formats),
- GFP_KERNEL);
- if (!formats)
- return -ENOMEM;
-
driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
ctx->dev = dev;
ctx->num_clocks = driver_data->num_clocks;
ctx->clk_names = driver_data->clk_names;
+ /* construct formats/limits array */
+ num_formats = ARRAY_SIZE(gsc_formats) + ARRAY_SIZE(gsc_tiled_formats);
+ formats = devm_kcalloc(dev, num_formats, sizeof(*formats), GFP_KERNEL);
+ if (!formats)
+ return -ENOMEM;
+
+ /* linear formats */
for (i = 0; i < ARRAY_SIZE(gsc_formats); i++) {
formats[i].fourcc = gsc_formats[i];
formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
@@ -1231,8 +1242,19 @@ static int gsc_probe(struct platform_device *pdev)
formats[i].limits = driver_data->limits;
formats[i].num_limits = driver_data->num_limits;
}
+
+ /* tiled formats */
+ for (j = i, i = 0; i < ARRAY_SIZE(gsc_tiled_formats); j++, i++) {
+ formats[j].fourcc = gsc_tiled_formats[i];
+ formats[j].modifier = DRM_FORMAT_MOD_SAMSUNG_16_16_TILE;
+ formats[j].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
+ DRM_EXYNOS_IPP_FORMAT_DESTINATION;
+ formats[j].limits = driver_data->limits;
+ formats[j].num_limits = driver_data->num_limits;
+ }
+
ctx->formats = formats;
- ctx->num_formats = ARRAY_SIZE(gsc_formats);
+ ctx->num_formats = num_formats;
/* clock control */
for (i = 0; i < ctx->num_clocks; i++) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index dba29aec59b4..df0508e0e49e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -131,16 +131,14 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
if (plane->state) {
exynos_state = to_exynos_plane_state(plane->state);
- if (exynos_state->base.fb)
- drm_framebuffer_put(exynos_state->base.fb);
+ __drm_atomic_helper_plane_destroy_state(plane->state);
kfree(exynos_state);
plane->state = NULL;
}
exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
if (exynos_state) {
- plane->state = &exynos_state->base;
- plane->state->plane = plane;
+ __drm_atomic_helper_plane_reset(plane, &exynos_state->base);
plane->state->zpos = exynos_plane->config->zpos;
}
}
@@ -300,6 +298,10 @@ int exynos_plane_init(struct drm_device *dev,
const struct exynos_drm_plane_config *config)
{
int err;
+ unsigned int supported_modes = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
+ struct drm_plane *plane = &exynos_plane->base;
err = drm_universal_plane_init(dev, &exynos_plane->base,
1 << dev->mode_config.num_crtc,
@@ -320,5 +322,11 @@ int exynos_plane_init(struct drm_device *dev,
exynos_plane_attach_zpos_property(&exynos_plane->base, config->zpos,
!(config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS));
+ if (config->capabilities & EXYNOS_DRM_PLANE_CAP_PIX_BLEND)
+ drm_plane_create_blend_mode_property(plane, supported_modes);
+
+ if (config->capabilities & EXYNOS_DRM_PLANE_CAP_WIN_BLEND)
+ drm_plane_create_alpha_property(plane);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 0ddb6eec7b11..cd66774e817d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -49,56 +49,46 @@ struct scaler_context {
const struct scaler_data *scaler_data;
};
-static u32 scaler_get_format(u32 drm_fmt)
+struct scaler_format {
+ u32 drm_fmt;
+ u32 internal_fmt;
+ u32 chroma_tile_w;
+ u32 chroma_tile_h;
+};
+
+static const struct scaler_format scaler_formats[] = {
+ { DRM_FORMAT_NV12, SCALER_YUV420_2P_UV, 8, 8 },
+ { DRM_FORMAT_NV21, SCALER_YUV420_2P_VU, 8, 8 },
+ { DRM_FORMAT_YUV420, SCALER_YUV420_3P, 8, 8 },
+ { DRM_FORMAT_YUYV, SCALER_YUV422_1P_YUYV, 16, 16 },
+ { DRM_FORMAT_UYVY, SCALER_YUV422_1P_UYVY, 16, 16 },
+ { DRM_FORMAT_YVYU, SCALER_YUV422_1P_YVYU, 16, 16 },
+ { DRM_FORMAT_NV16, SCALER_YUV422_2P_UV, 8, 16 },
+ { DRM_FORMAT_NV61, SCALER_YUV422_2P_VU, 8, 16 },
+ { DRM_FORMAT_YUV422, SCALER_YUV422_3P, 8, 16 },
+ { DRM_FORMAT_NV24, SCALER_YUV444_2P_UV, 16, 16 },
+ { DRM_FORMAT_NV42, SCALER_YUV444_2P_VU, 16, 16 },
+ { DRM_FORMAT_YUV444, SCALER_YUV444_3P, 16, 16 },
+ { DRM_FORMAT_RGB565, SCALER_RGB_565, 0, 0 },
+ { DRM_FORMAT_XRGB1555, SCALER_ARGB1555, 0, 0 },
+ { DRM_FORMAT_ARGB1555, SCALER_ARGB1555, 0, 0 },
+ { DRM_FORMAT_XRGB4444, SCALER_ARGB4444, 0, 0 },
+ { DRM_FORMAT_ARGB4444, SCALER_ARGB4444, 0, 0 },
+ { DRM_FORMAT_XRGB8888, SCALER_ARGB8888, 0, 0 },
+ { DRM_FORMAT_ARGB8888, SCALER_ARGB8888, 0, 0 },
+ { DRM_FORMAT_RGBX8888, SCALER_RGBA8888, 0, 0 },
+ { DRM_FORMAT_RGBA8888, SCALER_RGBA8888, 0, 0 },
+};
+
+static const struct scaler_format *scaler_get_format(u32 drm_fmt)
{
- switch (drm_fmt) {
- case DRM_FORMAT_NV12:
- return SCALER_YUV420_2P_UV;
- case DRM_FORMAT_NV21:
- return SCALER_YUV420_2P_VU;
- case DRM_FORMAT_YUV420:
- return SCALER_YUV420_3P;
- case DRM_FORMAT_YUYV:
- return SCALER_YUV422_1P_YUYV;
- case DRM_FORMAT_UYVY:
- return SCALER_YUV422_1P_UYVY;
- case DRM_FORMAT_YVYU:
- return SCALER_YUV422_1P_YVYU;
- case DRM_FORMAT_NV16:
- return SCALER_YUV422_2P_UV;
- case DRM_FORMAT_NV61:
- return SCALER_YUV422_2P_VU;
- case DRM_FORMAT_YUV422:
- return SCALER_YUV422_3P;
- case DRM_FORMAT_NV24:
- return SCALER_YUV444_2P_UV;
- case DRM_FORMAT_NV42:
- return SCALER_YUV444_2P_VU;
- case DRM_FORMAT_YUV444:
- return SCALER_YUV444_3P;
- case DRM_FORMAT_RGB565:
- return SCALER_RGB_565;
- case DRM_FORMAT_XRGB1555:
- return SCALER_ARGB1555;
- case DRM_FORMAT_ARGB1555:
- return SCALER_ARGB1555;
- case DRM_FORMAT_XRGB4444:
- return SCALER_ARGB4444;
- case DRM_FORMAT_ARGB4444:
- return SCALER_ARGB4444;
- case DRM_FORMAT_XRGB8888:
- return SCALER_ARGB8888;
- case DRM_FORMAT_ARGB8888:
- return SCALER_ARGB8888;
- case DRM_FORMAT_RGBX8888:
- return SCALER_RGBA8888;
- case DRM_FORMAT_RGBA8888:
- return SCALER_RGBA8888;
- default:
- break;
- }
+ int i;
- return 0;
+ for (i = 0; i < ARRAY_SIZE(scaler_formats); i++)
+ if (scaler_formats[i].drm_fmt == drm_fmt)
+ return &scaler_formats[i];
+
+ return NULL;
}
static inline int scaler_reset(struct scaler_context *scaler)
@@ -152,11 +142,11 @@ static inline void scaler_enable_int(struct scaler_context *scaler)
}
static inline void scaler_set_src_fmt(struct scaler_context *scaler,
- u32 src_fmt)
+ u32 src_fmt, u32 tile)
{
u32 val;
- val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt);
+ val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt) | (tile << 10);
scaler_write(val, SCALER_SRC_CFG);
}
@@ -188,15 +178,20 @@ static inline void scaler_set_src_span(struct scaler_context *scaler,
scaler_write(val, SCALER_SRC_SPAN);
}
-static inline void scaler_set_src_luma_pos(struct scaler_context *scaler,
- struct drm_exynos_ipp_task_rect *src_pos)
+static inline void scaler_set_src_luma_chroma_pos(struct scaler_context *scaler,
+ struct drm_exynos_ipp_task_rect *src_pos,
+ const struct scaler_format *fmt)
{
u32 val;
val = SCALER_SRC_Y_POS_SET_YH_POS(src_pos->x << 2);
val |= SCALER_SRC_Y_POS_SET_YV_POS(src_pos->y << 2);
scaler_write(val, SCALER_SRC_Y_POS);
- scaler_write(val, SCALER_SRC_C_POS); /* ATTENTION! */
+ val = SCALER_SRC_C_POS_SET_CH_POS(
+ (src_pos->x * fmt->chroma_tile_w / 16) << 2);
+ val |= SCALER_SRC_C_POS_SET_CV_POS(
+ (src_pos->y * fmt->chroma_tile_h / 16) << 2);
+ scaler_write(val, SCALER_SRC_C_POS);
}
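/*
 * A worked example of the chroma addressing above: for NV12
 * (chroma_tile_w = chroma_tile_h = 8 in the scaler_formats table), a luma
 * position (x, y) maps to a chroma position of (x * 8 / 16, y * 8 / 16) =
 * (x/2, y/2), i.e. the 4:2:0 chroma plane is addressed at half resolution,
 * where the removed "ATTENTION!" line wrongly reused the luma position.
 */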
static inline void scaler_set_src_wh(struct scaler_context *scaler,
@@ -366,11 +361,12 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
struct scaler_context *scaler =
container_of(ipp, struct scaler_context, ipp);
- u32 src_fmt = scaler_get_format(task->src.buf.fourcc);
struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;
-
- u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
+ const struct scaler_format *src_fmt, *dst_fmt;
+
+ src_fmt = scaler_get_format(task->src.buf.fourcc);
+ dst_fmt = scaler_get_format(task->dst.buf.fourcc);
pm_runtime_get_sync(scaler->dev);
if (scaler_reset(scaler)) {
@@ -380,13 +376,14 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
scaler->task = task;
- scaler_set_src_fmt(scaler, src_fmt);
+ scaler_set_src_fmt(
+ scaler, src_fmt->internal_fmt, task->src.buf.modifier != 0);
scaler_set_src_base(scaler, &task->src);
scaler_set_src_span(scaler, &task->src);
- scaler_set_src_luma_pos(scaler, src_pos);
+ scaler_set_src_luma_chroma_pos(scaler, src_pos, src_fmt);
scaler_set_src_wh(scaler, src_pos);
- scaler_set_dst_fmt(scaler, dst_fmt);
+ scaler_set_dst_fmt(scaler, dst_fmt->internal_fmt);
scaler_set_dst_base(scaler, &task->dst);
scaler_set_dst_span(scaler, &task->dst);
scaler_set_dst_luma_pos(scaler, dst_pos);
@@ -617,6 +614,16 @@ static const struct drm_exynos_ipp_limit scaler_5420_one_pixel_limits[] = {
.v = { 65536 * 1 / 4, 65536 * 16 }) },
};
+static const struct drm_exynos_ipp_limit scaler_5420_tile_limits[] = {
+ { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K })},
+ { IPP_SIZE_LIMIT(AREA, .h.align = 16, .v.align = 16) },
+ { IPP_SCALE_LIMIT(.h = {1, 1}, .v = {1, 1})},
+ { }
+};
+
+#define IPP_SRCDST_TILE_FORMAT(f, l) \
+ IPP_SRCDST_MFORMAT(f, DRM_FORMAT_MOD_SAMSUNG_16_16_TILE, (l))
+
static const struct exynos_drm_ipp_formats exynos5420_formats[] = {
/* SCALER_YUV420_2P_UV */
{ IPP_SRCDST_FORMAT(NV21, scaler_5420_two_pixel_hv_limits) },
@@ -680,6 +687,18 @@ static const struct exynos_drm_ipp_formats exynos5420_formats[] = {
/* SCALER_RGBA8888 */
{ IPP_SRCDST_FORMAT(RGBA8888, scaler_5420_one_pixel_limits) },
+
+ /* SCALER_YUV420_2P_UV TILE */
+ { IPP_SRCDST_TILE_FORMAT(NV21, scaler_5420_tile_limits) },
+
+ /* SCALER_YUV420_2P_VU TILE */
+ { IPP_SRCDST_TILE_FORMAT(NV12, scaler_5420_tile_limits) },
+
+ /* SCALER_YUV420_3P TILE */
+ { IPP_SRCDST_TILE_FORMAT(YUV420, scaler_5420_tile_limits) },
+
+ /* SCALER_YUV422_1P_YUYV TILE */
+ { IPP_SRCDST_TILE_FORMAT(YUYV, scaler_5420_tile_limits) },
};
static const struct scaler_data exynos5420_data = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index ffbf4a950f69..e3a4ecbc503b 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -131,14 +131,18 @@ static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
.pixel_formats = mixer_formats,
.num_pixel_formats = ARRAY_SIZE(mixer_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
- EXYNOS_DRM_PLANE_CAP_ZPOS,
+ EXYNOS_DRM_PLANE_CAP_ZPOS |
+ EXYNOS_DRM_PLANE_CAP_PIX_BLEND |
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
}, {
.zpos = 1,
.type = DRM_PLANE_TYPE_CURSOR,
.pixel_formats = mixer_formats,
.num_pixel_formats = ARRAY_SIZE(mixer_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
- EXYNOS_DRM_PLANE_CAP_ZPOS,
+ EXYNOS_DRM_PLANE_CAP_ZPOS |
+ EXYNOS_DRM_PLANE_CAP_PIX_BLEND |
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
}, {
.zpos = 2,
.type = DRM_PLANE_TYPE_OVERLAY,
@@ -146,7 +150,8 @@ static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
.num_pixel_formats = ARRAY_SIZE(vp_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
EXYNOS_DRM_PLANE_CAP_ZPOS |
- EXYNOS_DRM_PLANE_CAP_TILE,
+ EXYNOS_DRM_PLANE_CAP_TILE |
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND,
},
};
@@ -309,31 +314,42 @@ static void vp_default_filter(struct mixer_context *ctx)
}
static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
- bool alpha)
+ unsigned int pixel_alpha, unsigned int alpha)
{
+ u32 win_alpha = alpha >> 8;
u32 val;
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
- if (alpha) {
- /* blending based on pixel alpha */
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
val |= MXR_GRP_CFG_BLEND_PRE_MUL;
val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ break;
+ }
+
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= win_alpha;
}
mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win),
val, MXR_GRP_CFG_MISC_MASK);
}
-static void mixer_cfg_vp_blend(struct mixer_context *ctx)
+static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
{
- u32 val;
+ u32 win_alpha = alpha >> 8;
+ u32 val = 0;
- /*
- * No blending at the moment since the NV12/NV21 pixelformats don't
- * have an alpha channel. However the mixer supports a global alpha
- * value for a layer. Once this functionality is exposed, we can
- * support blending of the video layer through this.
- */
- val = 0;
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val |= MXR_VID_CFG_BLEND_EN;
+ val |= win_alpha;
+ }
mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
}
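/*
 * A worked example of the alpha conversion used by both helpers above: the
 * DRM alpha property is 16 bits wide, so DRM_BLEND_ALPHA_OPAQUE (0xffff)
 * >> 8 yields a register value of 0xff, and half transparency (0x8000)
 * becomes 0x80; only non-opaque values set MXR_VID_CFG_BLEND_EN or
 * MXR_GRP_CFG_WIN_BLEND_EN.
 */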
@@ -529,7 +545,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_write(ctx, VP_BOT_C_PTR, chroma_addr[1]);
mixer_cfg_layer(ctx, plane->index, priority, true);
- mixer_cfg_vp_blend(ctx);
+ mixer_cfg_vp_blend(ctx, state->base.alpha);
spin_unlock_irqrestore(&ctx->reg_slock, flags);
@@ -553,10 +569,16 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
unsigned int win = plane->index;
unsigned int x_ratio = 0, y_ratio = 0;
unsigned int dst_x_offset, dst_y_offset;
+ unsigned int pixel_alpha;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
+ if (fb->format->has_alpha)
+ pixel_alpha = state->base.pixel_blend_mode;
+ else
+ pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
+
switch (fb->format->format) {
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
@@ -616,7 +638,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
mixer_reg_write(ctx, MXR_GRAPHIC_BASE(win), dma_addr);
mixer_cfg_layer(ctx, win, priority, true);
- mixer_cfg_gfx_blend(ctx, win, fb->format->has_alpha);
+ mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
/* layer update mandatory for mixer 16.0.33.0 */
if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 189cfa2470a8..d2b8194a07bf 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -109,12 +109,15 @@
#define MXR_CFG_SCAN_HD (1 << 0)
#define MXR_CFG_SCAN_MASK 0x47
+/* bits for MXR_VIDEO_CFG */
+#define MXR_VID_CFG_BLEND_EN (1 << 16)
+
/* bits for MXR_GRAPHICn_CFG */
#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
#define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17)
#define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16)
-#define MXR_GRP_CFG_MISC_MASK ((3 << 16) | (3 << 20))
+#define MXR_GRP_CFG_MISC_MASK ((3 << 16) | (3 << 20) | 0xff)
#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f7051e97afb..4f3ac0a12889 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4117,6 +4117,17 @@ i915_ring_test_irq_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ /* GuC keeps the user interrupt permanently enabled for submission */
+ if (USES_GUC_SUBMISSION(i915))
+ return -ENODEV;
+
+ /*
+ * From Icelake (gen11) onwards, we can no longer individually mask
+ * interrupt generation from each engine.
+ */
+ if (INTEL_GEN(i915) >= 11)
+ return -ENODEV;
+
val &= INTEL_INFO(i915)->ring_mask;
DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
@@ -4178,7 +4189,7 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (val & DROP_RESET_SEQNO) {
+ if (ret == 0 && val & DROP_RESET_SEQNO) {
intel_runtime_pm_get(i915);
ret = i915_gem_set_global_seqno(&i915->drm, 1);
intel_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2ddf8538cb47..44e2c0f5ec50 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1063,6 +1063,300 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
+static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
+{
+ if (size == 0)
+ return I915_DRAM_RANK_INVALID;
+ if (rank == SKL_DRAM_RANK_SINGLE)
+ return I915_DRAM_RANK_SINGLE;
+ else if (rank == SKL_DRAM_RANK_DUAL)
+ return I915_DRAM_RANK_DUAL;
+
+ return I915_DRAM_RANK_INVALID;
+}
+
+static bool
+skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
+{
+ if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
+ return true;
+ else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
+ return true;
+ else if (rank == I915_DRAM_RANK_SINGLE && width == 16 && size == 8)
+ return true;
+ else if (rank == I915_DRAM_RANK_DUAL && width == 16 && size == 16)
+ return true;
+
+ return false;
+}
+
+static int
+skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
+{
+ u32 tmp_l, tmp_s;
+ u32 s_val = val >> SKL_DRAM_S_SHIFT;
+
+ if (!val)
+ return -EINVAL;
+
+ tmp_l = val & SKL_DRAM_SIZE_MASK;
+ tmp_s = s_val & SKL_DRAM_SIZE_MASK;
+
+ if (tmp_l == 0 && tmp_s == 0)
+ return -EINVAL;
+
+ ch->l_info.size = tmp_l;
+ ch->s_info.size = tmp_s;
+
+ tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ ch->l_info.width = (1 << tmp_l) * 8;
+ ch->s_info.width = (1 << tmp_s) * 8;
+
+ tmp_l = val & SKL_DRAM_RANK_MASK;
+ tmp_s = s_val & SKL_DRAM_RANK_MASK;
+ ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
+ ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);
+
+ if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
+ ch->s_info.rank == I915_DRAM_RANK_DUAL)
+ ch->rank = I915_DRAM_RANK_DUAL;
+ else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
+ ch->s_info.rank == I915_DRAM_RANK_SINGLE)
+ ch->rank = I915_DRAM_RANK_DUAL;
+ else
+ ch->rank = I915_DRAM_RANK_SINGLE;
+
+ ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
+ ch->l_info.width) ||
+ skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
+ ch->s_info.width);
+
+ DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
+ ch->l_info.size, ch->l_info.width,
+ ch->l_info.rank ? "dual" : "single",
+ ch->s_info.size, ch->s_info.width,
+ ch->s_info.rank ? "dual" : "single");
+
+ return 0;
+}
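/*
 * A worked example of the width decode above: the two-bit width field is a
 * log2 multiplier of x8 parts, so tmp = 0, 1, 2 decodes to x8, x16 and x32
 * DRAM devices via (1 << tmp) * 8; the size field is reported directly in
 * GB. The exact mask layout is register specific.
 */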
+
+static bool
+intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
+ struct dram_channel_info *ch0)
+{
+ return (val_ch0 == val_ch1 &&
+ (ch0->s_info.size == 0 ||
+ (ch0->l_info.size == ch0->s_info.size &&
+ ch0->l_info.width == ch0->s_info.width &&
+ ch0->l_info.rank == ch0->s_info.rank)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ struct dram_channel_info ch0, ch1;
+ u32 val_ch0, val_ch1;
+ int ret;
+
+ val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(&ch0, val_ch0);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(&ch1, val_ch1);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ if (dram_info->num_channels == 0) {
+ DRM_INFO("Number of memory channels is zero\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid_dimm = true;
+
+ /*
+ * If either channel is a single-rank channel, the worst-case output
+ * will be the same as for single-rank memory, so treat the whole
+ * configuration as single rank.
+ */
+ if (ch0.rank == I915_DRAM_RANK_SINGLE ||
+ ch1.rank == I915_DRAM_RANK_SINGLE)
+ dram_info->rank = I915_DRAM_RANK_SINGLE;
+ else
+ dram_info->rank = max(ch0.rank, ch1.rank);
+
+ if (dram_info->rank == I915_DRAM_RANK_INVALID) {
+ DRM_INFO("couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
+ dram_info->is_16gb_dimm = true;
+
+ dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
+ val_ch1,
+ &ch0);
+
+ DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
+ dev_priv->dram_info.symmetric_memory ? "" : "not ");
+ return 0;
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ u32 mem_freq_khz, val;
+ int ret;
+
+ ret = skl_dram_get_channels_info(dev_priv);
+ if (ret)
+ return ret;
+
+ val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
+ SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_info->bandwidth_kbps = dram_info->num_channels *
+ mem_freq_khz * 8;
+
+ if (dram_info->bandwidth_kbps == 0) {
+ DRM_INFO("Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+ return 0;
+}
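/*
 * A worked example of the bandwidth estimate above, with illustrative
 * numbers: dual-channel DDR4-2400 gives mem_freq_khz = 2,400,000, so
 * bandwidth_kbps = 2 channels * 2,400,000 * 8 bytes = 38,400,000 kBps
 * (~38.4 GB/s). The BXT variant below counts 4 bytes per active channel
 * instead.
 */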
+
+static int
+bxt_get_dram_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ u32 dram_channels;
+ u32 mem_freq_khz, val;
+ u8 num_active_channels;
+ int i;
+
+ val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
+ mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
+ BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
+ num_active_channels = hweight32(dram_channels);
+
+ /* Each active bit represents a 4-byte-wide memory channel */
+ dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
+
+ if (dram_info->bandwidth_kbps == 0) {
+ DRM_INFO("Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+ */
+ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+ u8 size, width;
+ enum dram_rank rank;
+ u32 tmp;
+
+ val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
+ if (val == 0xFFFFFFFF)
+ continue;
+
+ dram_info->num_channels++;
+ tmp = val & BXT_DRAM_RANK_MASK;
+
+ if (tmp == BXT_DRAM_RANK_SINGLE)
+ rank = I915_DRAM_RANK_SINGLE;
+ else if (tmp == BXT_DRAM_RANK_DUAL)
+ rank = I915_DRAM_RANK_DUAL;
+ else
+ rank = I915_DRAM_RANK_INVALID;
+
+ tmp = val & BXT_DRAM_SIZE_MASK;
+ if (tmp == BXT_DRAM_SIZE_4GB)
+ size = 4;
+ else if (tmp == BXT_DRAM_SIZE_6GB)
+ size = 6;
+ else if (tmp == BXT_DRAM_SIZE_8GB)
+ size = 8;
+ else if (tmp == BXT_DRAM_SIZE_12GB)
+ size = 12;
+ else if (tmp == BXT_DRAM_SIZE_16GB)
+ size = 16;
+ else
+ size = 0;
+
+ tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+ width = (1 << tmp) * 8;
+ DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
+ width, rank == I915_DRAM_RANK_SINGLE ? "single" :
+ rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");
+
+ /*
+ * If any channel is a single-rank channel, the worst-case
+ * output will be the same as for single-rank memory, so
+ * treat the configuration as single rank.
+ */
+ if (dram_info->rank == I915_DRAM_RANK_INVALID)
+ dram_info->rank = rank;
+ else if (rank == I915_DRAM_RANK_SINGLE)
+ dram_info->rank = I915_DRAM_RANK_SINGLE;
+ }
+
+ if (dram_info->rank == I915_DRAM_RANK_INVALID) {
+ DRM_INFO("couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid_dimm = true;
+ dram_info->valid = true;
+ return 0;
+}
+
+static void
+intel_get_dram_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ char bandwidth_str[32];
+ int ret;
+
+ dram_info->valid = false;
+ dram_info->valid_dimm = false;
+ dram_info->is_16gb_dimm = false;
+ dram_info->rank = I915_DRAM_RANK_INVALID;
+ dram_info->bandwidth_kbps = 0;
+ dram_info->num_channels = 0;
+
+ if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
+ return;
+
+ /* Need to calculate bandwidth only for Gen9 */
+ if (IS_BROXTON(dev_priv))
+ ret = bxt_get_dram_info(dev_priv);
+ else if (INTEL_GEN(dev_priv) == 9)
+ ret = skl_get_dram_info(dev_priv);
+ else
+ ret = skl_dram_get_channels_info(dev_priv);
+ if (ret)
+ return;
+
+ if (dram_info->bandwidth_kbps)
+ sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
+ else
+ sprintf(bandwidth_str, "unknown");
+ DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
+ bandwidth_str, dram_info->num_channels);
+ DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
+ (dram_info->rank == I915_DRAM_RANK_DUAL) ?
+ "dual" : "single", yesno(dram_info->is_16gb_dimm));
+}
+
/**
* i915_driver_init_hw - setup state requiring device access
* @dev_priv: device private
@@ -1180,6 +1474,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
goto err_msi;
intel_opregion_setup(dev_priv);
+ /*
+	 * Fill the DRAM structure with the raw system bandwidth and
+	 * DRAM topology; this is used for memory latency calculations.
+ */
+ intel_get_dram_info(dev_priv);
+
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7ea442033a57..8624b4bdc242 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -87,8 +87,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180906"
-#define DRIVER_TIMESTAMP 1536242083
+#define DRIVER_DATE "20180921"
+#define DRIVER_TIMESTAMP 1537521997
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -1946,6 +1946,20 @@ struct drm_i915_private {
bool distrust_bios_wm;
} wm;
+ struct dram_info {
+ bool valid;
+ bool valid_dimm;
+ bool is_16gb_dimm;
+ u8 num_channels;
+ enum dram_rank {
+ I915_DRAM_RANK_INVALID = 0,
+ I915_DRAM_RANK_SINGLE,
+ I915_DRAM_RANK_DUAL
+ } rank;
+ u32 bandwidth_kbps;
+ bool symmetric_memory;
+ } dram_info;
+
struct i915_runtime_pm runtime_pm;
struct {
@@ -2159,6 +2173,15 @@ struct drm_i915_private {
*/
};
+struct dram_channel_info {
+ struct info {
+ u8 size, width;
+ enum dram_rank rank;
+ } l_info, s_info;
+ enum dram_rank rank;
+ bool is_16gb_dimm;
+};
+
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return container_of(dev, struct drm_i915_private, drm);
@@ -2284,7 +2307,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#define for_each_sgt_dma(__dmap, __iter, __sgt) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
((__dmap) = (__iter).dma + (__iter).curr); \
- (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+ (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
/**
@@ -3074,6 +3097,12 @@ enum i915_map_type {
I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
+static inline enum i915_map_type
+i915_coherent_map_type(struct drm_i915_private *i915)
+{
+ return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+}
+
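
The new helper keeps the WB-vs-WC decision in one place; a sketch of the intended call pattern (obj, i915 and vaddr are placeholders, not lines from this patch):

	/* Pick a CPU mapping that stays coherent with the GPU: write-back
	 * on parts with an LLC, write-combine elsewhere.
	 */
	vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(i915));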
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
@@ -3311,7 +3340,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 89834ce19acd..aa3969d52773 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1127,11 +1127,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
offset = offset_in_page(args->offset);
for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
struct page *page = i915_gem_object_get_page(obj, idx);
- int length;
-
- length = remain;
- if (offset + length > PAGE_SIZE)
- length = PAGE_SIZE - offset;
+ unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pread(page, offset, length, user_data,
page_to_phys(page) & obj_do_bit17_swizzling,
@@ -1575,11 +1571,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = offset_in_page(args->offset);
for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
struct page *page = i915_gem_object_get_page(obj, idx);
- int length;
-
- length = remain;
- if (offset + length > PAGE_SIZE)
- length = PAGE_SIZE - offset;
+ unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pwrite(page, offset, length, user_data,
page_to_phys(page) & obj_do_bit17_swizzling,
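
Both shmem_pread and shmem_pwrite now clamp each copy with a single min_t() so a chunk never crosses the current page. The loop shape, as a runnable sketch (walk_pages and the printf stand in for the actual copy):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static void walk_pages(uint64_t offset, uint64_t remain)
{
	unsigned int off = offset & (PAGE_SIZE - 1);

	while (remain) {
		/* min_t(u64, remain, PAGE_SIZE - off) from the patch */
		unsigned int length = remain < PAGE_SIZE - off ?
				      (unsigned int)remain : PAGE_SIZE - off;

		printf("copy %u bytes at page offset %u\n", length, off);
		remain -= length;
		off = 0;	/* later pages start at their beginning */
	}
}

int main(void)
{
	walk_pages(100, 9000);	/* spans three pages */
	return 0;
}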
@@ -2506,7 +2498,9 @@ static bool i915_sg_trim(struct sg_table *orig_st)
new_sg = new_st.sgl;
for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
sg_set_page(new_sg, sg_page(sg), sg->length, 0);
- /* called before being DMA mapped, no need to copy sg->dma_* */
+ sg_dma_address(new_sg) = sg_dma_address(sg);
+ sg_dma_len(new_sg) = sg_dma_len(sg);
+
new_sg = sg_next(new_sg);
}
GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
@@ -3438,6 +3432,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
i915_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests);
+ if (!intel_gpu_reset(i915, ALL_ENGINES))
+ intel_engines_sanitize(i915);
+
/*
* Undo nop_submit_request. We prevent all new i915 requests from
* being queued (by disallowing execbuf whilst wedged) so having
@@ -5414,8 +5411,19 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
assert_kernel_context_is_current(i915);
+ /*
+ * Immediately park the GPU so that we enable powersaving and
+ * treat it as idle. The next time we issue a request, we will
+ * unpark and start using the engine->pinned_default_state, otherwise
+ * it is in limbo and an early reset may fail.
+ */
+ __i915_gem_park(i915);
+
for_each_engine(engine, i915, id) {
struct i915_vma *state;
+ void *vaddr;
+
+ GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
state = to_intel_context(ctx, engine)->state;
if (!state)
@@ -5438,6 +5446,16 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
engine->default_state = i915_gem_object_get(state->obj);
+
+ /* Check we can acquire the image of the context state */
+ vaddr = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_FORCE_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_active;
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 747b8170a15a..f772593b99ab 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -862,7 +862,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
- args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+ args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
@@ -896,27 +896,23 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct i915_gem_context *ctx;
- int ret;
+ int ret = 0;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (!ctx)
return -ENOENT;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto out;
-
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
- if (args->size) {
+ if (args->size)
ret = -EINVAL;
- } else {
- ctx->flags &= ~CONTEXT_NO_ZEROMAP;
- ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
- }
+ else if (args->value)
+ set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
+ else
+ clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
if (args->size)
@@ -960,9 +956,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
break;
}
- mutex_unlock(&dev->struct_mutex);
-out:
i915_gem_context_put(ctx);
return ret;
}
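
Moving the user-controlled flags onto their own word is what lets setparam drop struct_mutex: set_bit()/clear_bit()/test_bit() are atomic, so concurrent updaters cannot corrupt neighbouring bits. A userspace approximation of that pattern with C11 atomics (the kernel helpers themselves operate on unsigned long bitmaps):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define UCONTEXT_NO_ZEROMAP 0

static void set_bit_u(int nr, atomic_ulong *addr)
{
	atomic_fetch_or(addr, 1ul << nr);
}

static void clear_bit_u(int nr, atomic_ulong *addr)
{
	atomic_fetch_and(addr, ~(1ul << nr));
}

static bool test_bit_u(int nr, atomic_ulong *addr)
{
	return atomic_load(addr) & (1ul << nr);
}

int main(void)
{
	atomic_ulong user_flags = 0;

	set_bit_u(UCONTEXT_NO_ZEROMAP, &user_flags);
	printf("no_zeromap=%d\n", test_bit_u(UCONTEXT_NO_ZEROMAP, &user_flags));
	clear_bit_u(UCONTEXT_NO_ZEROMAP, &user_flags);
	printf("no_zeromap=%d\n", test_bit_u(UCONTEXT_NO_ZEROMAP, &user_flags));
	return 0;
}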
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index e09673ca731d..08165f6a0a84 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -117,15 +117,20 @@ struct i915_gem_context {
struct rcu_head rcu;
/**
+ * @user_flags: small set of booleans controlled by the user
+ */
+ unsigned long user_flags;
+#define UCONTEXT_NO_ZEROMAP 0
+#define UCONTEXT_NO_ERROR_CAPTURE 1
+#define UCONTEXT_BANNABLE 2
+
+ /**
* @flags: small set of booleans
*/
unsigned long flags;
-#define CONTEXT_NO_ZEROMAP BIT(0)
-#define CONTEXT_NO_ERROR_CAPTURE 1
-#define CONTEXT_CLOSED 2
-#define CONTEXT_BANNABLE 3
-#define CONTEXT_BANNED 4
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
+#define CONTEXT_BANNED 0
+#define CONTEXT_CLOSED 1
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
/**
* @hw_id: - unique identifier for the context
@@ -209,37 +214,37 @@ static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx
static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
GEM_BUG_ON(i915_gem_context_is_closed(ctx));
- __set_bit(CONTEXT_CLOSED, &ctx->flags);
+ set_bit(CONTEXT_CLOSED, &ctx->flags);
}
static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
- return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+ return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
- __set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+ set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
- __clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+ clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
- return test_bit(CONTEXT_BANNABLE, &ctx->flags);
+ return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
- __set_bit(CONTEXT_BANNABLE, &ctx->flags);
+ set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
- __clear_bit(CONTEXT_BANNABLE, &ctx->flags);
+ clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
@@ -249,7 +254,7 @@ static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx
static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
- __set_bit(CONTEXT_BANNED, &ctx->flags);
+ set_bit(CONTEXT_BANNED, &ctx->flags);
}
static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6d99651c6c4b..f90a09b83370 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -693,9 +693,14 @@ static int eb_reserve(struct i915_execbuffer *eb)
eb_unreserve_vma(vma, &eb->flags[i]);
if (flags & EXEC_OBJECT_PINNED)
+			/* Pinned objects must keep their slot */
list_add(&vma->exec_link, &eb->unbound);
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
+			/* Mappable objects require the lowest 256MiB (aperture) */
list_add_tail(&vma->exec_link, &eb->unbound);
+ else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+ /* Prioritise 4GiB region for restricted bo */
+ list_add(&vma->exec_link, &last);
else
list_add_tail(&vma->exec_link, &last);
}
@@ -743,7 +748,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
}
eb->context_flags = 0;
- if (ctx->flags & CONTEXT_NO_ZEROMAP)
+ if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index eb0e446d6482..56c7f8637311 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1050,7 +1050,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
do {
vaddr[idx->pte] = pte_encode | iter->dma;
- iter->dma += PAGE_SIZE;
+ iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
iter->sg = __sg_next(iter->sg);
if (!iter->sg) {
@@ -1144,7 +1144,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (max - index) << PAGE_SHIFT))
+ rem >= (max - index) * I915_GTT_PAGE_SIZE))
maybe_64K = true;
vaddr = kmap_atomic_px(pt);
@@ -1169,7 +1169,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
if (maybe_64K && index < max &&
!(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (max - index) << PAGE_SHIFT)))
+ rem >= (max - index) * I915_GTT_PAGE_SIZE)))
maybe_64K = false;
if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
@@ -1759,7 +1759,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
pde, pte,
- (pde * GEN6_PTES + pte) * PAGE_SIZE);
+ (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
for (i = 0; i < 4; i++) {
if (vaddr[pte + i] != scratch_pte)
seq_printf(m, " %08x", vaddr[pte + i]);
@@ -1842,10 +1842,10 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- unsigned int first_entry = start >> PAGE_SHIFT;
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
- unsigned int num_entries = length >> PAGE_SHIFT;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
while (num_entries) {
@@ -1886,7 +1886,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- unsigned first_entry = vma->node.start >> PAGE_SHIFT;
+ unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
@@ -1899,7 +1899,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
- iter.dma += PAGE_SIZE;
+ iter.dma += I915_GTT_PAGE_SIZE;
if (iter.dma == iter.max) {
iter.sg = __sg_next(iter.sg);
if (!iter.sg)
@@ -2037,7 +2037,7 @@ static int pd_vma_bind(struct i915_vma *vma,
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct gen6_hw_ppgtt *ppgtt = vma->private;
- u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
+ u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
struct i915_page_table *pt;
unsigned int pde;
@@ -2163,7 +2163,7 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.i915 = i915;
ppgtt->base.vm.dma = &i915->drm.pdev->dev;
- ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+ ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
i915_address_space_init(&ppgtt->base.vm, i915);
@@ -2456,7 +2456,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
@@ -2480,7 +2480,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += vma->node.start >> PAGE_SHIFT;
+ gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
for_each_sgt_dma(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr);
@@ -2499,7 +2499,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
iowrite32(vm->pte_encode(addr, level, flags), pte);
@@ -2519,7 +2519,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = vma->node.start >> PAGE_SHIFT;
+ unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
struct sgt_iter iter;
dma_addr_t addr;
for_each_sgt_dma(addr, iter, vma->pages)
@@ -2541,8 +2541,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
+ unsigned first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned num_entries = length / I915_GTT_PAGE_SIZE;
const gen8_pte_t scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t __iomem *gtt_base =
@@ -2657,8 +2657,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
+ unsigned first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned num_entries = length / I915_GTT_PAGE_SIZE;
gen6_pte_t scratch_pte, __iomem *gtt_base =
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -3005,7 +3005,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
arch_phys_wc_del(ggtt->mtrr);
io_mapping_fini(&ggtt->iomap);
- i915_gem_cleanup_stolen(&dev_priv->drm);
+ i915_gem_cleanup_stolen(dev_priv);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3023,7 +3023,7 @@ static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
#ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
if (bdw_gmch_ctl > 4)
bdw_gmch_ctl = 4;
#endif
@@ -3398,7 +3398,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
@@ -3456,7 +3456,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.clear_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
@@ -3727,9 +3727,9 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
* the entries so the sg list can be happily traversed.
* The only thing we need are DMA addresses.
*/
- sg_set_page(sg, NULL, PAGE_SIZE, 0);
+ sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) = in[offset + src_idx];
- sg_dma_len(sg) = PAGE_SIZE;
+ sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
src_idx -= stride;
}
@@ -3742,7 +3742,7 @@ static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
- const unsigned long n_pages = obj->base.size / PAGE_SIZE;
+ const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
unsigned int size = intel_rotation_info_size(rot_info);
struct sgt_iter sgt_iter;
dma_addr_t dma_addr;
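
The PAGE_SIZE → I915_GTT_PAGE_SIZE conversion throughout this file is mechanical but meaningful: GTT entry arithmetic must use the GPU's 4 KiB granule even if the CPU page size differs. The index math, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096u	/* GTT granule, independent of CPU PAGE_SIZE */

/* Index of the PTE covering a GTT address, as in the converted helpers. */
static unsigned int gtt_entry(uint64_t start)
{
	return start / I915_GTT_PAGE_SIZE;
}

/* Total address space covered by a GGTT with 'size' bytes of PTEs. */
static uint64_t ggtt_total(uint32_t size, unsigned int pte_size)
{
	return (uint64_t)(size / pte_size) * I915_GTT_PAGE_SIZE;
}

int main(void)
{
	printf("first entry: %u\n", gtt_entry(0x10000));		  /* 16 */
	printf("total: %llu\n",
	       (unsigned long long)ggtt_total(8 << 20, 8));	  /* 4 GiB */
	return 0;
}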
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 53440bf87650..f29a7ff7c362 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -167,10 +167,8 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
return 0;
}
-void i915_gem_cleanup_stolen(struct drm_device *dev)
+void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f7f2aa71d8d9..2835cacd0d08 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1365,15 +1365,20 @@ static void request_record_user_bo(struct i915_request *request,
{
struct i915_capture_list *c;
struct drm_i915_error_object **bo;
- long count;
+ long count, max;
- count = 0;
+ max = 0;
for (c = request->capture_list; c; c = c->next)
- count++;
+ max++;
+ if (!max)
+ return;
- bo = NULL;
- if (count)
- bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
+ bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+ if (!bo) {
+ /* If we can't capture everything, try to capture something. */
+ max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
+ bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+ }
if (!bo)
return;
@@ -1382,7 +1387,8 @@ static void request_record_user_bo(struct i915_request *request,
bo[count] = i915_error_object_create(request->i915, c->vma);
if (!bo[count])
break;
- count++;
+ if (++count == max)
+ break;
}
ee->user_bo = bo;
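
The capture path now degrades instead of giving up: allocate for every bo on the list and, if that fails under GFP_ATOMIC pressure, retry with at most a page's worth of pointers. The fallback shape, as a runnable sketch (alloc_capture_array is a made-up stand-in for the kmalloc_array pair above):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

static void **alloc_capture_array(long want, long *got)
{
	void **bo = malloc(want * sizeof(*bo));

	if (!bo) {
		/* Can't capture everything; try to capture something. */
		long cap = PAGE_SIZE / sizeof(*bo);

		want = want < cap ? want : cap;
		bo = malloc(want * sizeof(*bo));
	}

	*got = bo ? want : 0;
	return bo;
}

int main(void)
{
	long got;
	void **bo = alloc_capture_array(10000, &got);

	printf("capturing up to %ld buffers\n", got);
	free(bo);
	return 0;
}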
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index ccb20230df2c..664b96bb65a3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1680,107 +1680,6 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
}
/*
- * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
- * is only used by the kernel context.
- */
-static int gen8_emit_oa_config(struct i915_request *rq,
- const struct i915_oa_config *oa_config)
-{
- struct drm_i915_private *dev_priv = rq->i915;
- /* The MMIO offsets for Flex EU registers aren't contiguous */
- u32 flex_mmio[] = {
- i915_mmio_reg_offset(EU_PERF_CNTL0),
- i915_mmio_reg_offset(EU_PERF_CNTL1),
- i915_mmio_reg_offset(EU_PERF_CNTL2),
- i915_mmio_reg_offset(EU_PERF_CNTL3),
- i915_mmio_reg_offset(EU_PERF_CNTL4),
- i915_mmio_reg_offset(EU_PERF_CNTL5),
- i915_mmio_reg_offset(EU_PERF_CNTL6),
- };
- u32 *cs;
- int i;
-
- cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
-
- *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
- *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME;
-
- for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
- u32 mmio = flex_mmio[i];
-
- /*
- * This arbitrary default will select the 'EU FPU0 Pipeline
- * Active' event. In the future it's anticipated that there
- * will be an explicit 'No Event' we can select, but not
- * yet...
- */
- u32 value = 0;
-
- if (oa_config) {
- u32 j;
-
- for (j = 0; j < oa_config->flex_regs_len; j++) {
- if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
- value = oa_config->flex_regs[j].value;
- break;
- }
- }
- }
-
- *cs++ = mmio;
- *cs++ = value;
- }
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
-{
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
- struct i915_timeline *timeline;
- struct i915_request *rq;
- int ret;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- i915_retire_requests(dev_priv);
-
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- ret = gen8_emit_oa_config(rq, oa_config);
- if (ret) {
- i915_request_add(rq);
- return ret;
- }
-
- /* Queue this switch after all other activity */
- list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
- struct i915_request *prev;
-
- prev = i915_gem_active_raw(&timeline->last_request,
- &dev_priv->drm.struct_mutex);
- if (prev)
- i915_request_await_dma_fence(rq, &prev->fence);
- }
-
- i915_request_add(rq);
-
- return 0;
-}
-
-/*
* Manages updating the per-context aspects of the OA stream
* configuration across all contexts.
*
@@ -1808,17 +1707,13 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
+ struct i915_request *rq;
int ret;
- unsigned int wait_flags = I915_WAIT_LOCKED;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- /* Switch away from any user context. */
- ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
- if (ret)
- return ret;
-
/*
* The OA register config is setup through the context image. This image
* might be written to by the GPU on context switch (in particular on
@@ -1833,7 +1728,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* the GPU from any submitted work.
*/
ret = i915_gem_wait_for_idle(dev_priv,
- wait_flags,
+ I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -1847,7 +1742,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
if (!ce->state)
continue;
- regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+ regs = i915_gem_object_pin_map(ce->state->obj, map_type);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -1859,7 +1754,17 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
i915_gem_object_unpin_map(ce->state->obj);
}
- return ret;
+ /*
+ * Apply the configuration by doing one context restore of the edited
+ * context image.
+ */
+ rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+
+ return 0;
}
static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 09bc8e730ee1..7c491ea3d052 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -9339,6 +9339,9 @@ enum skl_power_gate {
#define DPCLKA_CFGCR0_ICL _MMIO(0x164280)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
(port) + 10))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) + 10))
+#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) == PORT_TC4 ? \
+ 21 : (tc_port) + 12))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
(port) * 2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
@@ -9583,6 +9586,54 @@ enum skl_power_gate {
#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
+#define BXT_P_CR_MC_BIOS_REQ_0_0_0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114)
+#define BXT_REQ_DATA_MASK 0x3F
+#define BXT_DRAM_CHANNEL_ACTIVE_SHIFT 12
+#define BXT_DRAM_CHANNEL_ACTIVE_MASK (0xF << 12)
+#define BXT_MEMORY_FREQ_MULTIPLIER_HZ 133333333
+
+#define BXT_D_CR_DRP0_DUNIT8 0x1000
+#define BXT_D_CR_DRP0_DUNIT9 0x1200
+#define BXT_D_CR_DRP0_DUNIT_START 8
+#define BXT_D_CR_DRP0_DUNIT_END 11
+#define BXT_D_CR_DRP0_DUNIT(x) _MMIO(MCHBAR_MIRROR_BASE_SNB + \
+ _PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\
+ BXT_D_CR_DRP0_DUNIT9))
+#define BXT_DRAM_RANK_MASK 0x3
+#define BXT_DRAM_RANK_SINGLE 0x1
+#define BXT_DRAM_RANK_DUAL 0x3
+#define BXT_DRAM_WIDTH_MASK (0x3 << 4)
+#define BXT_DRAM_WIDTH_SHIFT 4
+#define BXT_DRAM_WIDTH_X8 (0x0 << 4)
+#define BXT_DRAM_WIDTH_X16 (0x1 << 4)
+#define BXT_DRAM_WIDTH_X32 (0x2 << 4)
+#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
+#define BXT_DRAM_SIZE_MASK (0x7 << 6)
+#define BXT_DRAM_SIZE_SHIFT 6
+#define BXT_DRAM_SIZE_4GB (0x0 << 6)
+#define BXT_DRAM_SIZE_6GB (0x1 << 6)
+#define BXT_DRAM_SIZE_8GB (0x2 << 6)
+#define BXT_DRAM_SIZE_12GB (0x3 << 6)
+#define BXT_DRAM_SIZE_16GB (0x4 << 6)
+
+#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
+#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
+#define SKL_REQ_DATA_MASK (0xF << 0)
+
+#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
+#define SKL_DRAM_S_SHIFT 16
+#define SKL_DRAM_SIZE_MASK 0x3F
+#define SKL_DRAM_WIDTH_MASK (0x3 << 8)
+#define SKL_DRAM_WIDTH_SHIFT 8
+#define SKL_DRAM_WIDTH_X8 (0x0 << 8)
+#define SKL_DRAM_WIDTH_X16 (0x1 << 8)
+#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
+#define SKL_DRAM_RANK_MASK (0x1 << 10)
+#define SKL_DRAM_RANK_SHIFT 10
+#define SKL_DRAM_RANK_SINGLE (0x0 << 10)
+#define SKL_DRAM_RANK_DUAL (0x1 << 10)
+
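
The SKL MAD_DIMM layout repeats the same size/width/rank fields for the second (S) DIMM shifted up by SKL_DRAM_S_SHIFT; a standalone sketch of the decode, assuming the size field is in GB as the 16GB-DIMM check earlier in the patch implies (the register value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SKL_DRAM_S_SHIFT	16
#define SKL_DRAM_SIZE_MASK	0x3F
#define SKL_DRAM_WIDTH_MASK	(0x3 << 8)
#define SKL_DRAM_WIDTH_SHIFT	8
#define SKL_DRAM_RANK_MASK	(0x1 << 10)

static void skl_decode_dimm(uint32_t val, int s_dimm)
{
	uint32_t v = s_dimm ? val >> SKL_DRAM_S_SHIFT : val;

	printf("size:%uGB width:X%u rank:%s\n",
	       v & SKL_DRAM_SIZE_MASK,
	       8 << ((v & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT),
	       v & SKL_DRAM_RANK_MASK ? "dual" : "single");
}

int main(void)
{
	uint32_t val = 0x00040508;	/* hypothetical: L=8GB x16 dual, S=4GB x8 single */

	skl_decode_dimm(val, 0);
	skl_decode_dimm(val, 1);
	return 0;
}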
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
@@ -10231,6 +10282,12 @@ enum skl_power_gate {
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
+#define _ICL_DSI_T_INIT_MASTER_0 0x6b088
+#define _ICL_DSI_T_INIT_MASTER_1 0x6b888
+#define ICL_DSI_T_INIT_MASTER(port) _MMIO_PORT(port, \
+ _ICL_DSI_T_INIT_MASTER_0,\
+ _ICL_DSI_T_INIT_MASTER_1)
+
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 09ed48833b54..a492385b2089 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -732,13 +732,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq = kmem_cache_alloc(i915->requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
+ i915_retire_requests(i915);
+
/* Ratelimit ourselves to prevent oom from malicious clients */
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- goto err_unreserve;
+ rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
+ &i915->drm.struct_mutex);
+ if (rq)
+ cond_synchronize_rcu(rq->rcustate);
/*
* We've forced the client to stall and catch up with whatever
@@ -758,6 +758,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
}
}
+ rq->rcustate = get_state_synchronize_rcu();
+
INIT_LIST_HEAD(&rq->active_list);
rq->i915 = i915;
rq->engine = engine;
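
The two halves of the new backpressure scheme are get_state_synchronize_rcu() at allocation and cond_synchronize_rcu() on the failure path; the latter only blocks if the recorded grace period has not yet elapsed, which is far cheaper than the old wait-for-idle. Sketched as a pairing (old_rq is a placeholder for the ring's last request):

	/* At allocation: remember the current RCU grace-period cookie. */
	rq->rcustate = get_state_synchronize_rcu();

	/* On kmem_cache_alloc() failure: wait, only if still needed, for
	 * the grace period of the oldest in-flight request, so RCU-freed
	 * request slabs become reclaimable before we retry.
	 */
	cond_synchronize_rcu(old_rq->rcustate);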
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 9898301ab7ef..7fa94b024968 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -101,6 +101,14 @@ struct i915_request {
struct intel_signal_node signaling;
/*
+	 * The RCU epoch at which this request was allocated. Used to
+	 * judiciously apply backpressure on future allocations to ensure
+	 * that under memory pressure there are enough RCU grace periods
+	 * for us to reclaim our RCU-protected slabs.
+ */
+ unsigned long rcustate;
+
+ /*
* Fences for the various phases in the request's lifetime.
*
* The submit fence is used to await upon all of the request's
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1de5173e53a2..6dbeed079ae5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -24,13 +24,13 @@ enum {
DEBUG_FENCE_NOTIFY,
};
-#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
-
static void *i915_sw_fence_debug_hint(void *addr)
{
return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
}
+#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
+
static struct debug_obj_descr i915_sw_fence_debug_descr = {
.name = "i915_sw_fence",
.debug_hint = i915_sw_fence_debug_hint,
@@ -393,10 +393,11 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
if (!fence)
return;
- pr_warn("asynchronous wait on fence %s:%s:%x timed out\n",
- cb->dma->ops->get_driver_name(cb->dma),
- cb->dma->ops->get_timeline_name(cb->dma),
- cb->dma->seqno);
+ pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n",
+ cb->dma->ops->get_driver_name(cb->dma),
+ cb->dma->ops->get_timeline_name(cb->dma),
+ cb->dma->seqno,
+ i915_sw_fence_debug_hint(fence));
i915_sw_fence_complete(fence);
}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index fa7df5fe154b..aabebe0d2e9b 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -113,71 +113,18 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane_state *intel_state)
{
struct drm_plane *plane = intel_state->base.plane;
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_plane_state *state = &intel_state->base;
struct intel_plane *intel_plane = to_intel_plane(plane);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
int ret;
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
- if (state->fb && drm_rotation_90_or_270(state->rotation)) {
- struct drm_format_name_buf format_name;
-
- if (state->fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- state->fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
- DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
- return -EINVAL;
- }
-
- /*
- * 90/270 is not allowed with RGB64 16:16:16:16,
- * RGB 16-bit 5:6:5, and Indexed 8-bit.
- * TBD: Add RGB64 case once its added in supported format list.
- */
- switch (state->fb->format->format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(state->fb->format->format,
- &format_name));
- return -EINVAL;
-
- default:
- break;
- }
- }
-
- /* CHV ignores the mirror bit when the rotate bit is set :( */
- if (IS_CHERRYVIEW(dev_priv) &&
- state->rotation & DRM_MODE_ROTATE_180 &&
- state->rotation & DRM_MODE_REFLECT_X) {
- DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
- return -EINVAL;
- }
-
intel_state->base.visible = false;
ret = intel_plane->check_plane(crtc_state, intel_state);
if (ret)
return ret;
- /*
- * Y-tiling is not supported in IF-ID Interlace mode in
- * GEN9 and above.
- */
- if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- state->fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- state->fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- state->fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
- DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
- return -EINVAL;
- }
- }
-
/* FIXME pre-g4x don't work like this */
if (state->visible)
crtc_state->active_planes |= BIT(intel_plane->id);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 14cf4c367e36..d48186e9ddad 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,6 +34,10 @@
* low-power state and comes back to normal.
*/
+#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
+MODULE_FIRMWARE(I915_CSR_ICL);
+#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
@@ -304,6 +308,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
if (csr->fw_path == i915_modparams.dmc_firmware_path) {
/* Bypass version check for firmware override. */
required_version = csr->version;
+ } else if (IS_ICELAKE(dev_priv)) {
+ required_version = ICL_CSR_VERSION_REQUIRED;
} else if (IS_CANNONLAKE(dev_priv)) {
required_version = CNL_CSR_VERSION_REQUIRED;
} else if (IS_GEMINILAKE(dev_priv)) {
@@ -471,6 +477,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (i915_modparams.dmc_firmware_path)
csr->fw_path = i915_modparams.dmc_firmware_path;
+ else if (IS_ICELAKE(dev_priv))
+ csr->fw_path = I915_CSR_ICL;
else if (IS_CANNONLAKE(dev_priv))
csr->fw_path = I915_CSR_CNL;
else if (IS_GEMINILAKE(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cd01a09c5e0f..5186cd7075f9 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -916,7 +916,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
if (IS_ICELAKE(dev_priv)) {
- if (port == PORT_A || port == PORT_B)
+ if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port,
INTEL_OUTPUT_HDMI, &n_entries);
else
@@ -1535,7 +1535,7 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
uint32_t pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
- if (port == PORT_A || port == PORT_B) {
+ if (intel_port_is_combophy(dev_priv, port)) {
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
else
@@ -2077,7 +2077,7 @@ out:
static inline enum intel_display_power_domain
intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
{
- /* CNL HW requires corresponding AUX IOs to be powered up for PSR with
+ /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
* DC states enabled at the same time, while for driver initiated AUX
* transfers we need the same AUX IOs to be powered but with DC states
* disabled. Accordingly use the AUX power domain here which leaves DC
@@ -2235,7 +2235,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
int n_entries;
if (IS_ICELAKE(dev_priv)) {
- if (port == PORT_A || port == PORT_B)
+ if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, encoder->type,
&n_entries);
else
@@ -2669,9 +2669,10 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 level,
enum intel_output_type type)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- if (port == PORT_A || port == PORT_B)
+ if (intel_port_is_combophy(dev_priv, port))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
@@ -2732,6 +2733,21 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
return DDI_BUF_TRANS_SELECT(level);
}
+static inline
+uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (intel_port_is_combophy(dev_priv, port)) {
+ return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ } else if (intel_port_is_tc(dev_priv, port)) {
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+
+ return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
+ }
+
+ return 0;
+}
+
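
The helper above hides an irregular bit layout: combo PHY ports A/B use bits 10-11, while Type-C ports land at bits 12-14 with TC4 jumping to bit 21. A standalone sketch, assuming the usual ICL port-to-TC mapping (C→TC1 … F→TC4):

#include <stdio.h>

enum { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

static unsigned int icl_clk_off_bit(int port)
{
	if (port <= PORT_B)			/* combo PHY ports */
		return 1u << (port + 10);
	if (port == PORT_F)			/* TC4 is the odd one out */
		return 1u << 21;
	return 1u << (port - PORT_C + 12);	/* TC1..TC3 */
}

int main(void)
{
	for (int p = PORT_A; p <= PORT_F; p++)
		printf("port %c -> 0x%08x\n", 'A' + p, icl_clk_off_bit(p));
	return 0;
}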
void icl_map_plls_to_ports(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct drm_atomic_state *old_state)
@@ -2755,16 +2771,16 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc,
mutex_lock(&dev_priv->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & DPCLKA_CFGCR0_DDI_CLK_OFF(port)) == 0);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
- if (port == PORT_A || port == PORT_B) {
+ if (intel_port_is_combophy(dev_priv, port)) {
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
POSTING_READ(DPCLKA_CFGCR0_ICL);
}
- val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
mutex_unlock(&dev_priv->dpll_lock);
@@ -2792,7 +2808,7 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
mutex_lock(&dev_priv->dpll_lock);
I915_WRITE(DPCLKA_CFGCR0_ICL,
I915_READ(DPCLKA_CFGCR0_ICL) |
- DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+ icl_dpclka_cfgcr0_clk_off(dev_priv, port));
mutex_unlock(&dev_priv->dpll_lock);
}
}
@@ -2810,7 +2826,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
if (IS_ICELAKE(dev_priv)) {
- if (port >= PORT_C)
+ if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_pll_sel(encoder, pll));
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2852,7 +2868,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
enum port port = encoder->port;
if (IS_ICELAKE(dev_priv)) {
- if (port >= PORT_C)
+ if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5711cb701760..9741cc419e1b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1917,10 +1917,10 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
}
static unsigned int
-intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
+intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
- unsigned int cpp = fb->format->cpp[plane];
+ unsigned int cpp = fb->format->cpp[color_plane];
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -1931,7 +1931,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
else
return 512;
case I915_FORMAT_MOD_Y_TILED_CCS:
- if (plane == 1)
+ if (color_plane == 1)
return 128;
/* fall through */
case I915_FORMAT_MOD_Y_TILED:
@@ -1940,7 +1940,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
else
return 512;
case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (plane == 1)
+ if (color_plane == 1)
return 128;
/* fall through */
case I915_FORMAT_MOD_Yf_TILED:
@@ -1965,22 +1965,22 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
}
static unsigned int
-intel_tile_height(const struct drm_framebuffer *fb, int plane)
+intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return 1;
else
return intel_tile_size(to_i915(fb->dev)) /
- intel_tile_width_bytes(fb, plane);
+ intel_tile_width_bytes(fb, color_plane);
}
/* Return the tile dimensions in pixel units */
-static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
+static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
unsigned int *tile_width,
unsigned int *tile_height)
{
- unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
- unsigned int cpp = fb->format->cpp[plane];
+ unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
+ unsigned int cpp = fb->format->cpp[color_plane];
*tile_width = tile_width_bytes / cpp;
*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
@@ -1988,9 +1988,9 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
- int plane, unsigned int height)
+ int color_plane, unsigned int height)
{
- unsigned int tile_height = intel_tile_height(fb, plane);
+ unsigned int tile_height = intel_tile_height(fb, color_plane);
return ALIGN(height, tile_height);
}
@@ -2044,12 +2044,12 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
}
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
- int plane)
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
- if (plane == 1)
+ if (color_plane == 1)
return 4096;
switch (fb->modifier) {
@@ -2080,14 +2080,13 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- unsigned int rotation,
+ const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned int pinctl;
u32 alignment;
@@ -2096,8 +2095,6 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
alignment = intel_surf_alignment(fb, 0);
- intel_fill_fb_ggtt_view(&view, fb, rotation);
-
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
* we should always have valid PTE following the scanout preventing
@@ -2130,7 +2127,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
pinctl |= PIN_MAPPABLE;
vma = i915_gem_object_pin_to_display_plane(obj,
- alignment, &view, pinctl);
+ alignment, view, pinctl);
if (IS_ERR(vma))
goto err;
@@ -2182,13 +2179,13 @@ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
i915_vma_put(vma);
}
-static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
+static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
unsigned int rotation)
{
if (drm_rotation_90_or_270(rotation))
- return to_intel_framebuffer(fb)->rotated[plane].pitch;
+ return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
else
- return fb->pitches[plane];
+ return fb->pitches[color_plane];
}
/*
@@ -2199,11 +2196,11 @@ static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
*/
u32 intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
- int plane)
+ int color_plane)
{
const struct drm_framebuffer *fb = state->base.fb;
- unsigned int cpp = fb->format->cpp[plane];
- unsigned int pitch = fb->pitches[plane];
+ unsigned int cpp = fb->format->cpp[color_plane];
+ unsigned int pitch = state->color_plane[color_plane].stride;
return y * pitch + x * cpp;
}
@@ -2215,28 +2212,28 @@ u32 intel_fb_xy_to_linear(int x, int y,
*/
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state,
- int plane)
+ int color_plane)
{
const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
unsigned int rotation = state->base.rotation;
if (drm_rotation_90_or_270(rotation)) {
- *x += intel_fb->rotated[plane].x;
- *y += intel_fb->rotated[plane].y;
+ *x += intel_fb->rotated[color_plane].x;
+ *y += intel_fb->rotated[color_plane].y;
} else {
- *x += intel_fb->normal[plane].x;
- *y += intel_fb->normal[plane].y;
+ *x += intel_fb->normal[color_plane].x;
+ *y += intel_fb->normal[color_plane].y;
}
}
-static u32 __intel_adjust_tile_offset(int *x, int *y,
- unsigned int tile_width,
- unsigned int tile_height,
- unsigned int tile_size,
- unsigned int pitch_tiles,
- u32 old_offset,
- u32 new_offset)
+static u32 intel_adjust_tile_offset(int *x, int *y,
+ unsigned int tile_width,
+ unsigned int tile_height,
+ unsigned int tile_size,
+ unsigned int pitch_tiles,
+ u32 old_offset,
+ u32 new_offset)
{
unsigned int pitch_pixels = pitch_tiles * tile_width;
unsigned int tiles;
@@ -2257,14 +2254,15 @@ static u32 __intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
-static u32 _intel_adjust_tile_offset(int *x, int *y,
- const struct drm_framebuffer *fb, int plane,
- unsigned int rotation,
- u32 old_offset, u32 new_offset)
+static u32 intel_adjust_aligned_offset(int *x, int *y,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation,
+ unsigned int pitch,
+ u32 old_offset, u32 new_offset)
{
- const struct drm_i915_private *dev_priv = to_i915(fb->dev);
- unsigned int cpp = fb->format->cpp[plane];
- unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int cpp = fb->format->cpp[color_plane];
WARN_ON(new_offset > old_offset);
@@ -2273,7 +2271,7 @@ static u32 _intel_adjust_tile_offset(int *x, int *y,
unsigned int pitch_tiles;
tile_size = intel_tile_size(dev_priv);
- intel_tile_dims(fb, plane, &tile_width, &tile_height);
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
@@ -2282,9 +2280,9 @@ static u32 _intel_adjust_tile_offset(int *x, int *y,
pitch_tiles = pitch / (tile_width * cpp);
}
- __intel_adjust_tile_offset(x, y, tile_width, tile_height,
- tile_size, pitch_tiles,
- old_offset, new_offset);
+ intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ old_offset, new_offset);
} else {
old_offset += *y * pitch + *x * cpp;
@@ -2299,17 +2297,19 @@ static u32 _intel_adjust_tile_offset(int *x, int *y,
* Adjust the tile offset by moving the difference into
* the x/y offsets.
*/
-static u32 intel_adjust_tile_offset(int *x, int *y,
- const struct intel_plane_state *state, int plane,
- u32 old_offset, u32 new_offset)
-{
- return _intel_adjust_tile_offset(x, y, state->base.fb, plane,
- state->base.rotation,
- old_offset, new_offset);
+static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset)
+{
+ return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
+ state->base.rotation,
+ state->color_plane[color_plane].stride,
+ old_offset, new_offset);
}
/*
- * Computes the linear offset to the base tile and adjusts
+ * Computes the aligned offset to the base tile and adjusts
* x, y. bytes per pixel is assumed to be a power-of-two.
*
* In the 90/270 rotated case, x and y are assumed
@@ -2322,15 +2322,16 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
* used. This is why the user has to pass in the pitch since it
* is specified in the rotated orientation.
*/
-static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
- int *x, int *y,
- const struct drm_framebuffer *fb, int plane,
- unsigned int pitch,
- unsigned int rotation,
- u32 alignment)
+static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int pitch,
+ unsigned int rotation,
+ u32 alignment)
{
uint64_t fb_modifier = fb->modifier;
- unsigned int cpp = fb->format->cpp[plane];
+ unsigned int cpp = fb->format->cpp[color_plane];
u32 offset, offset_aligned;
if (alignment)
@@ -2341,7 +2342,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
unsigned int tile_rows, tiles, pitch_tiles;
tile_size = intel_tile_size(dev_priv);
- intel_tile_dims(fb, plane, &tile_width, &tile_height);
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
@@ -2359,9 +2360,9 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
offset = (tile_rows * pitch_tiles + tiles) * tile_size;
offset_aligned = offset & ~alignment;
- __intel_adjust_tile_offset(x, y, tile_width, tile_height,
- tile_size, pitch_tiles,
- offset, offset_aligned);
+ intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ offset, offset_aligned);
} else {
offset = *y * pitch + *x * cpp;
offset_aligned = offset & ~alignment;
@@ -2373,42 +2374,44 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
return offset_aligned;
}
-u32 intel_compute_tile_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int plane)
+static u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane)
{
struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
const struct drm_framebuffer *fb = state->base.fb;
unsigned int rotation = state->base.rotation;
- int pitch = intel_fb_pitch(fb, plane, rotation);
+ int pitch = state->color_plane[color_plane].stride;
u32 alignment;
if (intel_plane->id == PLANE_CURSOR)
alignment = intel_cursor_alignment(dev_priv);
else
- alignment = intel_surf_alignment(fb, plane);
+ alignment = intel_surf_alignment(fb, color_plane);
- return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
- rotation, alignment);
+ return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
+ pitch, rotation, alignment);
}
/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
- const struct drm_framebuffer *fb, int plane)
+ const struct drm_framebuffer *fb,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[plane] % intel_tile_size(dev_priv))
+ fb->offsets[color_plane] % intel_tile_size(dev_priv))
return -EINVAL;
*x = 0;
*y = 0;
- _intel_adjust_tile_offset(x, y,
- fb, plane, DRM_MODE_ROTATE_0,
- fb->offsets[plane], 0);
+ intel_adjust_aligned_offset(x, y,
+ fb, color_plane, DRM_MODE_ROTATE_0,
+ fb->pitches[color_plane],
+ fb->offsets[color_plane], 0);
return 0;
}
@@ -2565,9 +2568,10 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
intel_fb->normal[i].x = x;
intel_fb->normal[i].y = y;
- offset = _intel_compute_tile_offset(dev_priv, &x, &y,
- fb, i, fb->pitches[i],
- DRM_MODE_ROTATE_0, tile_size);
+ offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
+ fb->pitches[i],
+ DRM_MODE_ROTATE_0,
+ tile_size);
offset /= tile_size;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
@@ -2614,10 +2618,10 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* We only keep the x/y offsets, so push all of the
* gtt offset into the x/y offsets.
*/
- __intel_adjust_tile_offset(&x, &y,
- tile_width, tile_height,
- tile_size, pitch_tiles,
- gtt_offset_rotated * tile_size, 0);
+ intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
+ gtt_offset_rotated * tile_size, 0);
gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2636,9 +2640,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
max_size = max(max_size, offset + size);
}
- if (max_size * tile_size > obj->base.size) {
- DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
- max_size * tile_size, obj->base.size);
+ if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
+ DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
+ mul_u32_u32(max_size, tile_size), obj->base.size);
return -EINVAL;
}
@@ -2718,6 +2722,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (size_aligned * 2 > dev_priv->stolen_usable_size)
return false;
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
+ return false;
+ }
+
mutex_lock(&dev->struct_mutex);
obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
base_aligned,
@@ -2727,8 +2742,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (!obj)
return false;
- if (plane_config->tiling == I915_TILING_X)
- obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ break;
+ case I915_TILING_X:
+ case I915_TILING_Y:
+ obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ return false;
+ }
mode_cmd.pixel_format = fb->format->format;
mode_cmd.width = fb->width;
@@ -2760,20 +2784,33 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
plane_state->base.visible = visible;
- /* FIXME pre-g4x don't work like this */
- if (visible) {
+ if (visible)
crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
- crtc_state->active_planes |= BIT(plane->id);
- } else {
+ else
crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
- crtc_state->active_planes &= ~BIT(plane->id);
- }
DRM_DEBUG_KMS("%s active planes 0x%x\n",
crtc_state->base.crtc->name,
crtc_state->active_planes);
}
+static void fixup_active_planes(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_plane *plane;
+
+ /*
+ * Active_planes aliases if multiple "primary" or cursor planes
+ * have been used on the same (or wrong) pipe. plane_mask uses
+ * unique ids, hence we can use that to reconstruct active_planes.
+ */
+ crtc_state->active_planes = 0;
+
+ drm_for_each_plane_mask(plane, &dev_priv->drm,
+ crtc_state->base.plane_mask)
+ crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
+}
+
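
fixup_active_planes() deliberately rebuilds active_planes from scratch out of plane_mask rather than toggling individual bits, so a plane that the BIOS left on the wrong pipe cannot leave a stale bit behind. A standalone sketch of the same rebuild-from-authoritative-state pattern (all names hypothetical):

#include <stdint.h>

struct sketch_plane {
	unsigned int id;	/* unique per pipe */
	int visible;
};

/* Recompute the aggregate mask from per-plane state instead of
 * updating it incrementally; incremental updates go stale when two
 * planes end up aliasing the same bit. */
static uint32_t rebuild_active_planes(const struct sketch_plane *planes, int n)
{
	uint32_t mask = 0;
	int i;

	for (i = 0; i < n; i++)
		if (planes[i].visible)
			mask |= UINT32_C(1) << planes[i].id;
	return mask;
}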
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
@@ -2783,6 +2820,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
to_intel_plane_state(plane->base.state);
intel_set_plane_visible(crtc_state, plane_state, false);
+ fixup_active_planes(crtc_state);
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
@@ -2801,7 +2839,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
- struct drm_crtc_state *crtc_state = intel_crtc->base.state;
struct intel_plane *intel_plane = to_intel_plane(primary);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane_state);
@@ -2853,10 +2890,15 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
+ intel_fill_fb_ggtt_view(&intel_state->view, fb,
+ intel_state->base.rotation);
+ intel_state->color_plane[0].stride =
+ intel_fb_pitch(fb, 0, intel_state->base.rotation);
+
mutex_lock(&dev->struct_mutex);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb,
- primary->state->rotation,
+ &intel_state->view,
intel_plane_uses_fence(intel_state),
&intel_state->flags);
mutex_unlock(&dev->struct_mutex);
@@ -2891,18 +2933,15 @@ valid_fb:
plane_state->fb = fb;
plane_state->crtc = &intel_crtc->base;
- intel_set_plane_visible(to_intel_crtc_state(crtc_state),
- to_intel_plane_state(plane_state),
- true);
-
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
&obj->frontbuffer_bits);
}
-static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
+static int skl_max_plane_width(const struct drm_framebuffer *fb,
+ int color_plane,
unsigned int rotation)
{
- int cpp = fb->format->cpp[plane];
+ int cpp = fb->format->cpp[color_plane];
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -2950,9 +2989,9 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
const struct drm_framebuffer *fb = plane_state->base.fb;
int hsub = fb->format->hsub;
int vsub = fb->format->vsub;
- int aux_x = plane_state->aux.x;
- int aux_y = plane_state->aux.y;
- u32 aux_offset = plane_state->aux.offset;
+ int aux_x = plane_state->color_plane[1].x;
+ int aux_y = plane_state->color_plane[1].y;
+ u32 aux_offset = plane_state->color_plane[1].offset;
u32 alignment = intel_surf_alignment(fb, 1);
while (aux_offset >= main_offset && aux_y <= main_y) {
@@ -2966,8 +3005,8 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
x = aux_x / hsub;
y = aux_y / vsub;
- aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1,
- aux_offset, aux_offset - alignment);
+ aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
+ aux_offset, aux_offset - alignment);
aux_x = x * hsub + aux_x % hsub;
aux_y = y * vsub + aux_y % vsub;
}
@@ -2975,30 +3014,24 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
if (aux_x != main_x || aux_y != main_y)
return false;
- plane_state->aux.offset = aux_offset;
- plane_state->aux.x = aux_x;
- plane_state->aux.y = aux_y;
+ plane_state->color_plane[1].offset = aux_offset;
+ plane_state->color_plane[1].x = aux_x;
+ plane_state->color_plane[1].y = aux_y;
return true;
}
-static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
int x = plane_state->base.src.x1 >> 16;
int y = plane_state->base.src.y1 >> 16;
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
- int dst_x = plane_state->base.dst.x1;
- int dst_w = drm_rect_width(&plane_state->base.dst);
- int pipe_src_w = crtc_state->pipe_src_w;
int max_width = skl_max_plane_width(fb, 0, rotation);
int max_height = 4096;
- u32 alignment, offset, aux_offset = plane_state->aux.offset;
+ u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
if (w > max_width || h > max_height) {
DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
@@ -3006,26 +3039,8 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- /*
- * Display WA #1175: cnl,glk
- * Planes other than the cursor may cause FIFO underflow and display
- * corruption if starting less than 4 pixels from the right edge of
- * the screen.
- * Besides the above WA fix the similar problem, where planes other
- * than the cursor ending less than 4 pixels from the left edge of the
- * screen may cause FIFO underflow and display corruption.
- */
- if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
- DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
- dst_x + dst_w < 4 ? "end" : "start",
- dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
- 4, pipe_src_w - 4);
- return -ERANGE;
- }
-
intel_add_fb_offsets(&x, &y, plane_state, 0);
- offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
+ offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
/*
@@ -3034,8 +3049,8 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
* sure that is what we will get.
*/
if (offset > aux_offset)
- offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
- offset, aux_offset & ~(alignment - 1));
+ offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ offset, aux_offset & ~(alignment - 1));
/*
* When using an X-tiled surface, the plane blows up
@@ -3046,14 +3061,14 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
- while ((x + w) * cpp > fb->pitches[0]) {
+ while ((x + w) * cpp > plane_state->color_plane[0].stride) {
if (offset == 0) {
DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
- offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
- offset, offset - alignment);
+ offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ offset, offset - alignment);
}
}
@@ -3066,26 +3081,25 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
if (offset == 0)
break;
- offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
- offset, offset - alignment);
+ offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ offset, offset - alignment);
}
- if (x != plane_state->aux.x || y != plane_state->aux.y) {
+ if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
}
- plane_state->main.offset = offset;
- plane_state->main.x = x;
- plane_state->main.y = y;
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = x;
+ plane_state->color_plane[0].y = y;
return 0;
}
static int
-skl_check_nv12_surface(const struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+skl_check_nv12_surface(struct intel_plane_state *plane_state)
{
/* Display WA #1106 */
if (plane_state->base.rotation !=
@@ -3119,7 +3133,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
u32 offset;
intel_add_fb_offsets(&x, &y, plane_state, 1);
- offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
+ offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
/* FIXME not quite sure how/if these apply to the chroma plane */
if (w > max_width || h > max_height) {
@@ -3128,9 +3142,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
- plane_state->aux.offset = offset;
- plane_state->aux.x = x;
- plane_state->aux.y = y;
+ plane_state->color_plane[1].offset = offset;
+ plane_state->color_plane[1].x = x;
+ plane_state->color_plane[1].y = y;
return 0;
}
@@ -3146,34 +3160,29 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int y = src_y / vsub;
u32 offset;
- if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
- DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
- plane_state->base.rotation);
- return -EINVAL;
- }
-
intel_add_fb_offsets(&x, &y, plane_state, 1);
- offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
+ offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
- plane_state->aux.offset = offset;
- plane_state->aux.x = x * hsub + src_x % hsub;
- plane_state->aux.y = y * vsub + src_y % vsub;
+ plane_state->color_plane[1].offset = offset;
+ plane_state->color_plane[1].x = x * hsub + src_x % hsub;
+ plane_state->color_plane[1].y = y * vsub + src_y % vsub;
return 0;
}
-int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
int ret;
- if (rotation & DRM_MODE_REFLECT_X &&
- fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
- return -EINVAL;
- }
+ intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
+ plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
+ plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);
+
+ ret = intel_plane_check_stride(plane_state);
+ if (ret)
+ return ret;
if (!plane_state->base.visible)
return 0;
@@ -3189,7 +3198,7 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
* the main surface setup depends on it.
*/
if (fb->format->format == DRM_FORMAT_NV12) {
- ret = skl_check_nv12_surface(crtc_state, plane_state);
+ ret = skl_check_nv12_surface(plane_state);
if (ret)
return ret;
ret = skl_check_nv12_aux_surface(plane_state);
@@ -3200,18 +3209,45 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
if (ret)
return ret;
} else {
- plane_state->aux.offset = ~0xfff;
- plane_state->aux.x = 0;
- plane_state->aux.y = 0;
+ plane_state->color_plane[1].offset = ~0xfff;
+ plane_state->color_plane[1].x = 0;
+ plane_state->color_plane[1].y = 0;
}
- ret = skl_check_main_surface(crtc_state, plane_state);
+ ret = skl_check_main_surface(plane_state);
if (ret)
return ret;
return 0;
}
+unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ if (!HAS_GMCH_DISPLAY(dev_priv)) {
+ return 32*1024;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return 16*1024;
+ else
+ return 32*1024;
+ } else if (INTEL_GEN(dev_priv) >= 3) {
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return 8*1024;
+ else
+ return 16*1024;
+ } else {
+ if (plane->i9xx_plane == PLANE_C)
+ return 4*1024;
+ else
+ return 8*1024;
+ }
+}
+
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -3278,21 +3314,30 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
int src_x = plane_state->base.src.x1 >> 16;
int src_y = plane_state->base.src.y1 >> 16;
u32 offset;
+ int ret;
+
+ intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
+ plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
+
+ ret = intel_plane_check_stride(plane_state);
+ if (ret)
+ return ret;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
- offset = intel_compute_tile_offset(&src_x, &src_y,
- plane_state, 0);
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
else
offset = 0;
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- unsigned int rotation = plane_state->base.rotation;
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
int src_h = drm_rect_height(&plane_state->base.src) >> 16;
@@ -3304,9 +3349,43 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
}
}
- plane_state->main.offset = offset;
- plane_state->main.x = src_x;
- plane_state->main.y = src_y;
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int
+i9xx_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ int ret;
+
+ ret = chv_plane_check_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+ &crtc_state->base,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
+
+ if (!plane_state->base.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
return 0;
}
@@ -3316,20 +3395,19 @@ static void i9xx_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
i915_reg_t reg = DSPCNTR(i9xx_plane);
- int x = plane_state->main.x;
- int y = plane_state->main.y;
+ int x = plane_state->color_plane[0].x;
+ int y = plane_state->color_plane[0].y;
unsigned long irqflags;
u32 dspaddr_offset;
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
- dspaddr_offset = plane_state->main.offset;
+ dspaddr_offset = plane_state->color_plane[0].offset;
else
dspaddr_offset = linear_offset;
@@ -3353,7 +3431,7 @@ static void i9xx_update_plane(struct intel_plane *plane,
I915_WRITE_FW(reg, dspcntr);
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), fb->pitches[0]);
+ I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
@@ -3428,12 +3506,12 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
}
static u32
-intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return 64;
else
- return intel_tile_width_bytes(fb, plane);
+ return intel_tile_width_bytes(fb, color_plane);
}
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -3463,24 +3541,24 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
}
}
-u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
- unsigned int rotation)
+u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+ int color_plane)
{
- u32 stride;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ u32 stride = plane_state->color_plane[color_plane].stride;
- if (plane >= fb->format->num_planes)
+ if (color_plane >= fb->format->num_planes)
return 0;
- stride = intel_fb_pitch(fb, plane, rotation);
-
/*
	 * The stride is either expressed in multiples of 64-byte chunks for
	 * linear buffers or as a number of tiles for tiled buffers.
*/
if (drm_rotation_90_or_270(rotation))
- stride /= intel_tile_height(fb, plane);
+ stride /= intel_tile_height(fb, color_plane);
else
- stride /= intel_fb_stride_alignment(fb, plane);
+ stride /= intel_fb_stride_alignment(fb, color_plane);
return stride;
}
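
As the comment above says, the programmed stride value is not in bytes: linear surfaces use 64-byte units, tiled surfaces use whole tiles, and 90/270 rotation divides by the tile height because the rotated view is walked in pixels. A hedged arithmetic sketch, with tile geometry picked for illustration (X-tile at 512 bytes x 8 rows is one real example, not the only case here):

/* Illustrative geometry only. */
enum { LINEAR_UNIT = 64, TILE_W_BYTES = 512, TILE_H_ROWS = 8 };

static unsigned int hw_stride_units(unsigned int stride, int tiled,
				    int rotated_90_270)
{
	if (!tiled)
		return stride / LINEAR_UNIT;	/* 64-byte chunks */
	if (rotated_90_270)
		return stride / TILE_H_ROWS;	/* stride is in pixels here */
	return stride / TILE_W_BYTES;		/* whole tiles */
}

/* e.g. a 4096-byte linear stride programs as 4096 / 64 = 64. */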
@@ -5883,6 +5961,17 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (port == PORT_NONE)
+ return false;
+
+ if (IS_ICELAKE(dev_priv))
+ return port <= PORT_B;
+
+ return false;
+}
+
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
{
if (IS_ICELAKE(dev_priv))
@@ -6014,6 +6103,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_set_pipeconf(intel_crtc);
+ intel_color_set_csc(&pipe_config->base);
+
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -6113,8 +6204,8 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
assert_pipe_disabled(dev_priv, crtc->pipe);
- DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
- I915_READ(PFIT_CONTROL));
+ DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
+ I915_READ(PFIT_CONTROL));
I915_WRITE(PFIT_CONTROL, 0);
}
@@ -8634,8 +8725,8 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
ironlake_compute_dpll(crtc, crtc_state, NULL);
if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
+ DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
}
@@ -8805,6 +8896,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
+ plane_config->tiling = I915_TILING_Y;
if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
else
@@ -9202,8 +9294,8 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
+ DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
}
}
@@ -9592,7 +9684,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
else
base = intel_plane_ggtt_offset(plane_state);
- base += plane_state->main.offset;
+ base += plane_state->color_plane[0].offset;
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev_priv) &&
@@ -9635,55 +9727,86 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
height > 0 && height <= config->cursor_height;
}
-static int intel_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
int src_x, src_y;
u32 offset;
int ret;
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
+ plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
+
+ ret = intel_plane_check_stride(plane_state);
if (ret)
return ret;
- if (!fb)
- return 0;
-
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("cursor cannot be tiled\n");
- return -EINVAL;
- }
-
src_x = plane_state->base.src_x >> 16;
src_y = plane_state->base.src_y >> 16;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
- offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
if (src_x != 0 || src_y != 0) {
DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
return -EINVAL;
}
- plane_state->main.offset = offset;
+ plane_state->color_plane[0].offset = offset;
return 0;
}
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
+ int ret;
+
+ if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+ &crtc_state->base,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (!plane_state->base.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_cursor_check_surface(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int
+i845_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return 2048;
+}
+
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
return CURSOR_ENABLE |
CURSOR_GAMMA_ENABLE |
CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(fb->pitches[0]);
+ CURSOR_STRIDE(plane_state->color_plane[0].stride);
}
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
@@ -9719,6 +9842,9 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
+ WARN_ON(plane_state->base.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
switch (fb->pitches[0]) {
case 256:
case 512:
@@ -9807,6 +9933,14 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
return ret;
}
+static unsigned int
+i9xx_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return plane->base.dev->mode_config.cursor_width * 4;
+}
+
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -9912,6 +10046,9 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
+ WARN_ON(plane_state->base.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
fb->pitches[0], plane_state->base.crtc_w);
@@ -12982,7 +13119,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
}
vma = intel_pin_and_fence_fb_obj(fb,
- plane_state->base.rotation,
+ &plane_state->view,
intel_plane_uses_fence(plane_state),
&plane_state->flags);
if (IS_ERR(vma))
@@ -13160,19 +13297,17 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
}
int
-skl_max_scale(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- uint32_t pixel_format)
+skl_max_scale(const struct intel_crtc_state *crtc_state,
+ u32 pixel_format)
{
- struct drm_i915_private *dev_priv;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int max_scale, mult;
int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
- if (!intel_crtc || !crtc_state->base.enable)
+ if (!crtc_state->base.enable)
return DRM_PLANE_HELPER_NO_SCALING;
- dev_priv = to_i915(intel_crtc->base.dev);
-
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
@@ -13196,61 +13331,6 @@ skl_max_scale(struct intel_crtc *intel_crtc,
return max_scale;
}
-static int
-intel_check_primary_plane(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *state)
-{
- struct intel_plane *plane = to_intel_plane(state->base.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_crtc *crtc = state->base.crtc;
- int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
- bool can_position = false;
- int ret;
- uint32_t pixel_format = 0;
-
- if (INTEL_GEN(dev_priv) >= 9) {
- /* use scaler when colorkey is not required */
- if (!state->ckey.flags) {
- min_scale = 1;
- if (state->base.fb)
- pixel_format = state->base.fb->format->format;
- max_scale = skl_max_scale(to_intel_crtc(crtc),
- crtc_state, pixel_format);
- }
- can_position = true;
- }
-
- ret = drm_atomic_helper_check_plane_state(&state->base,
- &crtc_state->base,
- min_scale, max_scale,
- can_position, true);
- if (ret)
- return ret;
-
- if (!state->base.fb)
- return 0;
-
- if (INTEL_GEN(dev_priv) >= 9) {
- ret = skl_check_plane_surface(crtc_state, state);
- if (ret)
- return ret;
-
- state->ctl = skl_plane_ctl(crtc_state, state);
- } else {
- ret = i9xx_check_plane_surface(state);
- if (ret)
- return ret;
-
- state->ctl = i9xx_plane_ctl(crtc_state, state);
- }
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- state->color_ctl = glk_plane_color_ctl(crtc_state, state);
-
- return 0;
-}
-
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -13672,12 +13752,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->base.state = &state->base;
- primary->can_scale = false;
- primary->max_downscale = 1;
- if (INTEL_GEN(dev_priv) >= 9) {
- primary->can_scale = true;
+ if (INTEL_GEN(dev_priv) >= 9)
state->scaler_id = -1;
- }
primary->pipe = pipe;
/*
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
@@ -13704,8 +13780,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
}
- primary->check_plane = intel_check_primary_plane;
-
if (INTEL_GEN(dev_priv) >= 9) {
primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
PLANE_PRIMARY);
@@ -13723,9 +13797,11 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
else
modifiers = skl_format_modifiers_noccs;
+ primary->max_stride = skl_plane_max_stride;
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
primary->get_hw_state = skl_plane_get_hw_state;
+ primary->check_plane = skl_plane_check;
plane_funcs = &skl_plane_funcs;
} else if (INTEL_GEN(dev_priv) >= 4) {
@@ -13733,9 +13809,11 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(i965_primary_formats);
modifiers = i9xx_format_modifiers;
+ primary->max_stride = i9xx_plane_max_stride;
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+ primary->check_plane = i9xx_plane_check;
plane_funcs = &i965_plane_funcs;
} else {
@@ -13743,9 +13821,11 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(i8xx_primary_formats);
modifiers = i9xx_format_modifiers;
+ primary->max_stride = i9xx_plane_max_stride;
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+ primary->check_plane = i9xx_plane_check;
plane_funcs = &i8xx_plane_funcs;
}
@@ -13842,19 +13922,19 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
cursor->base.state = &state->base;
- cursor->can_scale = false;
- cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
cursor->id = PLANE_CURSOR;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ cursor->max_stride = i845_cursor_max_stride;
cursor->update_plane = i845_update_cursor;
cursor->disable_plane = i845_disable_cursor;
cursor->get_hw_state = i845_cursor_get_hw_state;
cursor->check_plane = i845_check_cursor;
} else {
+ cursor->max_stride = i9xx_cursor_max_stride;
cursor->update_plane = i9xx_update_cursor;
cursor->disable_plane = i9xx_disable_cursor;
cursor->get_hw_state = i9xx_cursor_get_hw_state;
@@ -14380,31 +14460,18 @@ static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format)
{
- u32 gen = INTEL_GEN(dev_priv);
+ struct intel_crtc *crtc;
+ struct intel_plane *plane;
- if (gen >= 9) {
- int cpp = drm_format_plane_cpp(pixel_format, 0);
+ /*
+ * We assume the primary plane for pipe A has
+ * the highest stride limits of them all.
+ */
+ crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ plane = to_intel_plane(crtc->base.primary);
- /* "The stride in bytes must not exceed the of the size of 8K
- * pixels and 32K bytes."
- */
- return min(8192 * cpp, 32768);
- } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
- return 32*1024;
- } else if (gen >= 4) {
- if (fb_modifier == I915_FORMAT_MOD_X_TILED)
- return 16*1024;
- else
- return 32*1024;
- } else if (gen >= 3) {
- if (fb_modifier == I915_FORMAT_MOD_X_TILED)
- return 8*1024;
- else
- return 16*1024;
- } else {
- /* XXX DSPC is limited to 4k tiled */
- return 8*1024;
- }
+ return plane->max_stride(plane, pixel_format, fb_modifier,
+ DRM_MODE_ROTATE_0);
}
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
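
intel_fb_pitch_limit() no longer open-codes the per-generation ladder; it queries the max_stride() hook of pipe A's primary plane, assumed to be the most permissive. A minimal sketch of this vtable-style capability query (struct and names hypothetical, simplified from the intel_plane hook introduced by this series):

#include <stdint.h>

struct sketch_plane {
	unsigned int (*max_stride)(const struct sketch_plane *plane,
				   uint32_t pixel_format, uint64_t modifier,
				   unsigned int rotation);
};

/* Ask the most permissive plane for its limit instead of duplicating
 * per-generation knowledge at the framebuffer layer. */
static unsigned int fb_pitch_limit(const struct sketch_plane *primary_a,
				   uint32_t pixel_format, uint64_t modifier)
{
	return primary_a->max_stride(primary_a, pixel_format, modifier,
				     0 /* no rotation */);
}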
@@ -15424,17 +15491,6 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(DPLL(pipe));
}
-static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
- struct intel_plane *plane)
-{
- enum pipe pipe;
-
- if (!plane->get_hw_state(plane, &pipe))
- return true;
-
- return pipe == crtc->pipe;
-}
-
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
@@ -15446,13 +15502,20 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
+ struct intel_crtc *plane_crtc;
+ enum pipe pipe;
- if (intel_plane_mapping_ok(crtc, plane))
+ if (!plane->get_hw_state(plane, &pipe))
+ continue;
+
+ if (pipe == crtc->pipe)
continue;
DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
plane->base.name);
- intel_plane_disable_noatomic(crtc, plane);
+
+ plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ intel_plane_disable_noatomic(plane_crtc, plane);
}
}
@@ -15500,13 +15563,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
}
- /* restore vblank interrupts to correct state */
- drm_crtc_vblank_reset(&crtc->base);
if (crtc->active) {
struct intel_plane *plane;
- drm_crtc_vblank_on(&crtc->base);
-
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
const struct intel_plane_state *plane_state =
@@ -15624,23 +15683,32 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
}
/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct intel_crtc *crtc)
+static void readout_plane_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
+ struct intel_crtc *crtc;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- enum pipe pipe;
+ struct intel_crtc_state *crtc_state;
+ enum pipe pipe = PIPE_A;
bool visible;
visible = plane->get_hw_state(plane, &pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
intel_set_plane_visible(crtc_state, plane_state, visible);
}
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ fixup_active_planes(crtc_state);
+ }
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15672,13 +15740,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (crtc_state->base.active)
dev_priv->active_crtcs |= 1 << crtc->pipe;
- readout_plane_state(crtc);
-
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
enableddisabled(crtc_state->base.active));
}
+ readout_plane_state(dev_priv);
+
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -15848,7 +15916,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
int i;
@@ -15861,15 +15928,23 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(dev_priv);
- intel_sanitize_plane_mapping(dev_priv);
+ /*
+ * intel_sanitize_plane_mapping() may need to do vblank
+ * waits, so we need vblank interrupts restored beforehand.
+ */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ drm_crtc_vblank_reset(&crtc->base);
- for_each_intel_encoder(dev, encoder) {
- intel_sanitize_encoder(encoder);
+ if (crtc->active)
+ drm_crtc_vblank_on(&crtc->base);
}
- for_each_pipe(dev_priv, pipe) {
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ intel_sanitize_plane_mapping(dev_priv);
+
+ for_each_intel_encoder(dev, encoder)
+ intel_sanitize_encoder(encoder);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6b4c19123f2a..3fae4dab295f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -557,6 +557,22 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
return true;
}
+static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
+ int link_rate,
+ uint8_t lane_count)
+{
+ const struct drm_display_mode *fixed_mode =
+ intel_dp->attached_connector->panel.fixed_mode;
+ int mode_rate, max_rate;
+
+ mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
+ max_rate = intel_dp_max_data_rate(link_rate, lane_count);
+ if (mode_rate > max_rate)
+ return false;
+
+ return true;
+}
+
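
The new helper refuses a link-training fallback that could no longer carry the eDP panel's fixed mode at 18 bpp, since pruning that mode would leave the panel with nothing to display. A hedged sketch of the bandwidth comparison; the 8b/10b overhead accounting is my assumption about what the intel_dp_* helpers compute, not a quote of them:

#include <stdbool.h>

/* Bytes of pixel data per second needed by the mode, rounded up. */
static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 7) / 8;
}

/* Usable payload after 8b/10b channel-coding overhead. */
static int max_data_rate(int link_rate_khz, int lane_count)
{
	return link_rate_khz * lane_count * 8 / 10;
}

static bool fallback_fits(int fixed_mode_clock_khz,
			  int link_rate_khz, int lane_count)
{
	return link_required(fixed_mode_clock_khz, 18) <=
	       max_data_rate(link_rate_khz, lane_count);
}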
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count)
{
@@ -566,9 +582,23 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
intel_dp->num_common_rates,
link_rate);
if (index > 0) {
+ if (intel_dp_is_edp(intel_dp) &&
+ !intel_dp_can_link_train_fallback_for_edp(intel_dp,
+ intel_dp->common_rates[index - 1],
+ lane_count)) {
+ DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+ return 0;
+ }
intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
intel_dp->max_link_lane_count = lane_count;
} else if (lane_count > 1) {
+ if (intel_dp_is_edp(intel_dp) &&
+ !intel_dp_can_link_train_fallback_for_edp(intel_dp,
+ intel_dp_max_common_rate(intel_dp),
+ lane_count >> 1)) {
+ DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+ return 0;
+ }
intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
intel_dp->max_link_lane_count = lane_count >> 1;
} else {
@@ -3704,7 +3734,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
signal_levels = bxt_signal_levels(intel_dp);
} else if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index a9f40985a621..30be0e39bd5f 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -367,22 +367,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
return;
failure_handling:
- /* Dont fallback and prune modes if its eDP */
- if (!intel_dp_is_edp(intel_dp)) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
- if (!intel_dp_get_link_train_fallback_values(intel_dp,
- intel_dp->link_rate,
- intel_dp->lane_count))
- /* Schedule a Hotplug Uevent to userspace to start modeset */
- schedule_work(&intel_connector->modeset_retry_work);
- } else {
- DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
- }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
+ if (!intel_dp_get_link_train_fallback_values(intel_dp,
+ intel_dp->link_rate,
+ intel_dp->lane_count))
+ /* Schedule a Hotplug Uevent to userspace to start modeset */
+ schedule_work(&intel_connector->modeset_retry_work);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 43db2e9ac575..7f155b4f1a7d 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -38,11 +38,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
+ struct drm_connector *connector = conn_state->connector;
+ void *port = to_intel_connector(connector)->port;
struct drm_atomic_state *state = pipe_config->base.state;
int bpp;
- int lane_count, slots;
+ int lane_count, slots = 0;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
@@ -70,17 +70,23 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
- if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port))
+ if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
pipe_config->has_audio = true;
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
- slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- connector->port, mst_pbn);
- if (slots < 0) {
- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
- return false;
+ /* Zombie connectors can't have VCPI slots */
+ if (READ_ONCE(connector->registered)) {
+ slots = drm_dp_atomic_find_vcpi_slots(state,
+ &intel_dp->mst_mgr,
+ port,
+ mst_pbn);
+ if (slots < 0) {
+ DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
+ slots);
+ return false;
+ }
}
intel_link_compute_m_n(bpp, lane_count,
@@ -307,9 +313,8 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (!intel_dp) {
+ if (!READ_ONCE(connector->registered))
return intel_connector_update_modes(connector, NULL);
- }
edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
ret = intel_connector_update_modes(connector, edid);
@@ -324,9 +329,10 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!intel_dp)
+ if (!READ_ONCE(connector->registered))
return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
+ return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
+ intel_connector->port);
}
static void
@@ -366,7 +372,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
- if (!intel_dp)
+ if (!READ_ONCE(connector->registered))
return MODE_ERROR;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -398,8 +404,6 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
struct intel_dp *intel_dp = intel_connector->mst_port;
struct intel_crtc *crtc = to_intel_crtc(state->crtc);
- if (!intel_dp)
- return NULL;
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
@@ -499,7 +503,6 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
@@ -508,10 +511,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
if (dev_priv->fbdev)
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
connector);
- /* prevent race with the check in ->detect */
- drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL);
- intel_connector->mst_port = NULL;
- drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
drm_connector_put(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f5731215210a..f8dc84b2d2d3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -497,18 +497,21 @@ struct intel_atomic_state {
struct intel_plane_state {
struct drm_plane_state base;
+ struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long flags;
#define PLANE_HAS_FENCE BIT(0)
struct {
u32 offset;
+ /*
+ * Plane stride in:
+ * bytes for 0/180 degree rotation
+ * pixels for 90/270 degree rotation
+ */
+ u32 stride;
int x, y;
- } main;
- struct {
- u32 offset;
- int x, y;
- } aux;
+ } color_plane[2];
/* plane control register */
u32 ctl;
@@ -950,10 +953,8 @@ struct intel_plane {
enum i9xx_plane_id i9xx_plane;
enum plane_id id;
enum pipe pipe;
- bool can_scale;
bool has_fbc;
bool has_ccs;
- int max_downscale;
uint32_t frontbuffer_bit;
struct {
@@ -966,6 +967,9 @@ struct intel_plane {
* the intel_plane_state structure and accessed via plane_state.
*/
+ unsigned int (*max_stride)(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
@@ -1442,7 +1446,7 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
struct drm_atomic_state *old_state);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int plane, unsigned int height);
+ int color_plane, unsigned int height);
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
@@ -1513,6 +1517,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
@@ -1565,7 +1570,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- unsigned int rotation,
+ const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags);
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
@@ -1614,8 +1619,6 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-u32 intel_compute_tile_offset(int *x, int *y,
- const struct intel_plane_state *state, int plane);
void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1645,8 +1648,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
- uint32_t pixel_format);
+int skl_max_scale(const struct intel_crtc_state *crtc_state,
+ u32 pixel_format);
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
@@ -1658,12 +1661,14 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_color_ctl(const struct intel_plane_state *plane_state);
-u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
- unsigned int rotation);
-int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
+u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+ int plane);
+int skl_check_plane_surface(struct intel_plane_state *plane_state);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
@@ -2131,6 +2136,14 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
+unsigned int skl_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int skl_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+int intel_plane_check_stride(const struct intel_plane_state *plane_state);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 10cd051ba29e..217ed3ee1cab 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -990,6 +990,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
}
local_bh_enable();
+ /* Otherwise flush the tasklet if it was on another cpu */
+ tasklet_unlock_wait(t);
+
if (READ_ONCE(engine->execlists.active))
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 01d1d2088f04..74d425c700ef 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -670,8 +670,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
cache->plane.visible = plane_state->base.visible;
- cache->plane.adjusted_x = plane_state->main.x;
- cache->plane.adjusted_y = plane_state->main.y;
+ cache->plane.adjusted_x = plane_state->color_plane[0].x;
+ cache->plane.adjusted_y = plane_state->color_plane[0].y;
cache->plane.y = plane_state->base.src.y1 >> 16;
if (!cache->plane.visible)
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb2f9fce34cd..f99332972b7a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -175,6 +175,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ const struct i915_ggtt_view view = {
+ .type = I915_GGTT_VIEW_NORMAL,
+ };
struct fb_info *info;
struct drm_framebuffer *fb;
struct i915_vma *vma;
@@ -214,8 +217,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* BIOS is suitable for own access.
*/
vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
- DRM_MODE_ROTATE_0,
- false, &flags);
+ &view, false, &flags);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 07b9d313b019..a81f04d46e87 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -557,16 +557,36 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
- engine)->lrc_desc);
+ struct intel_context *ce = to_intel_context(client->owner, engine);
u32 data[7];
- /*
- * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
- * See guc_fill_preempt_context().
- */
+ if (!ce->ring->emit) { /* recreate upon load/resume */
+ u32 addr = intel_hws_preempt_done_address(engine);
+ u32 *cs;
+
+ cs = ce->ring->vaddr;
+ if (engine->id == RCS) {
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ GUC_PREEMPT_FINISHED,
+ addr);
+ } else {
+ cs = gen8_emit_ggtt_write(cs,
+ GUC_PREEMPT_FINISHED,
+ addr);
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
+ GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
+
+ flush_ggtt_writes(ce->ring->vma);
+ }
+
spin_lock_irq(&client->wq_lock);
- guc_wq_item_append(client, engine->guc_id, ctx_desc,
+ guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
spin_unlock_irq(&client->wq_lock);
@@ -1044,50 +1064,6 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce)
#undef SR_DISABLED
}
-static void guc_fill_preempt_context(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_guc_client *client = guc->preempt_client;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce =
- to_intel_context(client->owner, engine);
- u32 addr = intel_hws_preempt_done_address(engine);
- u32 *cs;
-
- GEM_BUG_ON(!ce->pin_count);
-
- /*
- * We rely on this context image *not* being saved after
- * preemption. This ensures that the RING_HEAD / RING_TAIL
- * remain pointing at initial values forever.
- */
- GEM_BUG_ON(!ctx_save_restore_disabled(ce));
-
- cs = ce->ring->vaddr;
- if (id == RCS) {
- cs = gen8_emit_ggtt_write_rcs(cs,
- GUC_PREEMPT_FINISHED,
- addr);
- } else {
- cs = gen8_emit_ggtt_write(cs,
- GUC_PREEMPT_FINISHED,
- addr);
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
- GUC_PREEMPT_BREADCRUMB_BYTES);
-
- flush_ggtt_writes(ce->ring->vma);
- }
-}
-
static int guc_clients_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1118,8 +1094,6 @@ static int guc_clients_create(struct intel_guc *guc)
return PTR_ERR(client);
}
guc->preempt_client = client;
-
- guc_fill_preempt_context(guc);
}
return 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b1f0e5211a0..43957bb37a42 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1294,7 +1294,7 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ err = i915_gem_object_set_to_wc_domain(vma->obj, true);
if (err)
return err;
}
@@ -1322,7 +1322,9 @@ __execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto err;
- vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(ctx->i915) |
+ I915_MAP_OVERRIDE);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto unpin_vma;
@@ -1338,11 +1340,13 @@ __execlists_context_pin(struct intel_engine_cs *engine,
intel_lr_context_descriptor_update(ctx, engine, ce);
+ GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
+
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
- GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
- ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
+ ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring->head;
+ ce->lrc_reg_state[CTX_RING_TAIL + 1] = ce->ring->tail;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
@@ -2392,7 +2396,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
ret = intel_engine_init_common(engine);
if (ret)
- goto error;
+ return ret;
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = i915->regs +
@@ -2434,10 +2438,6 @@ static int logical_ring_init(struct intel_engine_cs *engine)
reset_csb_pointers(execlists);
return 0;
-
-error:
- intel_logical_ring_cleanup(engine);
- return ret;
}
int logical_render_ring_init(struct intel_engine_cs *engine)
@@ -2460,10 +2460,14 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
+ ret = logical_ring_init(engine);
if (ret)
return ret;
+ ret = intel_engine_create_scratch(engine, PAGE_SIZE);
+ if (ret)
+ goto err_cleanup_common;
+
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2475,7 +2479,11 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- return logical_ring_init(engine);
+ return 0;
+
+err_cleanup_common:
+ intel_engine_cleanup_common(engine);
+ return ret;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
@@ -2841,13 +2849,14 @@ error_deref_obj:
return ret;
}
-void intel_lr_context_resume(struct drm_i915_private *dev_priv)
+void intel_lr_context_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- /* Because we emit WA_TAIL_DWORDS there may be a disparity
+ /*
+ * Because we emit WA_TAIL_DWORDS there may be a disparity
* between our bookkeeping in ce->ring->head and ce->ring->tail and
* that stored in context. As we only write new commands from
* ce->ring->tail onwards, everything before that is junk. If the GPU
@@ -2857,28 +2866,22 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- for_each_engine(engine, dev_priv, id) {
+ list_for_each_entry(ctx, &i915->contexts.list, link) {
+ for_each_engine(engine, i915, id) {
struct intel_context *ce =
to_intel_context(ctx, engine);
- u32 *reg;
if (!ce->state)
continue;
- reg = i915_gem_object_pin_map(ce->state->obj,
- I915_MAP_WB);
- if (WARN_ON(IS_ERR(reg)))
- continue;
-
- reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
- reg[CTX_RING_HEAD+1] = 0;
- reg[CTX_RING_TAIL+1] = 0;
+ intel_ring_reset(ce->ring, 0);
- ce->state->obj->mm.dirty = true;
- i915_gem_object_unpin_map(ce->state->obj);
+ if (ce->pin_count) { /* otherwise done in context_pin */
+ u32 *regs = ce->lrc_reg_state;
- intel_ring_reset(ce->ring, 0);
+ regs[CTX_RING_HEAD + 1] = ce->ring->head;
+ regs[CTX_RING_TAIL + 1] = ce->ring->tail;
+ }
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 443dfaefd7a6..72eb7e48e8bc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -487,23 +487,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
overlay->active = false;
}
-struct put_image_params {
- int format;
- short dst_x;
- short dst_y;
- short dst_w;
- short dst_h;
- short src_w;
- short src_scan_h;
- short src_scan_w;
- short src_h;
- short stride_Y;
- short stride_UV;
- int offset_Y;
- int offset_U;
- int offset_V;
-};
-
static int packed_depth_bytes(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
@@ -618,25 +601,25 @@ static void update_polyphase_filter(struct overlay_registers __iomem *regs)
static bool update_scaling_factors(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs,
- struct put_image_params *params)
+ struct drm_intel_overlay_put_image *params)
{
/* fixed point with a 12 bit shift */
u32 xscale, yscale, xscale_UV, yscale_UV;
#define FP_SHIFT 12
#define FRACT_MASK 0xfff
bool scale_changed = false;
- int uv_hscale = uv_hsubsampling(params->format);
- int uv_vscale = uv_vsubsampling(params->format);
+ int uv_hscale = uv_hsubsampling(params->flags);
+ int uv_vscale = uv_vsubsampling(params->flags);
- if (params->dst_w > 1)
- xscale = ((params->src_scan_w - 1) << FP_SHIFT)
- /(params->dst_w);
+ if (params->dst_width > 1)
+ xscale = ((params->src_scan_width - 1) << FP_SHIFT) /
+ params->dst_width;
else
xscale = 1 << FP_SHIFT;
- if (params->dst_h > 1)
- yscale = ((params->src_scan_h - 1) << FP_SHIFT)
- /(params->dst_h);
+ if (params->dst_height > 1)
+ yscale = ((params->src_scan_height - 1) << FP_SHIFT) /
+ params->dst_height;
else
yscale = 1 << FP_SHIFT;
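
Editor's note: the scale factors above are computed in x.12 fixed point. A standalone sketch of the same math with a worked value (illustrative, not the driver's code):

```c
#include <stdio.h>
#include <stdint.h>

#define FP_SHIFT   12		/* 12 fractional bits */
#define FRACT_MASK 0xfff

/* Same shape as update_scaling_factors(): (src - 1) / dst in x.12
 * fixed point, with dst <= 1 clamped to a 1.0 scale factor. */
static uint32_t overlay_scale(uint32_t src_scan, uint32_t dst)
{
	if (dst > 1)
		return ((src_scan - 1) << FP_SHIFT) / dst;
	return 1 << FP_SHIFT;
}

int main(void)
{
	uint32_t s = overlay_scale(1280, 640);	/* ~2x downscale */

	printf("scale = %u.%03u\n", s >> FP_SHIFT,
	       ((s & FRACT_MASK) * 1000) >> FP_SHIFT);
	return 0;
}
```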
@@ -713,12 +696,12 @@ static void update_colorkey(struct intel_overlay *overlay,
iowrite32(flags, &regs->DCLRKM);
}
-static u32 overlay_cmd_reg(struct put_image_params *params)
+static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
{
u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
- if (params->format & I915_OVERLAY_YUV_PLANAR) {
- switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ if (params->flags & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
case I915_OVERLAY_YUV422:
cmd |= OCMD_YUV_422_PLANAR;
break;
@@ -731,7 +714,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
break;
}
} else { /* YUV packed */
- switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
case I915_OVERLAY_YUV422:
cmd |= OCMD_YUV_422_PACKED;
break;
@@ -740,7 +723,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
break;
}
- switch (params->format & I915_OVERLAY_SWAP_MASK) {
+ switch (params->flags & I915_OVERLAY_SWAP_MASK) {
case I915_OVERLAY_NO_SWAP:
break;
case I915_OVERLAY_UV_SWAP:
@@ -760,7 +743,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
- struct put_image_params *params)
+ struct drm_intel_overlay_put_image *params)
{
struct overlay_registers __iomem *regs = overlay->regs;
struct drm_i915_private *dev_priv = overlay->i915;
@@ -806,35 +789,40 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
- iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
- iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
+ iowrite32(params->dst_y << 16 | params->dst_x, &regs->DWINPOS);
+ iowrite32(params->dst_height << 16 | params->dst_width, &regs->DWINSZ);
- if (params->format & I915_OVERLAY_YUV_PACKED)
- tmp_width = packed_width_bytes(params->format, params->src_w);
+ if (params->flags & I915_OVERLAY_YUV_PACKED)
+ tmp_width = packed_width_bytes(params->flags,
+ params->src_width);
else
- tmp_width = params->src_w;
+ tmp_width = params->src_width;
- swidth = params->src_w;
+ swidth = params->src_width;
swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
- sheight = params->src_h;
+ sheight = params->src_height;
iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
- if (params->format & I915_OVERLAY_YUV_PLANAR) {
- int uv_hscale = uv_hsubsampling(params->format);
- int uv_vscale = uv_vsubsampling(params->format);
+ if (params->flags & I915_OVERLAY_YUV_PLANAR) {
+ int uv_hscale = uv_hsubsampling(params->flags);
+ int uv_vscale = uv_vsubsampling(params->flags);
u32 tmp_U, tmp_V;
- swidth |= (params->src_w/uv_hscale) << 16;
+
+ swidth |= (params->src_width / uv_hscale) << 16;
+ sheight |= (params->src_height / uv_vscale) << 16;
+
tmp_U = calc_swidthsw(dev_priv, params->offset_U,
- params->src_w/uv_hscale);
+ params->src_width / uv_hscale);
tmp_V = calc_swidthsw(dev_priv, params->offset_V,
- params->src_w/uv_hscale);
- swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
- sheight |= (params->src_h/uv_vscale) << 16;
+ params->src_width / uv_hscale);
+ swidthsw |= max(tmp_U, tmp_V) << 16;
+
iowrite32(i915_ggtt_offset(vma) + params->offset_U,
&regs->OBUF_0U);
iowrite32(i915_ggtt_offset(vma) + params->offset_V,
&regs->OBUF_0V);
+
ostride |= params->stride_UV << 16;
}
@@ -938,15 +926,16 @@ static int check_overlay_dst(struct intel_overlay *overlay,
return -EINVAL;
}
-static int check_overlay_scaling(struct put_image_params *rec)
+static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
{
u32 tmp;
/* downscaling limit is 8.0 */
- tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+ tmp = ((rec->src_scan_height << 16) / rec->dst_height) >> 16;
if (tmp > 7)
return -EINVAL;
- tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+
+ tmp = ((rec->src_scan_width << 16) / rec->dst_width) >> 16;
if (tmp > 7)
return -EINVAL;
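
Editor's note: the check above enforces the 8.0 downscale limit by taking the integer part of a 16.16 fixed-point ratio. A standalone sketch (illustrative only):

```c
#include <stdio.h>
#include <stdint.h>

/* The integer part of src/dst in 16.16 fixed point must stay
 * below 8, i.e. anything downscaled by 8.0 or more is rejected. */
static int check_downscale(uint32_t src, uint32_t dst)
{
	uint32_t tmp = ((src << 16) / dst) >> 16;

	return tmp > 7 ? -1 : 0;	/* -EINVAL in the driver */
}

int main(void)
{
	printf("7.0x: %d\n", check_downscale(700, 100));	/* ok */
	printf("8.0x: %d\n", check_downscale(800, 100));	/* rejected */
	return 0;
}
```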
@@ -1067,13 +1056,12 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_intel_overlay_put_image *put_image_rec = data;
+ struct drm_intel_overlay_put_image *params = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
- struct put_image_params *params;
int ret;
overlay = dev_priv->overlay;
@@ -1082,7 +1070,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
- if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+ if (!(params->flags & I915_OVERLAY_ENABLE)) {
drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
@@ -1094,22 +1082,14 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return ret;
}
- params = kmalloc(sizeof(*params), GFP_KERNEL);
- if (!params)
- return -ENOMEM;
-
- drmmode_crtc = drm_crtc_find(dev, file_priv, put_image_rec->crtc_id);
- if (!drmmode_crtc) {
- ret = -ENOENT;
- goto out_free;
- }
+ drmmode_crtc = drm_crtc_find(dev, file_priv, params->crtc_id);
+ if (!drmmode_crtc)
+ return -ENOENT;
crtc = to_intel_crtc(drmmode_crtc);
- new_bo = i915_gem_object_lookup(file_priv, put_image_rec->bo_handle);
- if (!new_bo) {
- ret = -ENOENT;
- goto out_free;
- }
+ new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
+ if (!new_bo)
+ return -ENOENT;
drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
@@ -1145,42 +1125,27 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
overlay->pfit_active = false;
}
- ret = check_overlay_dst(overlay, put_image_rec);
+ ret = check_overlay_dst(overlay, params);
if (ret != 0)
goto out_unlock;
if (overlay->pfit_active) {
- params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+ params->dst_y = (((u32)params->dst_y << 12) /
overlay->pfit_vscale_ratio);
/* shifting right rounds downwards, so add 1 */
- params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+ params->dst_height = (((u32)params->dst_height << 12) /
overlay->pfit_vscale_ratio) + 1;
- } else {
- params->dst_y = put_image_rec->dst_y;
- params->dst_h = put_image_rec->dst_height;
}
- params->dst_x = put_image_rec->dst_x;
- params->dst_w = put_image_rec->dst_width;
-
- params->src_w = put_image_rec->src_width;
- params->src_h = put_image_rec->src_height;
- params->src_scan_w = put_image_rec->src_scan_width;
- params->src_scan_h = put_image_rec->src_scan_height;
- if (params->src_scan_h > params->src_h ||
- params->src_scan_w > params->src_w) {
+
+ if (params->src_scan_height > params->src_height ||
+ params->src_scan_width > params->src_width) {
ret = -EINVAL;
goto out_unlock;
}
- ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
+ ret = check_overlay_src(dev_priv, params, new_bo);
if (ret != 0)
goto out_unlock;
- params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
- params->stride_Y = put_image_rec->stride_Y;
- params->stride_UV = put_image_rec->stride_UV;
- params->offset_Y = put_image_rec->offset_Y;
- params->offset_U = put_image_rec->offset_U;
- params->offset_V = put_image_rec->offset_V;
/* Check scaling after src size to prevent a divide-by-zero. */
ret = check_overlay_scaling(params);
@@ -1195,16 +1160,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
- kfree(params);
-
return 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
-out_free:
- kfree(params);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d99e5fabe93c..1db9b8328275 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2875,6 +2875,16 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
}
}
+ /*
+ * WA Level-0 adjustment for 16GB DIMMs: SKL+
+ * If we could not get DIMM info, enable this WA and assume a
+ * 16GB DIMM, to avoid any underrun.
+ */
+ if (!dev_priv->dram_info.valid_dimm ||
+ dev_priv->dram_info.is_16gb_dimm)
+ wm[0] += 1;
+
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
@@ -6108,10 +6118,13 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
u32 val;
/* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv))
+ dev_priv->ipc_enabled = false;
+
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
+ !dev_priv->dram_info.symmetric_memory)
dev_priv->ipc_enabled = false;
- return;
- }
val = I915_READ(DISP_ARB_CTL2);
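
Editor's note: the restructured flow above drops the early return so the DISP_ARB_CTL2 write still executes on SKL. A sketch of the combined enabling rules as this hunk leaves them (assumed semantics, illustrative names):

```c
#include <stdbool.h>
#include <stdio.h>

/* Assumed semantics of the rules above: SKL never gets IPC
 * (WA #0477); KBL/CFL lose it when DRAM is not symmetric
 * (WA #1141); everything else keeps it. */
static bool ipc_enabled(bool is_skl, bool is_kbl_or_cfl,
			bool symmetric_memory)
{
	if (is_skl)
		return false;
	if (is_kbl_or_cfl && !symmetric_memory)
		return false;
	return true;
}

int main(void)
{
	printf("SKL:            %d\n", ipc_enabled(true, false, true));
	printf("KBL asymmetric: %d\n", ipc_enabled(false, true, false));
	printf("KBL symmetric:  %d\n", ipc_enabled(false, true, true));
	return 0;
}
```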
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 472939f5c18f..d0ef50bf930a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1677,9 +1677,26 @@ static int switch_context(struct i915_request *rq)
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
if (ppgtt) {
- ret = load_pd_dir(rq, ppgtt);
- if (ret)
- goto err;
+ int loops;
+
+ /*
+ * Baytrail takes a little more convincing that it really needs
+ * to reload the PD between contexts. It is not just a little
+ * longer, as adding more stalls after the load_pd_dir (i.e.
+ * adding a long loop around flush_pd_dir) is not as effective
+ * as reloading the PD umpteen times. 32 is derived from
+ * experimentation (gem_exec_parallel/fds) and has no good
+ * explanation.
+ */
+ loops = 1;
+ if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
+ loops = 32;
+
+ do {
+ ret = load_pd_dir(rq, ppgtt);
+ if (ret)
+ goto err;
+ } while (--loops);
if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
unwind_mm = intel_engine_flag(engine);
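
Editor's note: a compact standalone illustration of the loop shape added above — one PD load everywhere except Valleyview's blitter engine, which repeats it 32 times (names hypothetical):

```c
#include <stdio.h>

/* Mirrors the do/while shape above: loops is 1 by default and 32
 * for the empirically tuned Valleyview BCS case. */
static int emit_pd_loads(int is_vlv_bcs)
{
	int loops = is_vlv_bcs ? 32 : 1;
	int emitted = 0;

	do {
		emitted++;	/* stands in for load_pd_dir(rq, ppgtt) */
	} while (--loops);

	return emitted;
}

int main(void)
{
	printf("default: %d, vlv bcs: %d\n",
	       emit_pd_loads(0), emit_pd_loads(1));
	return 0;
}
```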
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 480dadb1047b..0fdabce647ab 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1996,6 +1996,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define ICL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B))
@@ -3563,6 +3564,9 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 7. Setup MBUS. */
icl_mbus_init(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
}
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 812fe7b06f87..701372e512a8 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -99,32 +99,13 @@ struct intel_sdvo {
*/
uint16_t hotplug_active;
- /**
- * This is set if we're going to treat the device as TV-out.
- *
- * While we have these nice friendly flags for output types that ought
- * to decide this for us, the S-Video output on our HDMI+S-Video card
- * shows up as RGB1 (VGA).
- */
- bool is_tv;
-
enum port port;
- /**
- * This is set if we treat the device as HDMI, instead of DVI.
- */
- bool is_hdmi;
bool has_hdmi_monitor;
bool has_hdmi_audio;
bool rgb_quant_range_selectable;
/**
- * This is set if we detect output of sdvo device as LVDS and
- * have a valid fixed mode to use with the panel.
- */
- bool is_lvds;
-
- /**
* This is the SDVO LVDS fixed panel mode pointer
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
@@ -172,6 +153,11 @@ struct intel_sdvo_connector {
/* this is to get the range of margin.*/
u32 max_hscan, max_vscan;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
};
struct intel_sdvo_connector_state {
@@ -766,6 +752,7 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
uint16_t clock,
uint16_t width,
uint16_t height)
@@ -778,7 +765,7 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
args.height = height;
args.interlace = 0;
- if (intel_sdvo->is_lvds &&
+ if (IS_LVDS(intel_sdvo_connector) &&
(intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
@@ -1067,6 +1054,7 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
*/
static bool
intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -1077,6 +1065,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+ intel_sdvo_connector,
mode->clock / 10,
mode->hdisplay,
mode->vdisplay))
@@ -1127,6 +1116,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector_state *intel_sdvo_state =
to_intel_sdvo_connector_state(conn_state);
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->base.mode;
@@ -1142,20 +1133,22 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
* timings, even though this isn't really the right place in
* the sequence to do it. Oh well.
*/
- if (intel_sdvo->is_tv) {
+ if (IS_TV(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ intel_sdvo_connector,
mode,
adjusted_mode);
pipe_config->sdvo_tv_clock = true;
- } else if (intel_sdvo->is_lvds) {
+ } else if (IS_LVDS(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
return false;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ intel_sdvo_connector,
mode,
adjusted_mode);
}
@@ -1194,11 +1187,11 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
}
/* Clock computation needs to happen after pixel multiplier. */
- if (intel_sdvo->is_tv)
+ if (IS_TV(intel_sdvo_connector))
i9xx_adjust_sdvo_tv_clock(pipe_config);
/* Set user selected PAR to incoming mode's member */
- if (intel_sdvo->is_hdmi)
+ if (intel_sdvo_connector->is_hdmi)
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
return true;
@@ -1275,6 +1268,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
+ const struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(conn_state->connector);
const struct drm_display_mode *mode = &crtc_state->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
@@ -1304,7 +1299,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
return;
/* lvds has a special fixed output timing. */
- if (intel_sdvo->is_lvds)
+ if (IS_LVDS(intel_sdvo_connector))
intel_sdvo_get_dtd_from_mode(&output_dtd,
intel_sdvo->sdvo_lvds_fixed_mode);
else
@@ -1325,13 +1320,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
- if (intel_sdvo->is_tv &&
+ if (IS_TV(intel_sdvo_connector) &&
!intel_sdvo_set_tv_format(intel_sdvo, conn_state))
return;
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
@@ -1630,6 +1625,8 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1644,7 +1641,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
- if (intel_sdvo->is_lvds) {
+ if (IS_LVDS(intel_sdvo_connector)) {
if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
return MODE_PANEL;
@@ -1759,6 +1756,8 @@ static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(connector);
enum drm_connector_status status;
struct edid *edid;
@@ -1797,7 +1796,7 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
/* DDC bus is shared, match EDID to connector type */
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- if (intel_sdvo->is_hdmi) {
+ if (intel_sdvo_connector->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
intel_sdvo->rgb_quant_range_selectable =
@@ -1875,17 +1874,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
}
- /* May update encoder flag for like clock for SDVO TV, etc.*/
- if (ret == connector_status_connected) {
- intel_sdvo->is_tv = false;
- intel_sdvo->is_lvds = false;
-
- if (response & SDVO_TV_MASK)
- intel_sdvo->is_tv = true;
- if (response & SDVO_LVDS_MASK)
- intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
- }
-
return ret;
}
@@ -2054,16 +2042,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* arranged in priority order.
*/
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
-
- list_for_each_entry(newmode, &connector->probed_modes, head) {
- if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
- intel_sdvo->sdvo_lvds_fixed_mode =
- drm_mode_duplicate(connector->dev, newmode);
-
- intel_sdvo->is_lvds = true;
- break;
- }
- }
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -2555,7 +2533,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
if (INTEL_GEN(dev_priv) >= 4 &&
intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- intel_sdvo->is_hdmi = true;
+ intel_sdvo_connector->is_hdmi = true;
}
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
@@ -2563,7 +2541,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
return false;
}
- if (intel_sdvo->is_hdmi)
+ if (intel_sdvo_connector->is_hdmi)
intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
return true;
@@ -2591,8 +2569,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->controlled_output |= type;
intel_sdvo_connector->output_flag = type;
- intel_sdvo->is_tv = true;
-
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
return false;
@@ -2654,6 +2630,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_display_mode *mode;
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
@@ -2682,6 +2659,19 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
+ intel_sdvo_get_lvds_modes(connector);
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, mode);
+ break;
+ }
+ }
+
+ if (!intel_sdvo->sdvo_lvds_fixed_mode)
+ goto err;
+
return true;
err:
@@ -2692,9 +2682,6 @@ err:
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
- intel_sdvo->is_tv = false;
- intel_sdvo->is_lvds = false;
-
/* In SDVO, the XXX1 function may not exist unless the XXX0 function does. */
if (flags & SDVO_OUTPUT_TMDS0)
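
Editor's note: the common thread in this file's hunks is moving output-type state (is_tv, is_lvds, is_hdmi) off the shared intel_sdvo encoder onto each connector, queried via IS_TV()/IS_LVDS(). A toy standalone sketch of the shape of that change (types and masks illustrative, not the driver's):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative types/masks only, not the driver's definitions. */
struct sdvo_connector {
	const char *name;
	unsigned int output_flag;	/* stands in for SDVO_OUTPUT_* */
	bool is_hdmi;
};

#define SDVO_TV_MASK	0x1
#define IS_TV(c)	((c)->output_flag & SDVO_TV_MASK)

int main(void)
{
	/* one encoder, two connectors with independent state */
	struct sdvo_connector svideo = { "svideo", SDVO_TV_MASK, false };
	struct sdvo_connector hdmi   = { "hdmi", 0, true };

	printf("%s: tv=%d hdmi=%d\n", svideo.name, !!IS_TV(&svideo),
	       svideo.is_hdmi);
	printf("%s: tv=%d hdmi=%d\n", hdmi.name, !!IS_TV(&hdmi),
	       hdmi.is_hdmi);
	return 0;
}
```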
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9600ccfc5b76..5fd2f7bf3927 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -230,6 +230,78 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
#endif
}
+int intel_plane_check_stride(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ u32 stride, max_stride;
+
+ /* FIXME other color planes? */
+ stride = plane_state->color_plane[0].stride;
+ max_stride = plane->max_stride(plane, fb->format->format,
+ fb->modifier, rotation);
+
+ if (stride > max_stride) {
+ DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
+ fb->base.id, stride,
+ plane->base.base.id, plane->base.name, max_stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_rect *src = &plane_state->base.src;
+ u32 src_x, src_y, src_w, src_h;
+
+ /*
+ * Hardware doesn't handle subpixel coordinates.
+ * Adjust to (macro)pixel boundary, but be careful not to
+ * increase the source viewport size, because that could
+ * push the downscaling factor out of bounds.
+ */
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_y = src->y1 >> 16;
+ src_h = drm_rect_height(src) >> 16;
+
+ src->x1 = src_x << 16;
+ src->x2 = (src_x + src_w) << 16;
+ src->y1 = src_y << 16;
+ src->y2 = (src_y + src_h) << 16;
+
+ if (fb->format->is_yuv &&
+ fb->format->format != DRM_FORMAT_NV12 &&
+ (src_x & 1 || src_w & 1)) {
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
+ src_x, src_w);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
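Editor's note: a standalone sketch of the 16.16 fixed-point truncation performed by intel_plane_check_src_coordinates() above, with a worked value (illustrative types):

```c
#include <stdio.h>
#include <stdint.h>

struct rect { int32_t x1, y1, x2, y2; };	/* 16.16 fixed point */

/* Truncate (never round up) to whole pixels, then write the
 * clipped rect back, so the source viewport cannot grow. */
static void clip_to_pixels(struct rect *src)
{
	uint32_t x = src->x1 >> 16;
	uint32_t y = src->y1 >> 16;
	uint32_t w = (src->x2 - src->x1) >> 16;
	uint32_t h = (src->y2 - src->y1) >> 16;

	src->x1 = x << 16;
	src->x2 = (x + w) << 16;
	src->y1 = y << 16;
	src->y2 = (y + h) << 16;
}

int main(void)
{
	/* x: 3.5 .. 103.25 -> x1 = 3, width = 99 whole pixels */
	struct rect r = { (3 << 16) + 0x8000, 0,
			  (103 << 16) + 0x4000, 64 << 16 };

	clip_to_pixels(&r);
	printf("x1=%d w=%d\n", r.x1 >> 16, (r.x2 - r.x1) >> 16);
	return 0;
}
```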
+unsigned int
+skl_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ int cpp = drm_format_plane_cpp(pixel_format, 0);
+
+ /*
+ * "The stride in bytes must not exceed the
+ * of the size of 8K pixels and 32K bytes."
+ */
+ if (drm_rotation_90_or_270(rotation))
+ return min(8192, 32768 / cpp);
+ else
+ return min(8192 * cpp, 32768);
+}
+
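Editor's note: worked values for skl_plane_max_stride() above, as a standalone sketch — the cap is the lesser of 8K pixels and 32K bytes, counted in pixels for rotated strides and bytes otherwise:

```c
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

/* cpp = bytes per pixel; rotated strides are counted in pixels,
 * unrotated ones in bytes, capped by 8K pixels / 32K bytes. */
static int max_stride(int cpp, int rotated)
{
	if (rotated)
		return min_int(8192, 32768 / cpp);
	return min_int(8192 * cpp, 32768);
}

int main(void)
{
	/* 4-byte formats: both limits meet at 8192 px / 32768 bytes */
	printf("cpp=4: rot %d px, unrot %d bytes\n",
	       max_stride(4, 1), max_stride(4, 0));
	/* 2-byte formats: the 8K-pixel limit dominates */
	printf("cpp=2: rot %d px, unrot %d bytes\n",
	       max_stride(2, 1), max_stride(2, 0));
	return 0;
}
```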
void
skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -241,16 +313,15 @@ skl_update_plane(struct intel_plane *plane,
enum pipe pipe = plane->pipe;
u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = plane_state->main.offset;
- unsigned int rotation = plane_state->base.rotation;
- u32 stride = skl_plane_stride(fb, 0, rotation);
- u32 aux_stride = skl_plane_stride(fb, 1, rotation);
+ u32 surf_addr = plane_state->color_plane[0].offset;
+ u32 stride = skl_plane_stride(plane_state, 0);
+ u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->main.x;
- uint32_t y = plane_state->main.y;
+ uint32_t x = plane_state->color_plane[0].x;
+ uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
@@ -277,9 +348,10 @@ skl_update_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
- (plane_state->aux.offset - surf_addr) | aux_stride);
+ (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->aux.y << 16) | plane_state->aux.x);
+ (plane_state->color_plane[1].y << 16) |
+ plane_state->color_plane[1].x);
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
@@ -545,15 +617,15 @@ vlv_update_plane(struct intel_plane *plane,
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u32 sprctl = plane_state->ctl;
- u32 sprsurf_offset = plane_state->main.offset;
+ u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->main.x;
- uint32_t y = plane_state->main.y;
+ uint32_t x = plane_state->color_plane[0].x;
+ uint32_t y = plane_state->color_plane[0].y;
unsigned long irqflags;
/* Sizes are 0 based */
@@ -574,7 +646,8 @@ vlv_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
}
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
+ I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -704,15 +777,15 @@ ivb_update_plane(struct intel_plane *plane,
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 sprctl = plane_state->ctl, sprscale = 0;
- u32 sprsurf_offset = plane_state->main.offset;
+ u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->main.x;
- uint32_t y = plane_state->main.y;
+ uint32_t x = plane_state->color_plane[0].x;
+ uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
@@ -736,7 +809,7 @@ ivb_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
}
- I915_WRITE_FW(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -749,7 +822,7 @@ ivb_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- if (plane->can_scale)
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), sprscale);
I915_WRITE_FW(SPRCTL(pipe), sprctl);
I915_WRITE_FW(SPRSURF(pipe),
@@ -770,7 +843,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
I915_WRITE_FW(SPRCTL(pipe), 0);
/* Can't leave the scaler enabled... */
- if (plane->can_scale)
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), 0);
I915_WRITE_FW(SPRSURF(pipe), 0);
@@ -800,6 +873,14 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static unsigned int
+g4x_sprite_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return 16384;
+}
+
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -868,15 +949,15 @@ g4x_update_plane(struct intel_plane *plane,
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 dvscntr = plane_state->ctl, dvsscale = 0;
- u32 dvssurf_offset = plane_state->main.offset;
+ u32 dvssurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->main.x;
- uint32_t y = plane_state->main.y;
+ uint32_t x = plane_state->color_plane[0].x;
+ uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
@@ -900,7 +981,7 @@ g4x_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
}
- I915_WRITE_FW(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -959,144 +1040,309 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
}
static int
-intel_check_sprite_plane(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *state)
+g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(state->base.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = state->base.fb;
- int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384;
- int max_scale, min_scale;
- bool can_scale;
- int ret;
- uint32_t pixel_format = 0;
-
- if (!fb) {
- state->base.visible = false;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_rect *src = &plane_state->base.src;
+ const struct drm_rect *dst = &plane_state->base.dst;
+ int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int width_bytes;
+ int min_width, min_height;
+
+ crtc_w = drm_rect_width(dst);
+ crtc_h = drm_rect_height(dst);
+
+ src_x = src->x1 >> 16;
+ src_y = src->y1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
+
+ if (src_w == crtc_w && src_h == crtc_h)
return 0;
+
+ min_width = 3;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (src_h & 1) {
+ DRM_DEBUG_KMS("Source height must be even with interlaced modes\n");
+ return -EINVAL;
+ }
+ min_height = 6;
+ } else {
+ min_height = 3;
}
- /* Don't modify another pipe's plane */
- if (plane->pipe != crtc->pipe) {
- DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
+ width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
+
+ if (src_w < min_width || src_h < min_height ||
+ src_w > 2048 || src_h > 2048) {
+ DRM_DEBUG_KMS("Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
+ src_w, src_h, min_width, min_height, 2048, 2048);
return -EINVAL;
}
- /* FIXME check all gen limits */
- if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > max_stride) {
- DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
+ if (width_bytes > 4096) {
+ DRM_DEBUG_KMS("Fetch width (%d) exceeds hardware max with scaling (%u)\n",
+ width_bytes, 4096);
return -EINVAL;
}
- /* setup can_scale, min_scale, max_scale */
- if (INTEL_GEN(dev_priv) >= 9) {
- if (state->base.fb)
- pixel_format = state->base.fb->format->format;
- /* use scaler when colorkey is not required */
- if (!state->ckey.flags) {
- can_scale = 1;
- min_scale = 1;
- max_scale =
- skl_max_scale(crtc, crtc_state, pixel_format);
- } else {
- can_scale = 0;
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
- }
+ if (width_bytes > 4096 || fb->pitches[0] > 4096) {
+ DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
+ fb->pitches[0], 4096);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
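Editor's note: the width_bytes expression above folds the start offset's residue within a 64-byte span into the 4096-byte scaled-fetch limit (the cacheline-alignment rationale is an assumption here, not stated in the patch). A standalone sketch with a worked value:

```c
#include <stdio.h>

/* Residue of the start offset within a 64-byte span plus the
 * payload width; compared against the 4096-byte scaled limit. */
static unsigned int fetch_width_bytes(unsigned int src_x,
				      unsigned int src_w,
				      unsigned int cpp)
{
	return ((src_x * cpp) & 63) + src_w * cpp;
}

int main(void)
{
	/* 2040 px of 16bpp starting 10 px in: 20 bytes of residue */
	unsigned int wb = fetch_width_bytes(10, 2040, 2);

	printf("fetch = %u bytes (%s)\n", wb,
	       wb > 4096 ? "rejected" : "ok");
	return 0;
}
```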
+static int
+g4x_sprite_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ int max_scale, min_scale;
+ int ret;
+
+ if (INTEL_GEN(dev_priv) < 7) {
+ min_scale = 1;
+ max_scale = 16 << 16;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ min_scale = 1;
+ max_scale = 2 << 16;
} else {
- can_scale = plane->can_scale;
- max_scale = plane->max_downscale << 16;
- min_scale = plane->can_scale ? 1 : (1 << 16);
+ min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = DRM_PLANE_HELPER_NO_SCALING;
}
- ret = drm_atomic_helper_check_plane_state(&state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->base,
&crtc_state->base,
min_scale, max_scale,
true, true);
if (ret)
return ret;
- if (state->base.visible) {
- struct drm_rect *src = &state->base.src;
- struct drm_rect *dst = &state->base.dst;
- unsigned int crtc_w = drm_rect_width(dst);
- unsigned int crtc_h = drm_rect_height(dst);
- uint32_t src_x, src_y, src_w, src_h;
+ if (!plane_state->base.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = g4x_sprite_check_scaling(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (INTEL_GEN(dev_priv) >= 7)
+ plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
+ else
+ plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ unsigned int rotation = plane_state->base.rotation;
+
+ /* CHV ignores the mirror bit when the rotate bit is set :( */
+ if (IS_CHERRYVIEW(dev_priv) &&
+ rotation & DRM_MODE_ROTATE_180 &&
+ rotation & DRM_MODE_REFLECT_X) {
+ DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vlv_sprite_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ int ret;
+
+ ret = chv_plane_check_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+ &crtc_state->base,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (!plane_state->base.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ struct drm_format_name_buf format_name;
+
+ if (!fb)
+ return 0;
+
+ if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
+ is_ccs_modifier(fb->modifier)) {
+ DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
+ rotation);
+ return -EINVAL;
+ }
+
+ if (rotation & DRM_MODE_REFLECT_X &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+ return -EINVAL;
+ }
+
+ if (drm_rotation_90_or_270(rotation)) {
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
+ DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+ return -EINVAL;
+ }
/*
- * Hardware doesn't handle subpixel coordinates.
- * Adjust to (macro)pixel boundary, but be careful not to
- * increase the source viewport size, because that could
- * push the downscaling factor out of bounds.
+ * 90/270 is not allowed with RGB64 16:16:16:16,
+ * RGB 16-bit 5:6:5, and Indexed 8-bit.
+ * TBD: Add the RGB64 case once it's added to the supported format list.
*/
- src_x = src->x1 >> 16;
- src_w = drm_rect_width(src) >> 16;
- src_y = src->y1 >> 16;
- src_h = drm_rect_height(src) >> 16;
-
- src->x1 = src_x << 16;
- src->x2 = (src_x + src_w) << 16;
- src->y1 = src_y << 16;
- src->y2 = (src_y + src_h) << 16;
-
- if (fb->format->is_yuv &&
- fb->format->format != DRM_FORMAT_NV12 &&
- (src_x % 2 || src_w % 2)) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
- src_x, src_w);
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
return -EINVAL;
+ default:
+ break;
}
+ }
- /* Check size restrictions when scaling */
- if (src_w != crtc_w || src_h != crtc_h) {
- unsigned int width_bytes;
- int cpp = fb->format->cpp[0];
+ /* Y-tiling is not supported in IF-ID Interlace mode */
+ if (crtc_state->base.enable &&
+ crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
+ DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
- WARN_ON(!can_scale);
+ return 0;
+}
- width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
+static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_w = drm_rect_width(&plane_state->base.dst);
+ int pipe_src_w = crtc_state->pipe_src_w;
- /* FIXME interlacing min height is 6 */
- if (INTEL_GEN(dev_priv) < 9 && (
- src_w < 3 || src_h < 3 ||
- src_w > 2048 || src_h > 2048 ||
- crtc_w < 3 || crtc_h < 3 ||
- width_bytes > 4096 || fb->pitches[0] > 4096)) {
- DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
- return -EINVAL;
- }
- }
+ /*
+ * Display WA #1175: cnl,glk
+ * Planes other than the cursor may cause FIFO underflow and display
+ * corruption if starting less than 4 pixels from the right edge of
+ * the screen.
+ * Besides the above WA, also fix the similar problem where planes
+ * other than the cursor that end less than 4 pixels from the left
+ * edge of the screen may cause FIFO underflow and display corruption.
+ */
+ if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+ (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
+ DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
+ crtc_x + crtc_w < 4 ? "end" : "start",
+ crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+ 4, pipe_src_w - 4);
+ return -ERANGE;
}
- if (INTEL_GEN(dev_priv) >= 9) {
- ret = skl_check_plane_surface(crtc_state, state);
- if (ret)
- return ret;
+ return 0;
+}
- state->ctl = skl_plane_ctl(crtc_state, state);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- ret = i9xx_check_plane_surface(state);
- if (ret)
- return ret;
+int skl_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ int max_scale, min_scale;
+ int ret;
- state->ctl = vlv_sprite_ctl(crtc_state, state);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- ret = i9xx_check_plane_surface(state);
- if (ret)
- return ret;
+ ret = skl_plane_check_fb(crtc_state, plane_state);
+ if (ret)
+ return ret;
- state->ctl = ivb_sprite_ctl(crtc_state, state);
- } else {
- ret = i9xx_check_plane_surface(state);
- if (ret)
- return ret;
+ /* use scaler when colorkey is not required */
+ if (!plane_state->ckey.flags) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
- state->ctl = g4x_sprite_ctl(crtc_state, state);
+ min_scale = 1;
+ max_scale = skl_max_scale(crtc_state,
+ fb ? fb->format->format : 0);
+ } else {
+ min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = DRM_PLANE_HELPER_NO_SCALING;
}
+ ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+ &crtc_state->base,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (!plane_state->base.visible)
+ return 0;
+
+ ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = skl_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- state->color_ctl = glk_plane_color_ctl(crtc_state, state);
+ plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
+ plane_state);
return 0;
}
@@ -1523,15 +1769,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->base.state = &state->base;
if (INTEL_GEN(dev_priv) >= 9) {
- intel_plane->can_scale = true;
state->scaler_id = -1;
intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
PLANE_SPRITE0 + plane);
+ intel_plane->max_stride = skl_plane_max_stride;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->get_hw_state = skl_plane_get_hw_state;
+ intel_plane->check_plane = skl_plane_check;
if (skl_plane_has_planar(dev_priv, pipe,
PLANE_SPRITE0 + plane)) {
@@ -1549,12 +1796,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &skl_plane_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_plane->can_scale = false;
- intel_plane->max_downscale = 1;
-
+ intel_plane->max_stride = i9xx_plane_max_stride;
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
intel_plane->get_hw_state = vlv_plane_get_hw_state;
+ intel_plane->check_plane = vlv_sprite_check;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1562,17 +1808,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
- if (IS_IVYBRIDGE(dev_priv)) {
- intel_plane->can_scale = true;
- intel_plane->max_downscale = 2;
- } else {
- intel_plane->can_scale = false;
- intel_plane->max_downscale = 1;
- }
-
+ intel_plane->max_stride = g4x_sprite_max_stride;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->get_hw_state = ivb_plane_get_hw_state;
+ intel_plane->check_plane = g4x_sprite_check;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1580,12 +1820,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &snb_sprite_funcs;
} else {
- intel_plane->can_scale = true;
- intel_plane->max_downscale = 16;
-
+ intel_plane->max_stride = g4x_sprite_max_stride;
intel_plane->update_plane = g4x_update_plane;
intel_plane->disable_plane = g4x_disable_plane;
intel_plane->get_hw_state = g4x_plane_get_hw_state;
+ intel_plane->check_plane = g4x_sprite_check;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
@@ -1618,7 +1857,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->i9xx_plane = plane;
intel_plane->id = PLANE_SPRITE0 + plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
- intel_plane->check_plane = intel_check_sprite_plane;
possible_crtcs = (1 << pipe);
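
Editor's note: taken together, these hunks finish converting the sprite code to per-generation hooks (max_stride, update_plane, check_plane, ...) selected at plane creation, replacing the monolithic intel_check_sprite_plane(). A toy standalone sketch of the dispatch pattern (types illustrative):

```c
#include <stdio.h>

/* Illustrative types only, not the driver's intel_plane. */
struct plane_funcs {
	const char *name;
	int (*check)(void);
	unsigned int (*max_stride)(void);
};

static int skl_check(void)		{ return 0; }
static unsigned int skl_stride(void)	{ return 32768; }
static int g4x_check(void)		{ return 0; }
static unsigned int g4x_stride(void)	{ return 16384; }

int main(void)
{
	const struct plane_funcs planes[] = {
		{ "skl", skl_check, skl_stride },
		{ "g4x", g4x_check, g4x_stride },
	};
	unsigned int i;

	for (i = 0; i < sizeof(planes) / sizeof(planes[0]); i++)
		printf("%s: max stride %u, check=%d\n", planes[i].name,
		       planes[i].max_stride(), planes[i].check());
	return 0;
}
```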
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 7c95697e1a35..b1b3e81b6e24 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -401,6 +401,10 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
+ } else if (INTEL_GEN(i915) < 11) {
+ ret = intel_guc_sample_forcewake(guc);
+ if (ret)
+ goto err_communication;
}
dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index e272127783fe..8d03f64eabd7 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -235,6 +235,8 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
sg = sg_next(sg);
} while (1);
+ i915_sg_trim(st);
+
obj->mm.madv = I915_MADV_DONTNEED;
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 4e6a221063ac..f7392c1ffe75 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -298,6 +298,7 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@@ -375,6 +376,7 @@ static int igt_gem_coherency(void *arg)
}
}
unlock:
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 1c92560d35da..76df25aa90c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -22,6 +22,8 @@
*
*/
+#include <linux/prime_numbers.h>
+
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
@@ -32,6 +34,200 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
+struct live_test {
+ struct drm_i915_private *i915;
+ const char *func;
+ const char *name;
+
+ unsigned int reset_count;
+};
+
+static int begin_live_test(struct live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name)
+{
+ int err;
+
+ t->i915 = i915;
+ t->func = func;
+ t->name = name;
+
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err) {
+ pr_err("%s(%s): failed to idle before, with err=%d!",
+ func, name, err);
+ return err;
+ }
+
+ i915->gpu_error.missed_irq_rings = 0;
+ t->reset_count = i915_reset_count(&i915->gpu_error);
+
+ return 0;
+}
+
+static int end_live_test(struct live_test *t)
+{
+ struct drm_i915_private *i915 = t->i915;
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ return -EIO;
+
+ if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
+ pr_err("%s(%s): GPU was reset %d times!\n",
+ t->func, t->name,
+ i915_reset_count(&i915->gpu_error) - t->reset_count);
+ return -EIO;
+ }
+
+ if (i915->gpu_error.missed_irq_rings) {
+ pr_err("%s(%s): Missed interrupts on engines %lx\n",
+ t->func, t->name, i915->gpu_error.missed_irq_rings);
+ return -EIO;
+ }
+
+ return 0;
+}
+
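Editor's note: a standalone sketch of the begin/end bracket introduced above (assumed semantics) — record the reset count before the test body and fail if it moved:

```c
#include <stdio.h>

struct live_test { unsigned int reset_count; };

static unsigned int fake_reset_count;	/* stands in for i915_reset_count() */

static void begin_live_test(struct live_test *t)
{
	t->reset_count = fake_reset_count;	/* baseline before the body */
}

static int end_live_test(const struct live_test *t)
{
	if (t->reset_count != fake_reset_count) {
		fprintf(stderr, "GPU was reset %u times!\n",
			fake_reset_count - t->reset_count);
		return -1;	/* -EIO in the selftest */
	}
	return 0;
}

int main(void)
{
	struct live_test t;

	begin_live_test(&t);
	fake_reset_count++;	/* simulate a hang + reset mid-test */
	printf("result: %d\n", end_live_test(&t));
	return 0;
}
```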
+static int live_nop_switch(void *arg)
+{
+ const unsigned int nctx = 1024;
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context **ctx;
+ enum intel_engine_id id;
+ struct drm_file *file;
+ struct live_test t;
+ unsigned long n;
+ int err = -ENODEV;
+
+ /*
+ * Create as many contexts as we can feasibly get away with
+ * and check we can switch between them rapidly.
+ *
+ * Serves as a very simple stress test for submission and HW switching
+ * between contexts.
+ */
+
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
+
+ ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ for (n = 0; n < nctx; n++) {
+ ctx[n] = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx[n])) {
+ err = PTR_ERR(ctx[n]);
+ goto out_unlock;
+ }
+ }
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+ unsigned long end_time, prime;
+ ktime_t times[2] = {};
+
+ times[0] = ktime_get_raw();
+ for (n = 0; n < nctx; n++) {
+ rq = i915_request_alloc(engine, ctx[n]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unlock;
+ }
+ i915_request_add(rq);
+ }
+ if (i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ HZ / 5) < 0) {
+ pr_err("Failed to populated %d contexts\n", nctx);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ times[1] = ktime_get_raw();
+
+ pr_info("Populated %d contexts on %s in %lluns\n",
+ nctx, engine->name, ktime_to_ns(times[1] - times[0]));
+
+ err = begin_live_test(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_unlock;
+
+ end_time = jiffies + i915_selftest.timeout_jiffies;
+ for_each_prime_number_from(prime, 2, 8192) {
+ times[1] = ktime_get_raw();
+
+ for (n = 0; n < prime; n++) {
+ rq = i915_request_alloc(engine, ctx[n % nctx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unlock;
+ }
+
+ /*
+ * This space is left intentionally blank.
+ *
+ * We do not actually want to perform any
+ * action with this request, we just want
+ * to measure the latency in allocation
+ * and submission of our breadcrumbs -
+ * ensuring that the bare request is sufficient
+ * for the system to work (i.e. proper HEAD
+ * tracking of the rings, interrupt handling,
+ * etc). It also gives us the lowest bounds
+ * for latency.
+ */
+
+ i915_request_add(rq);
+ }
+ if (i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ HZ / 5) < 0) {
+ pr_err("Switching between %ld contexts timed out\n",
+ prime);
+ i915_gem_set_wedged(i915);
+ break;
+ }
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 2)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = end_live_test(&t);
+ if (err)
+ goto out_unlock;
+
+ pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ engine->name,
+ ktime_to_ns(times[0]),
+ prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
+ }
+
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ mock_file_free(i915, file);
+ return err;
+}
+
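Editor's note: the measurement loop above reports per-request latency by dividing each batch's time by prime - 1; stepping batch sizes through the primes gives an irregular, roughly geometric progression of sample sizes (that rationale is an assumption). A standalone sketch of the arithmetic:

```c
#include <stdio.h>

static int next_prime(int n)
{
	for (;;) {
		int d;

		n++;
		for (d = 2; d * d <= n; d++)
			if (n % d == 0)
				break;
		if (d * d > n)
			return n;
	}
}

int main(void)
{
	long long ns_per_batch = 100000;	/* pretend: 100us per batch */
	int prime;

	for (prime = 2; prime <= 13; prime = next_prime(prime))
		printf("prime %2d -> %lld ns/request\n",
		       prime, ns_per_batch / (prime - 1));
	return 0;
}
```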
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
@@ -195,6 +391,7 @@ err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
+ i915_vma_put(batch);
err_vma:
i915_vma_unpin(vma);
return err;
@@ -636,6 +833,8 @@ static int igt_switch_to_kernel_context(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
+
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
mutex_unlock(&i915->drm.struct_mutex);
@@ -658,6 +857,8 @@ out_unlock:
GEM_TRACE_DUMP_ON(err);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
+
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
kernel_context_close(ctx);
@@ -713,6 +914,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_switch_to_kernel_context),
+ SUBTEST(live_nop_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
};
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 6d3516d5bff9..c3999dd2021e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -501,6 +501,8 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
+ i915_gem_shrinker_unregister(i915);
+
mutex_lock(&i915->drm.struct_mutex);
if (!i915->gt.active_requests++) {
intel_runtime_pm_get(i915);
@@ -613,6 +615,7 @@ out_park:
else
queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_shrinker_register(i915);
return err;
err_obj:
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index c4aac6141e04..07e557815308 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -342,6 +342,7 @@ static int live_nop_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
@@ -402,6 +403,7 @@ static int live_nop_request(void *arg)
}
out_unlock:
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -487,6 +489,7 @@ static int live_empty_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
@@ -550,6 +553,7 @@ out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -644,6 +648,7 @@ static int live_all_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
err = begin_live_test(&t, i915, __func__, "");
if (err)
@@ -726,6 +731,7 @@ out_request:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -747,6 +753,7 @@ static int live_sequential_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
err = begin_live_test(&t, i915, __func__, "");
if (err)
@@ -853,6 +860,7 @@ out_request:
i915_request_put(request[id]);
}
out_unlock:
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 90ba88c972cf..0c0ab82b6228 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -142,6 +142,7 @@ static int igt_guc_clients(void *args)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@@ -269,6 +270,7 @@ out:
guc_clients_create(guc);
guc_clients_doorbell_init(guc);
unlock:
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -287,6 +289,7 @@ static int igt_guc_doorbells(void *arg)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@@ -379,6 +382,7 @@ out:
guc_client_free(clients[i]);
}
unlock:
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 582566faef09..1aea7a8f2224 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -221,6 +221,7 @@ static int live_sanitycheck(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
if (spinner_init(&spin, i915))
goto err_unlock;
@@ -261,6 +262,7 @@ err_spin:
spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -278,6 +280,7 @@ static int live_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
if (spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -350,6 +353,7 @@ err_spin_hi:
spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -368,6 +372,7 @@ static int live_late_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
if (spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -440,6 +445,7 @@ err_spin_hi:
spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -467,6 +473,7 @@ static int live_preempt_hang(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
if (spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -561,6 +568,7 @@ err_spin_hi:
spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 0d39b3bf0c0d..d1a0923d2f38 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -44,7 +44,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
+ intel_runtime_pm_get(engine->i915);
rq = i915_request_alloc(engine, ctx);
+ intel_runtime_pm_put(engine->i915);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -175,7 +177,10 @@ static int switch_to_scratch_context(struct intel_engine_cs *engine)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ intel_runtime_pm_get(engine->i915);
rq = i915_request_alloc(engine, ctx);
+ intel_runtime_pm_put(engine->i915);
+
kernel_context_close(ctx);
if (IS_ERR(rq))
return PTR_ERR(rq);
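
The pattern repeated through all of these i915 selftest hunks is a simple bracket: take a runtime-PM wakeref before touching the hardware (here, around request allocation) and drop it afterwards, so the device cannot runtime-suspend mid-operation. Below is a minimal userspace C sketch of that wakeref bracket; the mock_* names are illustrative stand-ins, not the kernel's intel_runtime_pm_get()/put() API.

#include <assert.h>
#include <stdio.h>

/* Mock device with a wakeref count; stands in for struct drm_i915_private. */
struct mock_device {
	int wakerefs;	/* >0 means the device must stay powered */
};

static void mock_runtime_pm_get(struct mock_device *dev)
{
	dev->wakerefs++;	/* device may power up lazily on first use */
}

static void mock_runtime_pm_put(struct mock_device *dev)
{
	assert(dev->wakerefs > 0);	/* unbalanced put is a bug */
	dev->wakerefs--;
}

/* Work that requires the device to be awake for its whole duration. */
static int mock_request_alloc(struct mock_device *dev)
{
	assert(dev->wakerefs > 0);	/* a "device suspended" bug otherwise */
	return 0;
}

int main(void)
{
	struct mock_device dev = { .wakerefs = 0 };
	int err;

	mock_runtime_pm_get(&dev);	/* bracket open */
	err = mock_request_alloc(&dev);
	mock_runtime_pm_put(&dev);	/* bracket close, even on error */

	printf("err=%d wakerefs=%d\n", err, dev.wakerefs);
	return err;
}
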
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index ce83c396a742..82ae49c64221 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+
mediatek-drm-y := mtk_disp_color.o \
mtk_disp_ovl.o \
mtk_disp_rdma.o \
@@ -18,6 +19,8 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
mediatek-drm-hdmi-objs := mtk_cec.o \
mtk_hdmi.o \
mtk_hdmi_ddc.o \
- mtk_mt8173_hdmi_phy.o
+ mtk_mt2701_hdmi_phy.o \
+ mtk_mt8173_hdmi_phy.o \
+ mtk_hdmi_phy.o
obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 6c0ea39d5739..62a9d47df948 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -14,10 +14,12 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include <linux/kernel.h>
#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/interrupt.h>
#include <linux/types.h>
@@ -72,12 +74,12 @@ struct mtk_dpi {
struct clk *tvd_clk;
int irq;
struct drm_display_mode mode;
+ const struct mtk_dpi_conf *conf;
enum mtk_dpi_out_color_format color_format;
enum mtk_dpi_out_yc_map yc_map;
enum mtk_dpi_out_bit_num bit_num;
enum mtk_dpi_out_channel_swap channel_swap;
- bool power_sta;
- u8 power_ctl;
+ int refcount;
};
static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e)
@@ -90,11 +92,6 @@ enum mtk_dpi_polarity {
MTK_DPI_POLARITY_FALLING,
};
-enum mtk_dpi_power_ctl {
- DPI_POWER_START = BIT(0),
- DPI_POWER_ENABLE = BIT(1),
-};
-
struct mtk_dpi_polarities {
enum mtk_dpi_polarity de_pol;
enum mtk_dpi_polarity ck_pol;
@@ -116,6 +113,12 @@ struct mtk_dpi_yc_limit {
u16 c_bottom;
};
+struct mtk_dpi_conf {
+ unsigned int (*cal_factor)(int clock);
+ u32 reg_h_fre_con;
+ bool edge_sel_en;
+};
+
static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
{
u32 tmp = readl(dpi->regs + offset) & ~mask;
@@ -341,7 +344,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
{
- mtk_dpi_mask(dpi, DPI_H_FRE_CON, H_FRE_2N, H_FRE_2N);
+ mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
+}
+
+static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
+{
+ if (dpi->conf->edge_sel_en)
+ mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
}
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
@@ -367,40 +376,30 @@ static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
}
}
-static void mtk_dpi_power_off(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
+static void mtk_dpi_power_off(struct mtk_dpi *dpi)
{
- dpi->power_ctl &= ~pctl;
-
- if ((dpi->power_ctl & DPI_POWER_START) ||
- (dpi->power_ctl & DPI_POWER_ENABLE))
+ if (WARN_ON(dpi->refcount == 0))
return;
- if (!dpi->power_sta)
+ if (--dpi->refcount != 0)
return;
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
clk_disable_unprepare(dpi->engine_clk);
- dpi->power_sta = false;
}
-static int mtk_dpi_power_on(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
+static int mtk_dpi_power_on(struct mtk_dpi *dpi)
{
int ret;
- dpi->power_ctl |= pctl;
-
- if (!(dpi->power_ctl & DPI_POWER_START) &&
- !(dpi->power_ctl & DPI_POWER_ENABLE))
- return 0;
-
- if (dpi->power_sta)
+ if (++dpi->refcount != 1)
return 0;
ret = clk_prepare_enable(dpi->engine_clk);
if (ret) {
dev_err(dpi->dev, "Failed to enable engine clock: %d\n", ret);
- goto err_eng;
+ goto err_refcount;
}
ret = clk_prepare_enable(dpi->pixel_clk);
@@ -410,13 +409,12 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
}
mtk_dpi_enable(dpi);
- dpi->power_sta = true;
return 0;
err_pixel:
clk_disable_unprepare(dpi->engine_clk);
-err_eng:
- dpi->power_ctl &= ~pctl;
+err_refcount:
+ dpi->refcount--;
return ret;
}
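
The rewrite above replaces the DPI_POWER_START/DPI_POWER_ENABLE flag pair with a plain refcount: only the 0-to-1 transition enables clocks, only the 1-to-0 transition disables them, and the error path undoes the increment. A small userspace C model of that refcount discipline follows; names are illustrative, not the driver's API.

#include <stdio.h>

struct mock_dpi {
	int refcount;	/* how many users currently need the block powered */
	int powered;	/* models the clocks/hardware being enabled */
};

static int mock_power_on(struct mock_dpi *dpi)
{
	if (++dpi->refcount != 1)
		return 0;	/* already on for an earlier user */
	dpi->powered = 1;	/* first user: actually enable */
	return 0;
}

static void mock_power_off(struct mock_dpi *dpi)
{
	if (dpi->refcount == 0)
		return;		/* WARN_ON() in the driver: unbalanced off */
	if (--dpi->refcount != 0)
		return;		/* another user still needs it */
	dpi->powered = 0;	/* last user gone: actually disable */
}

int main(void)
{
	struct mock_dpi dpi = { 0, 0 };

	mock_power_on(&dpi);	/* e.g. mtk_dpi_start()  -> powers on */
	mock_power_on(&dpi);	/* e.g. encoder enable   -> no-op */
	mock_power_off(&dpi);	/* encoder disable       -> still on */
	printf("after one off: powered=%d\n", dpi.powered);	/* 1 */
	mock_power_off(&dpi);	/* mtk_dpi_stop()        -> powers off */
	printf("after both off: powered=%d\n", dpi.powered);	/* 0 */
	return 0;
}
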
@@ -435,15 +433,7 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
unsigned int factor;
/* Keep pll_rate inside the valid TVDPLL lock range (1 GHz to 2 GHz) */
-
- if (mode->clock <= 27000)
- factor = 3 << 4;
- else if (mode->clock <= 84000)
- factor = 3 << 3;
- else if (mode->clock <= 167000)
- factor = 3 << 2;
- else
- factor = 3 << 1;
+ factor = dpi->conf->cal_factor(mode->clock);
drm_display_mode_to_videomode(mode, &vm);
pll_rate = vm.pixelclock * factor;
@@ -518,6 +508,7 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
mtk_dpi_config_yc_map(dpi, dpi->yc_map);
mtk_dpi_config_color_format(dpi, dpi->color_format);
mtk_dpi_config_2n_h_fre(dpi);
+ mtk_dpi_config_disable_edge(dpi);
mtk_dpi_sw_reset(dpi, false);
return 0;
@@ -552,14 +543,14 @@ static void mtk_dpi_encoder_disable(struct drm_encoder *encoder)
{
struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
- mtk_dpi_power_off(dpi, DPI_POWER_ENABLE);
+ mtk_dpi_power_off(dpi);
}
static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
{
struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
- mtk_dpi_power_on(dpi, DPI_POWER_ENABLE);
+ mtk_dpi_power_on(dpi);
mtk_dpi_set_display_mode(dpi, &dpi->mode);
}
@@ -582,14 +573,14 @@ static void mtk_dpi_start(struct mtk_ddp_comp *comp)
{
struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
- mtk_dpi_power_on(dpi, DPI_POWER_START);
+ mtk_dpi_power_on(dpi);
}
static void mtk_dpi_stop(struct mtk_ddp_comp *comp)
{
struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
- mtk_dpi_power_off(dpi, DPI_POWER_START);
+ mtk_dpi_power_off(dpi);
}
static const struct mtk_ddp_comp_funcs mtk_dpi_funcs = {
@@ -656,12 +647,46 @@ static const struct component_ops mtk_dpi_component_ops = {
.unbind = mtk_dpi_unbind,
};
+static unsigned int mt8173_calculate_factor(int clock)
+{
+ if (clock <= 27000)
+ return 3 << 4;
+ else if (clock <= 84000)
+ return 3 << 3;
+ else if (clock <= 167000)
+ return 3 << 2;
+ else
+ return 3 << 1;
+}
+
+static unsigned int mt2701_calculate_factor(int clock)
+{
+ if (clock <= 64000)
+ return 16;
+ else if (clock <= 128000)
+ return 8;
+ else if (clock <= 256000)
+ return 4;
+ else
+ return 2;
+}
+
+static const struct mtk_dpi_conf mt8173_conf = {
+ .cal_factor = mt8173_calculate_factor,
+ .reg_h_fre_con = 0xe0,
+};
+
+static const struct mtk_dpi_conf mt2701_conf = {
+ .cal_factor = mt2701_calculate_factor,
+ .reg_h_fre_con = 0xb0,
+ .edge_sel_en = true,
+};
+
static int mtk_dpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_dpi *dpi;
struct resource *mem;
- struct device_node *bridge_node;
int comp_id;
int ret;
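
The per-SoC cal_factor hook exists to keep the TVDPLL inside its lock range: the driver multiplies the pixel clock by the returned factor to get pll_rate, then divides back down. Worked example for the MT8173 table above (clock is in kHz): a 148.5 MHz mode falls in the clock <= 167000 bucket, so factor = 3 << 2 = 12 and pll_rate = 148.5 MHz x 12 = 1.782 GHz, inside the 1 GHz to 2 GHz window the comment in mtk_dpi_set_display_mode refers to. A tiny self-checking C sketch of that invariant:

#include <stdio.h>

/* MT8173 bucket table from the hunk above; clock is in kHz. */
static unsigned int mt8173_calculate_factor(int clock)
{
	if (clock <= 27000)
		return 3 << 4;	/* 48 */
	else if (clock <= 84000)
		return 3 << 3;	/* 24 */
	else if (clock <= 167000)
		return 3 << 2;	/* 12 */
	else
		return 3 << 1;	/* 6 */
}

int main(void)
{
	/* A few common pixel clocks, in kHz. */
	const int modes[] = { 25175, 74250, 148500, 297000 };

	for (unsigned i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		unsigned long long pll_hz =
			(unsigned long long)modes[i] * 1000ULL *
			mt8173_calculate_factor(modes[i]);
		/* pll_rate must land inside the TVDPLL window */
		printf("%6d kHz -> pll %llu Hz %s\n", modes[i], pll_hz,
		       (pll_hz >= 1000000000ULL && pll_hz <= 2000000000ULL)
				? "(in 1-2 GHz range)" : "(OUT OF RANGE)");
	}
	return 0;
}
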
@@ -670,6 +695,7 @@ static int mtk_dpi_probe(struct platform_device *pdev)
return -ENOMEM;
dpi->dev = dev;
+ dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dpi->regs = devm_ioremap_resource(dev, mem);
@@ -706,16 +732,12 @@ static int mtk_dpi_probe(struct platform_device *pdev)
return -EINVAL;
}
- bridge_node = of_graph_get_remote_node(dev->of_node, 0, 0);
- if (!bridge_node)
- return -ENODEV;
-
- dev_info(dev, "Found bridge node: %pOF\n", bridge_node);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
+ NULL, &dpi->bridge);
+ if (ret)
+ return ret;
- dpi->bridge = of_drm_find_bridge(bridge_node);
- of_node_put(bridge_node);
- if (!dpi->bridge)
- return -EPROBE_DEFER;
+ dev_info(dev, "Found bridge node: %pOF\n", dpi->bridge->of_node);
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
if (comp_id < 0) {
@@ -749,8 +771,13 @@ static int mtk_dpi_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_dpi_of_ids[] = {
- { .compatible = "mediatek,mt8173-dpi", },
- {}
+ { .compatible = "mediatek,mt2701-dpi",
+ .data = &mt2701_conf,
+ },
+ { .compatible = "mediatek,mt8173-dpi",
+ .data = &mt8173_conf,
+ },
+ { },
};
struct platform_driver mtk_dpi_driver = {
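
With two compatibles now in mtk_dpi_of_ids, probe picks the per-SoC config purely from the matched entry's .data via of_device_get_match_data(). A minimal userspace C model of that compatible-string-to-config lookup; mock types only, not the OF API.

#include <stdio.h>
#include <string.h>

struct mock_dpi_conf {
	unsigned int reg_h_fre_con;
	int edge_sel_en;
};

static const struct mock_dpi_conf mt8173_conf = { 0xe0, 0 };
static const struct mock_dpi_conf mt2701_conf = { 0xb0, 1 };

/* Mirrors struct of_device_id: a compatible string plus opaque data. */
struct mock_of_device_id {
	const char *compatible;
	const void *data;
};

static const struct mock_of_device_id mock_dpi_of_ids[] = {
	{ "mediatek,mt2701-dpi", &mt2701_conf },
	{ "mediatek,mt8173-dpi", &mt8173_conf },
	{ NULL, NULL },
};

/* Stand-in for of_device_get_match_data(): first matching entry wins. */
static const void *mock_get_match_data(const char *compatible)
{
	for (const struct mock_of_device_id *id = mock_dpi_of_ids;
	     id->compatible; id++)
		if (!strcmp(id->compatible, compatible))
			return id->data;
	return NULL;
}

int main(void)
{
	const struct mock_dpi_conf *conf =
		mock_get_match_data("mediatek,mt2701-dpi");

	if (!conf)
		return 1;
	printf("H_FRE_CON at 0x%x, edge_sel_en=%d\n",
	       conf->reg_h_fre_con, conf->edge_sel_en);
	return 0;
}
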
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
index 4b6ad4751a31..d9db8c4cacd7 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
+++ b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
@@ -223,6 +223,6 @@
#define ESAV_CODE2 (0xFFF << 0)
#define ESAV_CODE3_MSB BIT(16)
-#define DPI_H_FRE_CON 0xE0
+#define EDGE_SEL_EN BIT(5)
#define H_FRE_2N BIT(25)
#endif /* __MTK_DPI_REGS_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 546b3e3b300b..579ce28d801d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -39,6 +39,7 @@
#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
#define DISP_REG_CONFIG_OUT_SEL 0x04c
#define DISP_REG_CONFIG_DSI_SEL 0x050
+#define DISP_REG_CONFIG_DPI_SEL 0x064
#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
@@ -136,7 +137,10 @@
#define OVL_MOUT_EN_RDMA 0x1
#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
#define DSI_SEL_IN_BLS 0x0
+#define DPI_SEL_IN_BLS 0x0
+#define DSI_SEL_IN_RDMA 0x1
struct mtk_disp_mutex {
int id;
@@ -339,9 +343,17 @@ static void mtk_ddp_sout_sel(void __iomem *config_regs,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next)
{
- if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0)
+ if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
config_regs + DISP_REG_CONFIG_OUT_SEL);
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
+ writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+ writel_relaxed(DSI_SEL_IN_RDMA,
+ config_regs + DISP_REG_CONFIG_DSI_SEL);
+ writel_relaxed(DPI_SEL_IN_BLS,
+ config_regs + DISP_REG_CONFIG_DPI_SEL);
+ }
}
void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index ff974d82a4a6..54ca794db3e9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -294,7 +294,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
comp->irq = of_irq_get(node, 0);
comp->clk = of_clk_get(node, 0);
if (IS_ERR(comp->clk))
- comp->clk = NULL;
+ return PTR_ERR(comp->clk);
/* Only DMA capable components need the LARB property */
comp->larb_dev = NULL;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 47ec604289b7..6422e99952fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -424,6 +424,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi",
.data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt2701-dpi",
+ .data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8173-dpi",
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt2701-disp-mutex",
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 2d45d1dd9554..11e3644da79a 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -233,6 +233,7 @@ static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
{
struct arm_smccc_res res;
+ struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(hdmi->phy);
/*
* MT8173 HDMI hardware has an output control bit to enable/disable HDMI
@@ -240,8 +241,13 @@ static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
* The ARM trusted firmware provides an API for the HDMI driver to set
* this control bit to enable HDMI output in supervisor mode.
*/
- arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, 0x80000000,
- 0, 0, 0, 0, 0, &res);
+ if (hdmi_phy->conf && hdmi_phy->conf->tz_disabled)
+ regmap_update_bits(hdmi->sys_regmap,
+ hdmi->sys_offset + HDMI_SYS_CFG20,
+ 0x80008005, enable ? 0x80000005 : 0x8000);
+ else
+ arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904,
+ 0x80000000, 0, 0, 0, 0, 0, &res);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0);
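
The new tz_disabled flag selects between two ways of flipping the same output-enable state: on MT8173 the bit sits behind TrustZone, so the driver asks firmware via an SMC, while parts without that protection can write HDMI_SYS_CFG20 directly. A small C sketch of that backend split under a config flag; the mock functions only model the shape of the choice, not the real firmware interface.

#include <stdio.h>

struct mock_hdmi {
	int tz_disabled;	/* config flag: no TrustZone gate on this SoC */
	unsigned int sys_cfg20;	/* models the HDMI_SYS_CFG20 register */
};

/* Direct path: plain read-modify-write of the system register. */
static void mock_regmap_update(struct mock_hdmi *h, unsigned int mask,
			       unsigned int val)
{
	h->sys_cfg20 = (h->sys_cfg20 & ~mask) | (val & mask);
}

/* Secure path: pretend firmware toggles the bit on our behalf. */
static void mock_smc_enable_output(struct mock_hdmi *h)
{
	printf("smc: asking firmware to authorize HDMI output\n");
	h->sys_cfg20 |= 0x80000000u;
}

static void mock_make_reg_writable(struct mock_hdmi *h, int enable)
{
	if (h->tz_disabled)
		mock_regmap_update(h, 0x80008005u,
				   enable ? 0x80000005u : 0x8000u);
	else
		mock_smc_enable_output(h);
}

int main(void)
{
	struct mock_hdmi mt2701 = { .tz_disabled = 1 };
	struct mock_hdmi mt8173 = { .tz_disabled = 0 };

	mock_make_reg_writable(&mt2701, 1);	/* direct register write */
	mock_make_reg_writable(&mt8173, 1);	/* goes through firmware */
	printf("mt2701 CFG20=0x%x, mt8173 CFG20=0x%x\n",
	       mt2701.sys_cfg20, mt8173.sys_cfg20);
	return 0;
}
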
@@ -1575,6 +1581,11 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
+ case HDMI_SPDIF:
+ hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
+ break;
default:
dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
daifmt->fmt);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
index 6371b3de1ff6..3e9fb8d19802 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.h
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h
@@ -13,11 +13,11 @@
*/
#ifndef _MTK_HDMI_CTRL_H
#define _MTK_HDMI_CTRL_H
+#include "mtk_hdmi_phy.h"
struct platform_driver;
extern struct platform_driver mtk_cec_driver;
extern struct platform_driver mtk_hdmi_ddc_driver;
-extern struct platform_driver mtk_hdmi_phy_driver;
#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
new file mode 100644
index 000000000000..4ef9c57ffd44
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ */
+
+#include "mtk_hdmi_phy.h"
+
+static int mtk_hdmi_phy_power_on(struct phy *phy);
+static int mtk_hdmi_phy_power_off(struct phy *phy);
+
+static const struct phy_ops mtk_hdmi_phy_dev_ops = {
+ .power_on = mtk_hdmi_phy_power_on,
+ .power_off = mtk_hdmi_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ hdmi_phy->pll_rate = rate;
+ if (rate <= 74250000)
+ *parent_rate = rate;
+ else
+ *parent_rate = rate / 2;
+
+ return rate;
+}
+
+unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ return hdmi_phy->pll_rate;
+}
+
+void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 bits)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 bits)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 val, u32 mask)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ writel(tmp, reg);
+}
+
+inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct mtk_hdmi_phy, pll_hw);
+}
+
+static int mtk_hdmi_phy_power_on(struct phy *phy)
+{
+ struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(hdmi_phy->pll);
+ if (ret < 0)
+ return ret;
+
+ hdmi_phy->conf->hdmi_phy_enable_tmds(hdmi_phy);
+ return 0;
+}
+
+static int mtk_hdmi_phy_power_off(struct phy *phy)
+{
+ struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+
+ hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
+ clk_disable_unprepare(hdmi_phy->pll);
+
+ return 0;
+}
+
+static const struct phy_ops *
+mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
+{
+ if (hdmi_phy && hdmi_phy->conf &&
+ hdmi_phy->conf->hdmi_phy_enable_tmds &&
+ hdmi_phy->conf->hdmi_phy_disable_tmds)
+ return &mtk_hdmi_phy_dev_ops;
+
+ if (hdmi_phy)
+ dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
+ return NULL;
+}
+
+static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy,
+ const struct clk_ops **ops)
+{
+ if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops)
+ *ops = hdmi_phy->conf->hdmi_phy_clk_ops;
+ else if (hdmi_phy)
+ dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n");
+}
+
+static int mtk_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_hdmi_phy *hdmi_phy;
+ struct resource *mem;
+ struct clk *ref_clk;
+ const char *ref_clk_name;
+ struct clk_init_data clk_init = {
+ .num_parents = 1,
+ .parent_names = (const char * const *)&ref_clk_name,
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
+ };
+
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+ int ret;
+
+ hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
+ if (!hdmi_phy)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi_phy->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(hdmi_phy->regs)) {
+ ret = PTR_ERR(hdmi_phy->regs);
+ dev_err(dev, "Failed to get memory resource: %d\n", ret);
+ return ret;
+ }
+
+ ref_clk = devm_clk_get(dev, "pll_ref");
+ if (IS_ERR(ref_clk)) {
+ ret = PTR_ERR(ref_clk);
+ dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
+ ret);
+ return ret;
+ }
+ ref_clk_name = __clk_get_name(ref_clk);
+
+ ret = of_property_read_string(dev->of_node, "clock-output-names",
+ &clk_init.name);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
+ return ret;
+ }
+
+ hdmi_phy->dev = dev;
+ hdmi_phy->conf =
+ (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
+ mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops);
+ hdmi_phy->pll_hw.init = &clk_init;
+ hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
+ if (IS_ERR(hdmi_phy->pll)) {
+ ret = PTR_ERR(hdmi_phy->pll);
+ dev_err(dev, "Failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
+ &hdmi_phy->ibias);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
+ &hdmi_phy->ibias_up);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
+ hdmi_phy->drv_imp_clk = 0x30;
+ hdmi_phy->drv_imp_d2 = 0x30;
+ hdmi_phy->drv_imp_d1 = 0x30;
+ hdmi_phy->drv_imp_d0 = 0x30;
+
+ phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy));
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Failed to create HDMI PHY\n");
+ return PTR_ERR(phy);
+ }
+ phy_set_drvdata(phy, hdmi_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "Failed to register HDMI PHY\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
+ hdmi_phy->pll);
+}
+
+static const struct of_device_id mtk_hdmi_phy_match[] = {
+ { .compatible = "mediatek,mt2701-hdmi-phy",
+ .data = &mtk_hdmi_phy_2701_conf,
+ },
+ { .compatible = "mediatek,mt8173-hdmi-phy",
+ .data = &mtk_hdmi_phy_8173_conf,
+ },
+ {},
+};
+
+struct platform_driver mtk_hdmi_phy_driver = {
+ .probe = mtk_hdmi_phy_probe,
+ .driver = {
+ .name = "mediatek-hdmi-phy",
+ .of_match_table = mtk_hdmi_phy_match,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
new file mode 100644
index 000000000000..f39b1fc66612
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Chunhui Dai <chunhui.dai@mediatek.com>
+ */
+
+#ifndef _MTK_HDMI_PHY_H
+#define _MTK_HDMI_PHY_H
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct mtk_hdmi_phy;
+
+struct mtk_hdmi_phy_conf {
+ bool tz_disabled;
+ const struct clk_ops *hdmi_phy_clk_ops;
+ void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
+ void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
+};
+
+struct mtk_hdmi_phy {
+ void __iomem *regs;
+ struct device *dev;
+ struct mtk_hdmi_phy_conf *conf;
+ struct clk *pll;
+ struct clk_hw pll_hw;
+ unsigned long pll_rate;
+ unsigned char drv_imp_clk;
+ unsigned char drv_imp_d2;
+ unsigned char drv_imp_d1;
+ unsigned char drv_imp_d0;
+ unsigned int ibias;
+ unsigned int ibias_up;
+};
+
+void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 bits);
+void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 bits);
+void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 val, u32 mask);
+struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
+long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate);
+unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+extern struct platform_driver mtk_hdmi_phy_driver;
+extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
+extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf;
+
+#endif /* _MTK_HDMI_PHY_H */
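
The split into a shared core plus per-SoC files hangs off mtk_hdmi_phy_conf: each SoC supplies clk ops and TMDS enable/disable callbacks, and the core validates the table before handing out phy_ops, as mtk_hdmi_phy_dev_get_ops() does above. Below is a compact C model of that validated function-pointer dispatch, with mock names throughout.

#include <stdio.h>

struct mock_phy;

/* Per-SoC hook table, mirroring struct mtk_hdmi_phy_conf. */
struct mock_phy_conf {
	void (*enable_tmds)(struct mock_phy *phy);
	void (*disable_tmds)(struct mock_phy *phy);
};

struct mock_phy {
	const char *soc;
	const struct mock_phy_conf *conf;
};

static void mt8173_enable(struct mock_phy *p)  { printf("%s: tmds on\n", p->soc); }
static void mt8173_disable(struct mock_phy *p) { printf("%s: tmds off\n", p->soc); }

static const struct mock_phy_conf mock_8173_conf = {
	.enable_tmds  = mt8173_enable,
	.disable_tmds = mt8173_disable,
};

/* Core refuses to operate on an incomplete hook table. */
static int mock_phy_power_on(struct mock_phy *phy)
{
	if (!phy->conf || !phy->conf->enable_tmds || !phy->conf->disable_tmds)
		return -1;	/* would map to "no phy_ops" in the driver */
	phy->conf->enable_tmds(phy);
	return 0;
}

int main(void)
{
	struct mock_phy good = { "mt8173", &mock_8173_conf };
	struct mock_phy bad  = { "broken", NULL };

	printf("good: %d\n", mock_phy_power_on(&good));	/* 0 */
	printf("bad:  %d\n", mock_phy_power_on(&bad));	/* -1 */
	return 0;
}
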
diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
new file mode 100644
index 000000000000..fcc42dc6ea7f
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Chunhui Dai <chunhui.dai@mediatek.com>
+ */
+
+#include "mtk_hdmi_phy.h"
+
+#define HDMI_CON0 0x00
+#define RG_HDMITX_DRV_IBIAS 0
+#define RG_HDMITX_DRV_IBIAS_MASK (0x3f << 0)
+#define RG_HDMITX_EN_SER 12
+#define RG_HDMITX_EN_SER_MASK (0x0f << 12)
+#define RG_HDMITX_EN_SLDO 16
+#define RG_HDMITX_EN_SLDO_MASK (0x0f << 16)
+#define RG_HDMITX_EN_PRED 20
+#define RG_HDMITX_EN_PRED_MASK (0x0f << 20)
+#define RG_HDMITX_EN_IMP 24
+#define RG_HDMITX_EN_IMP_MASK (0x0f << 24)
+#define RG_HDMITX_EN_DRV 28
+#define RG_HDMITX_EN_DRV_MASK (0x0f << 28)
+
+#define HDMI_CON1 0x04
+#define RG_HDMITX_PRED_IBIAS 18
+#define RG_HDMITX_PRED_IBIAS_MASK (0x0f << 18)
+#define RG_HDMITX_PRED_IMP (0x01 << 22)
+#define RG_HDMITX_DRV_IMP 26
+#define RG_HDMITX_DRV_IMP_MASK (0x3f << 26)
+
+#define HDMI_CON2 0x08
+#define RG_HDMITX_EN_TX_CKLDO (0x01 << 0)
+#define RG_HDMITX_EN_TX_POSDIV (0x01 << 1)
+#define RG_HDMITX_TX_POSDIV 3
+#define RG_HDMITX_TX_POSDIV_MASK (0x03 << 3)
+#define RG_HDMITX_EN_MBIAS (0x01 << 6)
+#define RG_HDMITX_MBIAS_LPF_EN (0x01 << 7)
+
+#define HDMI_CON4 0x10
+#define RG_HDMITX_RESERVE_MASK (0xffffffff << 0)
+
+#define HDMI_CON6 0x18
+#define RG_HTPLL_BR 0
+#define RG_HTPLL_BR_MASK (0x03 << 0)
+#define RG_HTPLL_BC 2
+#define RG_HTPLL_BC_MASK (0x03 << 2)
+#define RG_HTPLL_BP 4
+#define RG_HTPLL_BP_MASK (0x0f << 4)
+#define RG_HTPLL_IR 8
+#define RG_HTPLL_IR_MASK (0x0f << 8)
+#define RG_HTPLL_IC 12
+#define RG_HTPLL_IC_MASK (0x0f << 12)
+#define RG_HTPLL_POSDIV 16
+#define RG_HTPLL_POSDIV_MASK (0x03 << 16)
+#define RG_HTPLL_PREDIV 18
+#define RG_HTPLL_PREDIV_MASK (0x03 << 18)
+#define RG_HTPLL_FBKSEL 20
+#define RG_HTPLL_FBKSEL_MASK (0x03 << 20)
+#define RG_HTPLL_RLH_EN (0x01 << 22)
+#define RG_HTPLL_FBKDIV 24
+#define RG_HTPLL_FBKDIV_MASK (0x7f << 24)
+#define RG_HTPLL_EN (0x01 << 31)
+
+#define HDMI_CON7 0x1c
+#define RG_HTPLL_AUTOK_EN (0x01 << 23)
+#define RG_HTPLL_DIVEN 28
+#define RG_HTPLL_DIVEN_MASK (0x07 << 28)
+
+static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ usleep_range(80, 100);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ usleep_range(80, 100);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ usleep_range(80, 100);
+ return 0;
+}
+
+static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ usleep_range(80, 100);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+ usleep_range(80, 100);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ usleep_range(80, 100);
+}
+
+static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ u32 pos_div;
+
+ if (rate <= 64000000)
+ pos_div = 3;
+ else if (rate <= 128000000)
+ pos_div = 2;
+ else
+ pos_div = 1;
+
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
+ RG_HTPLL_IC_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
+ RG_HTPLL_IR_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON2, (pos_div << RG_HDMITX_TX_POSDIV),
+ RG_HDMITX_TX_POSDIV_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (1 << RG_HTPLL_FBKSEL),
+ RG_HTPLL_FBKSEL_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (19 << RG_HTPLL_FBKDIV),
+ RG_HTPLL_FBKDIV_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON7, (0x2 << RG_HTPLL_DIVEN),
+ RG_HTPLL_DIVEN_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0xc << RG_HTPLL_BP),
+ RG_HTPLL_BP_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x2 << RG_HTPLL_BC),
+ RG_HTPLL_BC_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_BR),
+ RG_HTPLL_BR_MASK);
+
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PRED_IMP);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x3 << RG_HDMITX_PRED_IBIAS),
+ RG_HDMITX_PRED_IBIAS_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x28 << RG_HDMITX_DRV_IMP),
+ RG_HDMITX_DRV_IMP_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 0x28, RG_HDMITX_RESERVE_MASK);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, (0xa << RG_HDMITX_DRV_IBIAS),
+ RG_HDMITX_DRV_IBIAS_MASK);
+ return 0;
+}
+
+static const struct clk_ops mtk_hdmi_phy_pll_ops = {
+ .prepare = mtk_hdmi_pll_prepare,
+ .unprepare = mtk_hdmi_pll_unprepare,
+ .set_rate = mtk_hdmi_pll_set_rate,
+ .round_rate = mtk_hdmi_pll_round_rate,
+ .recalc_rate = mtk_hdmi_pll_recalc_rate,
+};
+
+static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ usleep_range(80, 100);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ usleep_range(80, 100);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ usleep_range(80, 100);
+}
+
+static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ usleep_range(80, 100);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+ usleep_range(80, 100);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ usleep_range(80, 100);
+}
+
+struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
+ .tz_disabled = true,
+ .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
+ .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
+ .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
+};
+
+MODULE_AUTHOR("Chunhui Dai <chunhui.dai@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index 51cb9cfb6646..ed5916b27658 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -12,15 +12,7 @@
* GNU General Public License for more details.
*/
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
+#include "mtk_hdmi_phy.h"
#define HDMI_CON0 0x00
#define RG_HDMITX_PLL_EN BIT(31)
@@ -123,20 +115,6 @@
#define RGS_HDMITX_5T1_EDG (0xf << 4)
#define RGS_HDMITX_PLUG_TST BIT(0)
-struct mtk_hdmi_phy {
- void __iomem *regs;
- struct device *dev;
- struct clk *pll;
- struct clk_hw pll_hw;
- unsigned long pll_rate;
- u8 drv_imp_clk;
- u8 drv_imp_d2;
- u8 drv_imp_d1;
- u8 drv_imp_d0;
- u32 ibias;
- u32 ibias_up;
-};
-
static const u8 PREDIV[3][4] = {
{0x0, 0x0, 0x0, 0x0}, /* 27 MHz */
{0x1, 0x1, 0x1, 0x1}, /* 74 MHz */
@@ -185,44 +163,6 @@ static const u8 HTPLLBR[3][4] = {
{0x1, 0x2, 0x2, 0x1} /* 148 MHz */
};
-static void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp &= ~bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp |= bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 val, u32 mask)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, reg);
-}
-
-static inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
-{
- return container_of(hw, struct mtk_hdmi_phy, pll_hw);
-}
-
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
@@ -345,29 +285,7 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
- hdmi_phy->pll_rate = rate;
- if (rate <= 74250000)
- *parent_rate = rate;
- else
- *parent_rate = rate / 2;
-
- return rate;
-}
-
-static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
- return hdmi_phy->pll_rate;
-}
-
-static const struct clk_ops mtk_hdmi_pll_ops = {
+static const struct clk_ops mtk_hdmi_phy_pll_ops = {
.prepare = mtk_hdmi_pll_prepare,
.unprepare = mtk_hdmi_pll_unprepare,
.set_rate = mtk_hdmi_pll_set_rate,
@@ -390,142 +308,10 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
RG_HDMITX_SER_EN);
}
-static int mtk_hdmi_phy_power_on(struct phy *phy)
-{
- struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
- int ret;
-
- ret = clk_prepare_enable(hdmi_phy->pll);
- if (ret < 0)
- return ret;
-
- mtk_hdmi_phy_enable_tmds(hdmi_phy);
-
- return 0;
-}
-
-static int mtk_hdmi_phy_power_off(struct phy *phy)
-{
- struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
-
- mtk_hdmi_phy_disable_tmds(hdmi_phy);
- clk_disable_unprepare(hdmi_phy->pll);
-
- return 0;
-}
-
-static const struct phy_ops mtk_hdmi_phy_ops = {
- .power_on = mtk_hdmi_phy_power_on,
- .power_off = mtk_hdmi_phy_power_off,
- .owner = THIS_MODULE,
-};
-
-static int mtk_hdmi_phy_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct mtk_hdmi_phy *hdmi_phy;
- struct resource *mem;
- struct clk *ref_clk;
- const char *ref_clk_name;
- struct clk_init_data clk_init = {
- .ops = &mtk_hdmi_pll_ops,
- .num_parents = 1,
- .parent_names = (const char * const *)&ref_clk_name,
- .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
- };
- struct phy *phy;
- struct phy_provider *phy_provider;
- int ret;
-
- hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
- if (!hdmi_phy)
- return -ENOMEM;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi_phy->regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(hdmi_phy->regs)) {
- ret = PTR_ERR(hdmi_phy->regs);
- dev_err(dev, "Failed to get memory resource: %d\n", ret);
- return ret;
- }
-
- ref_clk = devm_clk_get(dev, "pll_ref");
- if (IS_ERR(ref_clk)) {
- ret = PTR_ERR(ref_clk);
- dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
- ret);
- return ret;
- }
- ref_clk_name = __clk_get_name(ref_clk);
-
- ret = of_property_read_string(dev->of_node, "clock-output-names",
- &clk_init.name);
- if (ret < 0) {
- dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
- return ret;
- }
-
- hdmi_phy->pll_hw.init = &clk_init;
- hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
- if (IS_ERR(hdmi_phy->pll)) {
- ret = PTR_ERR(hdmi_phy->pll);
- dev_err(dev, "Failed to register PLL: %d\n", ret);
- return ret;
- }
-
- ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
- &hdmi_phy->ibias);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
- return ret;
- }
-
- ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
- &hdmi_phy->ibias_up);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
- return ret;
- }
-
- dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
- hdmi_phy->drv_imp_clk = 0x30;
- hdmi_phy->drv_imp_d2 = 0x30;
- hdmi_phy->drv_imp_d1 = 0x30;
- hdmi_phy->drv_imp_d0 = 0x30;
-
- phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops);
- if (IS_ERR(phy)) {
- dev_err(dev, "Failed to create HDMI PHY\n");
- return PTR_ERR(phy);
- }
- phy_set_drvdata(phy, hdmi_phy);
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- hdmi_phy->dev = dev;
- return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
- hdmi_phy->pll);
-}
-
-static int mtk_hdmi_phy_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static const struct of_device_id mtk_hdmi_phy_match[] = {
- { .compatible = "mediatek,mt8173-hdmi-phy", },
- {},
-};
-
-struct platform_driver mtk_hdmi_phy_driver = {
- .probe = mtk_hdmi_phy_probe,
- .remove = mtk_hdmi_phy_remove,
- .driver = {
- .name = "mediatek-hdmi-phy",
- .of_match_table = mtk_hdmi_phy_match,
- },
+struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+ .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
+ .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
+ .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
};
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 261fa79d456d..19ab521d4c3a 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -58,7 +58,6 @@ msm-y := \
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_blk.o \
disp/dpu1/dpu_hw_catalog.o \
- disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_interrupts.o \
disp/dpu1/dpu_hw_intf.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 4bff0a740c7d..12b0ba270b5e 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 645a19aef399..a89f7bb8b5cc 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 19565e87aa7b..858690f52854 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 182d37ff3794..b4944cc0e62f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 059ec7d394d0..d2127b1c4ece 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -132,14 +132,14 @@ reset_set(void *data, u64 val)
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_unreference(a5xx_gpu->pm4_bo);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_unreference(a5xx_gpu->pfp_bo);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 29b3d9d7d54b..8edd80bb0428 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1231,7 +1231,7 @@ static void a5xx_crashdumper_free(struct msm_gpu *gpu,
msm_gem_put_iova(dumper->bo, gpu->aspace);
msm_gem_put_vaddr(dumper->bo);
- drm_gem_object_unreference(dumper->bo);
+ drm_gem_object_put(dumper->bo);
}
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1433,12 +1433,22 @@ static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
return a5xx_gpu->cur_ring;
}
-static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
+static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
- REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+ u64 busy_cycles, busy_time;
- return 0;
+ busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+ REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+
+ busy_time = busy_cycles - gpu->devfreq.busy_cycles;
+ do_div(busy_time, clk_get_rate(gpu->core_clk) / 1000000);
+
+ gpu->devfreq.busy_cycles = busy_cycles;
+
+ if (WARN_ON(busy_time > ~0LU))
+ return ~0LU;
+
+ return (unsigned long)busy_time;
}
static const struct adreno_gpu_funcs funcs = {
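
The a5xx_gpu_busy() change converts the devfreq hook from returning a raw 64-bit cycle counter to returning busy time: take the delta of the free-running counter since the last call and divide by the core clock in MHz, giving microseconds of busy time per polling interval. A worked sketch in plain C, with stand-ins for gpu_read64() and do_div():

#include <stdio.h>
#include <stdint.h>

struct mock_gpu {
	uint64_t counter;	/* free-running busy-cycle counter */
	uint64_t busy_cycles;	/* snapshot from the previous poll */
	unsigned long clk_hz;	/* core clock rate */
};

/* Returns microseconds the GPU was busy since the previous call. */
static unsigned long mock_gpu_busy(struct mock_gpu *gpu)
{
	uint64_t busy_cycles = gpu->counter;	/* gpu_read64() stand-in */
	uint64_t busy_time = busy_cycles - gpu->busy_cycles;

	busy_time /= gpu->clk_hz / 1000000;	/* cycles -> us, like do_div() */
	gpu->busy_cycles = busy_cycles;
	return (unsigned long)busy_time;
}

int main(void)
{
	struct mock_gpu gpu = { .clk_hz = 500000000 };	/* 500 MHz core */

	mock_gpu_busy(&gpu);		/* prime the snapshot */
	gpu.counter += 250000000;	/* 250M busy cycles this interval */

	/* 250e6 cycles / 500 cycles-per-us = 500000 us = 0.5 s busy */
	printf("busy: %lu us\n", mock_gpu_busy(&gpu));
	return 0;
}
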
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index e9c0e56dbec0..7a41e1c147e4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -323,7 +323,7 @@ err:
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
if (a5xx_gpu->gpmu_bo)
- drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
+ drm_gem_object_put(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_bo = NULL;
a5xx_gpu->gpmu_iova = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 970c7963ae29..4c357ead1be6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -208,6 +208,13 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings == 1)
+ return;
+
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
@@ -220,9 +227,6 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
/* Reset the preemption state */
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
-
- /* Always come up on rb 0 */
- a5xx_gpu->cur_ring = gpu->rb[0];
}
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
@@ -272,7 +276,7 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
if (a5xx_gpu->preempt_iova[i])
msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
- drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
+ drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
a5xx_gpu->preempt_bo[i] = NULL;
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 87eab51f7000..a6f7c40454a6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -268,8 +268,687 @@ enum a6xx_depth_format {
DEPTH6_32 = 4,
};
+enum a6xx_shader_id {
+ A6XX_TP0_TMO_DATA = 9,
+ A6XX_TP0_SMO_DATA = 10,
+ A6XX_TP0_MIPMAP_BASE_DATA = 11,
+ A6XX_TP1_TMO_DATA = 25,
+ A6XX_TP1_SMO_DATA = 26,
+ A6XX_TP1_MIPMAP_BASE_DATA = 27,
+ A6XX_SP_INST_DATA = 41,
+ A6XX_SP_LB_0_DATA = 42,
+ A6XX_SP_LB_1_DATA = 43,
+ A6XX_SP_LB_2_DATA = 44,
+ A6XX_SP_LB_3_DATA = 45,
+ A6XX_SP_LB_4_DATA = 46,
+ A6XX_SP_LB_5_DATA = 47,
+ A6XX_SP_CB_BINDLESS_DATA = 48,
+ A6XX_SP_CB_LEGACY_DATA = 49,
+ A6XX_SP_UAV_DATA = 50,
+ A6XX_SP_INST_TAG = 51,
+ A6XX_SP_CB_BINDLESS_TAG = 52,
+ A6XX_SP_TMO_UMO_TAG = 53,
+ A6XX_SP_SMO_TAG = 54,
+ A6XX_SP_STATE_DATA = 55,
+ A6XX_HLSQ_CHUNK_CVS_RAM = 73,
+ A6XX_HLSQ_CHUNK_CPS_RAM = 74,
+ A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75,
+ A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76,
+ A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77,
+ A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78,
+ A6XX_HLSQ_CVS_MISC_RAM = 80,
+ A6XX_HLSQ_CPS_MISC_RAM = 81,
+ A6XX_HLSQ_INST_RAM = 82,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM = 83,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM = 84,
+ A6XX_HLSQ_CVS_MISC_RAM_TAG = 85,
+ A6XX_HLSQ_CPS_MISC_RAM_TAG = 86,
+ A6XX_HLSQ_INST_RAM_TAG = 87,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89,
+ A6XX_HLSQ_PWR_REST_RAM = 90,
+ A6XX_HLSQ_PWR_REST_TAG = 91,
+ A6XX_HLSQ_DATAPATH_META = 96,
+ A6XX_HLSQ_FRONTEND_META = 97,
+ A6XX_HLSQ_INDIRECT_META = 98,
+ A6XX_HLSQ_BACKEND_META = 99,
+};
+
+enum a6xx_debugbus_id {
+ A6XX_DBGBUS_CP = 1,
+ A6XX_DBGBUS_RBBM = 2,
+ A6XX_DBGBUS_VBIF = 3,
+ A6XX_DBGBUS_HLSQ = 4,
+ A6XX_DBGBUS_UCHE = 5,
+ A6XX_DBGBUS_DPM = 6,
+ A6XX_DBGBUS_TESS = 7,
+ A6XX_DBGBUS_PC = 8,
+ A6XX_DBGBUS_VFDP = 9,
+ A6XX_DBGBUS_VPC = 10,
+ A6XX_DBGBUS_TSE = 11,
+ A6XX_DBGBUS_RAS = 12,
+ A6XX_DBGBUS_VSC = 13,
+ A6XX_DBGBUS_COM = 14,
+ A6XX_DBGBUS_LRZ = 16,
+ A6XX_DBGBUS_A2D = 17,
+ A6XX_DBGBUS_CCUFCHE = 18,
+ A6XX_DBGBUS_GMU_CX = 19,
+ A6XX_DBGBUS_RBP = 20,
+ A6XX_DBGBUS_DCS = 21,
+ A6XX_DBGBUS_DBGC = 22,
+ A6XX_DBGBUS_CX = 23,
+ A6XX_DBGBUS_GMU_GX = 24,
+ A6XX_DBGBUS_TPFCHE = 25,
+ A6XX_DBGBUS_GBIF_GX = 26,
+ A6XX_DBGBUS_GPC = 29,
+ A6XX_DBGBUS_LARC = 30,
+ A6XX_DBGBUS_HLSQ_SPTP = 31,
+ A6XX_DBGBUS_RB_0 = 32,
+ A6XX_DBGBUS_RB_1 = 33,
+ A6XX_DBGBUS_UCHE_WRAPPER = 36,
+ A6XX_DBGBUS_CCU_0 = 40,
+ A6XX_DBGBUS_CCU_1 = 41,
+ A6XX_DBGBUS_VFD_0 = 56,
+ A6XX_DBGBUS_VFD_1 = 57,
+ A6XX_DBGBUS_VFD_2 = 58,
+ A6XX_DBGBUS_VFD_3 = 59,
+ A6XX_DBGBUS_SP_0 = 64,
+ A6XX_DBGBUS_SP_1 = 65,
+ A6XX_DBGBUS_TPL1_0 = 72,
+ A6XX_DBGBUS_TPL1_1 = 73,
+ A6XX_DBGBUS_TPL1_2 = 74,
+ A6XX_DBGBUS_TPL1_3 = 75,
+};
+
enum a6xx_cp_perfcounter_select {
PERF_CP_ALWAYS_COUNT = 0,
+ PERF_CP_BUSY_GFX_CORE_IDLE = 1,
+ PERF_CP_BUSY_CYCLES = 2,
+ PERF_CP_NUM_PREEMPTIONS = 3,
+ PERF_CP_PREEMPTION_REACTION_DELAY = 4,
+ PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5,
+ PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6,
+ PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7,
+ PERF_CP_PREDICATED_DRAWS_KILLED = 8,
+ PERF_CP_MODE_SWITCH = 9,
+ PERF_CP_ZPASS_DONE = 10,
+ PERF_CP_CONTEXT_DONE = 11,
+ PERF_CP_CACHE_FLUSH = 12,
+ PERF_CP_LONG_PREEMPTIONS = 13,
+ PERF_CP_SQE_I_CACHE_STARVE = 14,
+ PERF_CP_SQE_IDLE = 15,
+ PERF_CP_SQE_PM4_STARVE_RB_IB = 16,
+ PERF_CP_SQE_PM4_STARVE_SDS = 17,
+ PERF_CP_SQE_MRB_STARVE = 18,
+ PERF_CP_SQE_RRB_STARVE = 19,
+ PERF_CP_SQE_VSD_STARVE = 20,
+ PERF_CP_VSD_DECODE_STARVE = 21,
+ PERF_CP_SQE_PIPE_OUT_STALL = 22,
+ PERF_CP_SQE_SYNC_STALL = 23,
+ PERF_CP_SQE_PM4_WFI_STALL = 24,
+ PERF_CP_SQE_SYS_WFI_STALL = 25,
+ PERF_CP_SQE_T4_EXEC = 26,
+ PERF_CP_SQE_LOAD_STATE_EXEC = 27,
+ PERF_CP_SQE_SAVE_SDS_STATE = 28,
+ PERF_CP_SQE_DRAW_EXEC = 29,
+ PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30,
+ PERF_CP_SQE_EXEC_PROFILED = 31,
+ PERF_CP_MEMORY_POOL_EMPTY = 32,
+ PERF_CP_MEMORY_POOL_SYNC_STALL = 33,
+ PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34,
+ PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35,
+ PERF_CP_AHB_STALL_SQE_GMU = 36,
+ PERF_CP_AHB_STALL_SQE_WR_OTHER = 37,
+ PERF_CP_AHB_STALL_SQE_RD_OTHER = 38,
+ PERF_CP_CLUSTER0_EMPTY = 39,
+ PERF_CP_CLUSTER1_EMPTY = 40,
+ PERF_CP_CLUSTER2_EMPTY = 41,
+ PERF_CP_CLUSTER3_EMPTY = 42,
+ PERF_CP_CLUSTER4_EMPTY = 43,
+ PERF_CP_CLUSTER5_EMPTY = 44,
+ PERF_CP_PM4_DATA = 45,
+ PERF_CP_PM4_HEADERS = 46,
+ PERF_CP_VBIF_READ_BEATS = 47,
+ PERF_CP_VBIF_WRITE_BEATS = 48,
+ PERF_CP_SQE_INSTR_COUNTER = 49,
+};
+
+enum a6xx_rbbm_perfcounter_select {
+ PERF_RBBM_ALWAYS_COUNT = 0,
+ PERF_RBBM_ALWAYS_ON = 1,
+ PERF_RBBM_TSE_BUSY = 2,
+ PERF_RBBM_RAS_BUSY = 3,
+ PERF_RBBM_PC_DCALL_BUSY = 4,
+ PERF_RBBM_PC_VSD_BUSY = 5,
+ PERF_RBBM_STATUS_MASKED = 6,
+ PERF_RBBM_COM_BUSY = 7,
+ PERF_RBBM_DCOM_BUSY = 8,
+ PERF_RBBM_VBIF_BUSY = 9,
+ PERF_RBBM_VSC_BUSY = 10,
+ PERF_RBBM_TESS_BUSY = 11,
+ PERF_RBBM_UCHE_BUSY = 12,
+ PERF_RBBM_HLSQ_BUSY = 13,
+};
+
+enum a6xx_pc_perfcounter_select {
+ PERF_PC_BUSY_CYCLES = 0,
+ PERF_PC_WORKING_CYCLES = 1,
+ PERF_PC_STALL_CYCLES_VFD = 2,
+ PERF_PC_STALL_CYCLES_TSE = 3,
+ PERF_PC_STALL_CYCLES_VPC = 4,
+ PERF_PC_STALL_CYCLES_UCHE = 5,
+ PERF_PC_STALL_CYCLES_TESS = 6,
+ PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
+ PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
+ PERF_PC_PASS1_TF_STALL_CYCLES = 9,
+ PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
+ PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
+ PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
+ PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
+ PERF_PC_STARVE_CYCLES_DI = 14,
+ PERF_PC_VIS_STREAMS_LOADED = 15,
+ PERF_PC_INSTANCES = 16,
+ PERF_PC_VPC_PRIMITIVES = 17,
+ PERF_PC_DEAD_PRIM = 18,
+ PERF_PC_LIVE_PRIM = 19,
+ PERF_PC_VERTEX_HITS = 20,
+ PERF_PC_IA_VERTICES = 21,
+ PERF_PC_IA_PRIMITIVES = 22,
+ PERF_PC_GS_PRIMITIVES = 23,
+ PERF_PC_HS_INVOCATIONS = 24,
+ PERF_PC_DS_INVOCATIONS = 25,
+ PERF_PC_VS_INVOCATIONS = 26,
+ PERF_PC_GS_INVOCATIONS = 27,
+ PERF_PC_DS_PRIMITIVES = 28,
+ PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
+ PERF_PC_3D_DRAWCALLS = 30,
+ PERF_PC_2D_DRAWCALLS = 31,
+ PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
+ PERF_TESS_BUSY_CYCLES = 33,
+ PERF_TESS_WORKING_CYCLES = 34,
+ PERF_TESS_STALL_CYCLES_PC = 35,
+ PERF_TESS_STARVE_CYCLES_PC = 36,
+ PERF_PC_TSE_TRANSACTION = 37,
+ PERF_PC_TSE_VERTEX = 38,
+ PERF_PC_TESS_PC_UV_TRANS = 39,
+ PERF_PC_TESS_PC_UV_PATCHES = 40,
+ PERF_PC_TESS_FACTOR_TRANS = 41,
+};
+
+enum a6xx_vfd_perfcounter_select {
+ PERF_VFD_BUSY_CYCLES = 0,
+ PERF_VFD_STALL_CYCLES_UCHE = 1,
+ PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
+ PERF_VFD_STALL_CYCLES_SP_INFO = 3,
+ PERF_VFD_STALL_CYCLES_SP_ATTR = 4,
+ PERF_VFD_STARVE_CYCLES_UCHE = 5,
+ PERF_VFD_RBUFFER_FULL = 6,
+ PERF_VFD_ATTR_INFO_FIFO_FULL = 7,
+ PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8,
+ PERF_VFD_NUM_ATTRIBUTES = 9,
+ PERF_VFD_UPPER_SHADER_FIBERS = 10,
+ PERF_VFD_LOWER_SHADER_FIBERS = 11,
+ PERF_VFD_MODE_0_FIBERS = 12,
+ PERF_VFD_MODE_1_FIBERS = 13,
+ PERF_VFD_MODE_2_FIBERS = 14,
+ PERF_VFD_MODE_3_FIBERS = 15,
+ PERF_VFD_MODE_4_FIBERS = 16,
+ PERF_VFD_TOTAL_VERTICES = 17,
+ PERF_VFDP_STALL_CYCLES_VFD = 18,
+ PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19,
+ PERF_VFDP_STALL_CYCLES_VFD_PROG = 20,
+ PERF_VFDP_STARVE_CYCLES_PC = 21,
+ PERF_VFDP_VS_STAGE_WAVES = 22,
+};
+
+enum a6xx_hslq_perfcounter_select {
+ PERF_HLSQ_BUSY_CYCLES = 0,
+ PERF_HLSQ_STALL_CYCLES_UCHE = 1,
+ PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
+ PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
+ PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
+ PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
+ PERF_HLSQ_FS_STAGE_1X_WAVES = 6,
+ PERF_HLSQ_FS_STAGE_2X_WAVES = 7,
+ PERF_HLSQ_QUADS = 8,
+ PERF_HLSQ_CS_INVOCATIONS = 9,
+ PERF_HLSQ_COMPUTE_DRAWCALLS = 10,
+ PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11,
+ PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12,
+ PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13,
+ PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14,
+ PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15,
+ PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16,
+ PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17,
+ PERF_HLSQ_STALL_CYCLES_VPC = 18,
+ PERF_HLSQ_PIXELS = 19,
+ PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20,
+};
+
+enum a6xx_vpc_perfcounter_select {
+ PERF_VPC_BUSY_CYCLES = 0,
+ PERF_VPC_WORKING_CYCLES = 1,
+ PERF_VPC_STALL_CYCLES_UCHE = 2,
+ PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
+ PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
+ PERF_VPC_STALL_CYCLES_PC = 5,
+ PERF_VPC_STALL_CYCLES_SP_LM = 6,
+ PERF_VPC_STARVE_CYCLES_SP = 7,
+ PERF_VPC_STARVE_CYCLES_LRZ = 8,
+ PERF_VPC_PC_PRIMITIVES = 9,
+ PERF_VPC_SP_COMPONENTS = 10,
+ PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11,
+ PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12,
+ PERF_VPC_RB_VISIBLE_PRIMITIVES = 13,
+ PERF_VPC_LM_TRANSACTION = 14,
+ PERF_VPC_STREAMOUT_TRANSACTION = 15,
+ PERF_VPC_VS_BUSY_CYCLES = 16,
+ PERF_VPC_PS_BUSY_CYCLES = 17,
+ PERF_VPC_VS_WORKING_CYCLES = 18,
+ PERF_VPC_PS_WORKING_CYCLES = 19,
+ PERF_VPC_STARVE_CYCLES_RB = 20,
+ PERF_VPC_NUM_VPCRAM_READ_POS = 21,
+ PERF_VPC_WIT_FULL_CYCLES = 22,
+ PERF_VPC_VPCRAM_FULL_CYCLES = 23,
+ PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24,
+ PERF_VPC_NUM_VPCRAM_WRITE = 25,
+ PERF_VPC_NUM_VPCRAM_READ_SO = 26,
+ PERF_VPC_NUM_ATTR_REQ_LM = 27,
+};
+
+enum a6xx_tse_perfcounter_select {
+ PERF_TSE_BUSY_CYCLES = 0,
+ PERF_TSE_CLIPPING_CYCLES = 1,
+ PERF_TSE_STALL_CYCLES_RAS = 2,
+ PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
+ PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
+ PERF_TSE_STARVE_CYCLES_PC = 5,
+ PERF_TSE_INPUT_PRIM = 6,
+ PERF_TSE_INPUT_NULL_PRIM = 7,
+ PERF_TSE_TRIVAL_REJ_PRIM = 8,
+ PERF_TSE_CLIPPED_PRIM = 9,
+ PERF_TSE_ZERO_AREA_PRIM = 10,
+ PERF_TSE_FACENESS_CULLED_PRIM = 11,
+ PERF_TSE_ZERO_PIXEL_PRIM = 12,
+ PERF_TSE_OUTPUT_NULL_PRIM = 13,
+ PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
+ PERF_TSE_CINVOCATION = 15,
+ PERF_TSE_CPRIMITIVES = 16,
+ PERF_TSE_2D_INPUT_PRIM = 17,
+ PERF_TSE_2D_ALIVE_CYCLES = 18,
+ PERF_TSE_CLIP_PLANES = 19,
+};
+
+enum a6xx_ras_perfcounter_select {
+ PERF_RAS_BUSY_CYCLES = 0,
+ PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
+ PERF_RAS_STALL_CYCLES_LRZ = 2,
+ PERF_RAS_STARVE_CYCLES_TSE = 3,
+ PERF_RAS_SUPER_TILES = 4,
+ PERF_RAS_8X4_TILES = 5,
+ PERF_RAS_MASKGEN_ACTIVE = 6,
+ PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
+ PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
+ PERF_RAS_PRIM_KILLED_INVISILBE = 9,
+ PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10,
+ PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11,
+ PERF_RAS_BLOCKS = 12,
+};
+
+enum a6xx_uche_perfcounter_select {
+ PERF_UCHE_BUSY_CYCLES = 0,
+ PERF_UCHE_STALL_CYCLES_ARBITER = 1,
+ PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
+ PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
+ PERF_UCHE_VBIF_READ_BEATS_TP = 4,
+ PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
+ PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
+ PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
+ PERF_UCHE_VBIF_READ_BEATS_SP = 8,
+ PERF_UCHE_READ_REQUESTS_TP = 9,
+ PERF_UCHE_READ_REQUESTS_VFD = 10,
+ PERF_UCHE_READ_REQUESTS_HLSQ = 11,
+ PERF_UCHE_READ_REQUESTS_LRZ = 12,
+ PERF_UCHE_READ_REQUESTS_SP = 13,
+ PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
+ PERF_UCHE_WRITE_REQUESTS_SP = 15,
+ PERF_UCHE_WRITE_REQUESTS_VPC = 16,
+ PERF_UCHE_WRITE_REQUESTS_VSC = 17,
+ PERF_UCHE_EVICTS = 18,
+ PERF_UCHE_BANK_REQ0 = 19,
+ PERF_UCHE_BANK_REQ1 = 20,
+ PERF_UCHE_BANK_REQ2 = 21,
+ PERF_UCHE_BANK_REQ3 = 22,
+ PERF_UCHE_BANK_REQ4 = 23,
+ PERF_UCHE_BANK_REQ5 = 24,
+ PERF_UCHE_BANK_REQ6 = 25,
+ PERF_UCHE_BANK_REQ7 = 26,
+ PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
+ PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
+ PERF_UCHE_GMEM_READ_BEATS = 29,
+ PERF_UCHE_TPH_REF_FULL = 30,
+ PERF_UCHE_TPH_VICTIM_FULL = 31,
+ PERF_UCHE_TPH_EXT_FULL = 32,
+ PERF_UCHE_VBIF_STALL_WRITE_DATA = 33,
+ PERF_UCHE_DCMP_LATENCY_SAMPLES = 34,
+ PERF_UCHE_DCMP_LATENCY_CYCLES = 35,
+ PERF_UCHE_VBIF_READ_BEATS_PC = 36,
+ PERF_UCHE_READ_REQUESTS_PC = 37,
+ PERF_UCHE_RAM_READ_REQ = 38,
+ PERF_UCHE_RAM_WRITE_REQ = 39,
+};
+
+enum a6xx_tp_perfcounter_select {
+ PERF_TP_BUSY_CYCLES = 0,
+ PERF_TP_STALL_CYCLES_UCHE = 1,
+ PERF_TP_LATENCY_CYCLES = 2,
+ PERF_TP_LATENCY_TRANS = 3,
+ PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
+ PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
+ PERF_TP_L1_CACHELINE_REQUESTS = 6,
+ PERF_TP_L1_CACHELINE_MISSES = 7,
+ PERF_TP_SP_TP_TRANS = 8,
+ PERF_TP_TP_SP_TRANS = 9,
+ PERF_TP_OUTPUT_PIXELS = 10,
+ PERF_TP_FILTER_WORKLOAD_16BIT = 11,
+ PERF_TP_FILTER_WORKLOAD_32BIT = 12,
+ PERF_TP_QUADS_RECEIVED = 13,
+ PERF_TP_QUADS_OFFSET = 14,
+ PERF_TP_QUADS_SHADOW = 15,
+ PERF_TP_QUADS_ARRAY = 16,
+ PERF_TP_QUADS_GRADIENT = 17,
+ PERF_TP_QUADS_1D = 18,
+ PERF_TP_QUADS_2D = 19,
+ PERF_TP_QUADS_BUFFER = 20,
+ PERF_TP_QUADS_3D = 21,
+ PERF_TP_QUADS_CUBE = 22,
+ PERF_TP_DIVERGENT_QUADS_RECEIVED = 23,
+ PERF_TP_PRT_NON_RESIDENT_EVENTS = 24,
+ PERF_TP_OUTPUT_PIXELS_POINT = 25,
+ PERF_TP_OUTPUT_PIXELS_BILINEAR = 26,
+ PERF_TP_OUTPUT_PIXELS_MIP = 27,
+ PERF_TP_OUTPUT_PIXELS_ANISO = 28,
+ PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29,
+ PERF_TP_FLAG_CACHE_REQUESTS = 30,
+ PERF_TP_FLAG_CACHE_MISSES = 31,
+ PERF_TP_L1_5_L2_REQUESTS = 32,
+ PERF_TP_2D_OUTPUT_PIXELS = 33,
+ PERF_TP_2D_OUTPUT_PIXELS_POINT = 34,
+ PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35,
+ PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36,
+ PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37,
+ PERF_TP_TPA2TPC_TRANS = 38,
+ PERF_TP_L1_MISSES_ASTC_1TILE = 39,
+ PERF_TP_L1_MISSES_ASTC_2TILE = 40,
+ PERF_TP_L1_MISSES_ASTC_4TILE = 41,
+ PERF_TP_L1_5_L2_COMPRESS_REQS = 42,
+ PERF_TP_L1_5_L2_COMPRESS_MISS = 43,
+ PERF_TP_L1_BANK_CONFLICT = 44,
+ PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45,
+ PERF_TP_L1_5_MISS_LATENCY_TRANS = 46,
+ PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47,
+ PERF_TP_FRONTEND_WORKING_CYCLES = 48,
+ PERF_TP_L1_TAG_WORKING_CYCLES = 49,
+ PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50,
+ PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51,
+ PERF_TP_BACKEND_WORKING_CYCLES = 52,
+ PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53,
+ PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54,
+ PERF_TP_STARVE_CYCLES_SP = 55,
+ PERF_TP_STARVE_CYCLES_UCHE = 56,
+};
+
+enum a6xx_sp_perfcounter_select {
+ PERF_SP_BUSY_CYCLES = 0,
+ PERF_SP_ALU_WORKING_CYCLES = 1,
+ PERF_SP_EFU_WORKING_CYCLES = 2,
+ PERF_SP_STALL_CYCLES_VPC = 3,
+ PERF_SP_STALL_CYCLES_TP = 4,
+ PERF_SP_STALL_CYCLES_UCHE = 5,
+ PERF_SP_STALL_CYCLES_RB = 6,
+ PERF_SP_NON_EXECUTION_CYCLES = 7,
+ PERF_SP_WAVE_CONTEXTS = 8,
+ PERF_SP_WAVE_CONTEXT_CYCLES = 9,
+ PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
+ PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
+ PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
+ PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
+ PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
+ PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
+ PERF_SP_WAVE_CTRL_CYCLES = 16,
+ PERF_SP_WAVE_LOAD_CYCLES = 17,
+ PERF_SP_WAVE_EMIT_CYCLES = 18,
+ PERF_SP_WAVE_NOP_CYCLES = 19,
+ PERF_SP_WAVE_WAIT_CYCLES = 20,
+ PERF_SP_WAVE_FETCH_CYCLES = 21,
+ PERF_SP_WAVE_IDLE_CYCLES = 22,
+ PERF_SP_WAVE_END_CYCLES = 23,
+ PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
+ PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
+ PERF_SP_WAVE_JOIN_CYCLES = 26,
+ PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
+ PERF_SP_LM_STORE_INSTRUCTIONS = 28,
+ PERF_SP_LM_ATOMICS = 29,
+ PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
+ PERF_SP_GM_STORE_INSTRUCTIONS = 31,
+ PERF_SP_GM_ATOMICS = 32,
+ PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
+ PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34,
+ PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35,
+ PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36,
+ PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37,
+ PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38,
+ PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 39,
+ PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40,
+ PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41,
+ PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42,
+ PERF_SP_VS_INSTRUCTIONS = 43,
+ PERF_SP_FS_INSTRUCTIONS = 44,
+ PERF_SP_ADDR_LOCK_COUNT = 45,
+ PERF_SP_UCHE_READ_TRANS = 46,
+ PERF_SP_UCHE_WRITE_TRANS = 47,
+ PERF_SP_EXPORT_VPC_TRANS = 48,
+ PERF_SP_EXPORT_RB_TRANS = 49,
+ PERF_SP_PIXELS_KILLED = 50,
+ PERF_SP_ICL1_REQUESTS = 51,
+ PERF_SP_ICL1_MISSES = 52,
+ PERF_SP_HS_INSTRUCTIONS = 53,
+ PERF_SP_DS_INSTRUCTIONS = 54,
+ PERF_SP_GS_INSTRUCTIONS = 55,
+ PERF_SP_CS_INSTRUCTIONS = 56,
+ PERF_SP_GPR_READ = 57,
+ PERF_SP_GPR_WRITE = 58,
+ PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59,
+ PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60,
+ PERF_SP_LM_BANK_CONFLICTS = 61,
+ PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62,
+ PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63,
+ PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64,
+ PERF_SP_LM_WORKING_CYCLES = 65,
+ PERF_SP_DISPATCHER_WORKING_CYCLES = 66,
+ PERF_SP_SEQUENCER_WORKING_CYCLES = 67,
+ PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68,
+ PERF_SP_STARVE_CYCLES_HLSQ = 69,
+ PERF_SP_NON_EXECUTION_LS_CYCLES = 70,
+ PERF_SP_WORKING_EU = 71,
+ PERF_SP_ANY_EU_WORKING = 72,
+ PERF_SP_WORKING_EU_FS_STAGE = 73,
+ PERF_SP_ANY_EU_WORKING_FS_STAGE = 74,
+ PERF_SP_WORKING_EU_VS_STAGE = 75,
+ PERF_SP_ANY_EU_WORKING_VS_STAGE = 76,
+ PERF_SP_WORKING_EU_CS_STAGE = 77,
+ PERF_SP_ANY_EU_WORKING_CS_STAGE = 78,
+ PERF_SP_GPR_READ_PREFETCH = 79,
+ PERF_SP_GPR_READ_CONFLICT = 80,
+ PERF_SP_GPR_WRITE_CONFLICT = 81,
+ PERF_SP_GM_LOAD_LATENCY_CYCLES = 82,
+ PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83,
+ PERF_SP_EXECUTABLE_WAVES = 84,
+};
+
+enum a6xx_rb_perfcounter_select {
+ PERF_RB_BUSY_CYCLES = 0,
+ PERF_RB_STALL_CYCLES_HLSQ = 1,
+ PERF_RB_STALL_CYCLES_FIFO0_FULL = 2,
+ PERF_RB_STALL_CYCLES_FIFO1_FULL = 3,
+ PERF_RB_STALL_CYCLES_FIFO2_FULL = 4,
+ PERF_RB_STARVE_CYCLES_SP = 5,
+ PERF_RB_STARVE_CYCLES_LRZ_TILE = 6,
+ PERF_RB_STARVE_CYCLES_CCU = 7,
+ PERF_RB_STARVE_CYCLES_Z_PLANE = 8,
+ PERF_RB_STARVE_CYCLES_BARY_PLANE = 9,
+ PERF_RB_Z_WORKLOAD = 10,
+ PERF_RB_HLSQ_ACTIVE = 11,
+ PERF_RB_Z_READ = 12,
+ PERF_RB_Z_WRITE = 13,
+ PERF_RB_C_READ = 14,
+ PERF_RB_C_WRITE = 15,
+ PERF_RB_TOTAL_PASS = 16,
+ PERF_RB_Z_PASS = 17,
+ PERF_RB_Z_FAIL = 18,
+ PERF_RB_S_FAIL = 19,
+ PERF_RB_BLENDED_FXP_COMPONENTS = 20,
+ PERF_RB_BLENDED_FP16_COMPONENTS = 21,
+ PERF_RB_PS_INVOCATIONS = 22,
+ PERF_RB_2D_ALIVE_CYCLES = 23,
+ PERF_RB_2D_STALL_CYCLES_A2D = 24,
+ PERF_RB_2D_STARVE_CYCLES_SRC = 25,
+ PERF_RB_2D_STARVE_CYCLES_SP = 26,
+ PERF_RB_2D_STARVE_CYCLES_DST = 27,
+ PERF_RB_2D_VALID_PIXELS = 28,
+ PERF_RB_3D_PIXELS = 29,
+ PERF_RB_BLENDER_WORKING_CYCLES = 30,
+ PERF_RB_ZPROC_WORKING_CYCLES = 31,
+ PERF_RB_CPROC_WORKING_CYCLES = 32,
+ PERF_RB_SAMPLER_WORKING_CYCLES = 33,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37,
+ PERF_RB_STALL_CYCLES_VPC = 38,
+ PERF_RB_2D_INPUT_TRANS = 39,
+ PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40,
+ PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41,
+ PERF_RB_BLENDED_FP32_COMPONENTS = 42,
+ PERF_RB_COLOR_PIX_TILES = 43,
+ PERF_RB_STALL_CYCLES_CCU = 44,
+ PERF_RB_EARLY_Z_ARB3_GRANT = 45,
+ PERF_RB_LATE_Z_ARB3_GRANT = 46,
+ PERF_RB_EARLY_Z_SKIP_GRANT = 47,
+};
+
+enum a6xx_vsc_perfcounter_select {
+ PERF_VSC_BUSY_CYCLES = 0,
+ PERF_VSC_WORKING_CYCLES = 1,
+ PERF_VSC_STALL_CYCLES_UCHE = 2,
+ PERF_VSC_EOT_NUM = 3,
+ PERF_VSC_INPUT_TILES = 4,
+};
+
+enum a6xx_ccu_perfcounter_select {
+ PERF_CCU_BUSY_CYCLES = 0,
+ PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
+ PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
+ PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
+ PERF_CCU_DEPTH_BLOCKS = 4,
+ PERF_CCU_COLOR_BLOCKS = 5,
+ PERF_CCU_DEPTH_BLOCK_HIT = 6,
+ PERF_CCU_COLOR_BLOCK_HIT = 7,
+ PERF_CCU_PARTIAL_BLOCK_READ = 8,
+ PERF_CCU_GMEM_READ = 9,
+ PERF_CCU_GMEM_WRITE = 10,
+ PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
+ PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
+ PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
+ PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
+ PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
+ PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16,
+ PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17,
+ PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18,
+ PERF_CCU_COLOR_READ_FLAG0_COUNT = 19,
+ PERF_CCU_COLOR_READ_FLAG1_COUNT = 20,
+ PERF_CCU_COLOR_READ_FLAG2_COUNT = 21,
+ PERF_CCU_COLOR_READ_FLAG3_COUNT = 22,
+ PERF_CCU_COLOR_READ_FLAG4_COUNT = 23,
+ PERF_CCU_COLOR_READ_FLAG5_COUNT = 24,
+ PERF_CCU_COLOR_READ_FLAG6_COUNT = 25,
+ PERF_CCU_COLOR_READ_FLAG8_COUNT = 26,
+ PERF_CCU_2D_RD_REQ = 27,
+ PERF_CCU_2D_WR_REQ = 28,
+};
+
+enum a6xx_lrz_perfcounter_select {
+ PERF_LRZ_BUSY_CYCLES = 0,
+ PERF_LRZ_STARVE_CYCLES_RAS = 1,
+ PERF_LRZ_STALL_CYCLES_RB = 2,
+ PERF_LRZ_STALL_CYCLES_VSC = 3,
+ PERF_LRZ_STALL_CYCLES_VPC = 4,
+ PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
+ PERF_LRZ_STALL_CYCLES_UCHE = 6,
+ PERF_LRZ_LRZ_READ = 7,
+ PERF_LRZ_LRZ_WRITE = 8,
+ PERF_LRZ_READ_LATENCY = 9,
+ PERF_LRZ_MERGE_CACHE_UPDATING = 10,
+ PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
+ PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
+ PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
+ PERF_LRZ_FULL_8X8_TILES = 14,
+ PERF_LRZ_PARTIAL_8X8_TILES = 15,
+ PERF_LRZ_TILE_KILLED = 16,
+ PERF_LRZ_TOTAL_PIXEL = 17,
+ PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
+ PERF_LRZ_FULLY_COVERED_TILES = 19,
+ PERF_LRZ_PARTIAL_COVERED_TILES = 20,
+ PERF_LRZ_FEEDBACK_ACCEPT = 21,
+ PERF_LRZ_FEEDBACK_DISCARD = 22,
+ PERF_LRZ_FEEDBACK_STALL = 23,
+ PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24,
+ PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25,
+ PERF_LRZ_STALL_CYCLES_VC = 26,
+ PERF_LRZ_RAS_MASK_TRANS = 27,
+};
+
+enum a6xx_cmp_perfcounter_select {
+ PERF_CMPDECMP_STALL_CYCLES_ARB = 0,
+ PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
+ PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
+ PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
+ PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
+ PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
+ PERF_CMPDECMP_VBIF_READ_DATA = 7,
+ PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
+ PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
+ PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27,
+ PERF_CMPDECMP_2D_RD_DATA = 28,
+ PERF_CMPDECMP_2D_WR_DATA = 29,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31,
+ PERF_CMPDECMP_2D_OUTPUT_TRANS = 32,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35,
+ PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36,
+ PERF_CMPDECMP_2D_BUSY_CYCLES = 37,
+ PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38,
+ PERF_CMPDECMP_2D_PIXELS = 39,
};
enum a6xx_tex_filter {
@@ -1765,12 +2444,39 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
#define REG_A6XX_VBIF_VERSION 0x00003000
+#define REG_A6XX_VBIF_CLKON 0x00003001
+#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+#define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK;
+}
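
[Editor's note: every multi-bit field in this generated header follows the same convention — a `__MASK`/`__SHIFT` pair plus an inline packer that shifts and masks the caller's value. A minimal sketch of driving the VBIF test bus with these helpers, reading the result from REG_A6XX_VBIF_TEST_BUS_OUT defined just below (the surrounding enable sequence is omitted):

    /* Route test bus 2, line 5, to the output register and sample it. */
    gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL1,
              A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(5));
    val = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
]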
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c
+
#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
@@ -1813,313 +2519,79 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00018400
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00018401
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00018402
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00018403
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00018404
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00018405
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00018408
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00018409
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0001840a
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0001840b
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0001840c
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0001840d
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0001840e
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0001840f
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00018410
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00018411
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0001842f
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00018430
-
-#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00021140
-
-#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00021148
-
-#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00021540
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00021541
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00021542
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00021543
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00021544
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00021545
-
-#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00021572
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00021573
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00021574
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00021575
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00021576
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00021577
-
-#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000215a4
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000215a5
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000215a6
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000215a7
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000215a8
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000215a9
-
-#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000215d6
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000215d7
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000215d8
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000215d9
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000215da
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000215db
-
-#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x000a0000
-
-#define REG_A6XX_X1_WINDOW_OFFSET 0x000088d4
-#define A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X1_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X1_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X1_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_X1_WINDOW_OFFSET_X__SHIFT) & A6XX_X1_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_X1_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X1_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X1_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_X1_WINDOW_OFFSET_Y__SHIFT) & A6XX_X1_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_X2_WINDOW_OFFSET 0x0000b4d1
-#define A6XX_X2_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X2_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X2_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X2_WINDOW_OFFSET_X(uint32_t val)
+#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
+#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff
+#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
{
- return ((val) << A6XX_X2_WINDOW_OFFSET_X__SHIFT) & A6XX_X2_WINDOW_OFFSET_X__MASK;
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
}
-#define A6XX_X2_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X2_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X2_WINDOW_OFFSET_Y(uint32_t val)
+#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
{
- return ((val) << A6XX_X2_WINDOW_OFFSET_Y__SHIFT) & A6XX_X2_WINDOW_OFFSET_Y__MASK;
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
}
-#define REG_A6XX_X3_WINDOW_OFFSET 0x0000b307
-#define A6XX_X3_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X3_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X3_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X3_WINDOW_OFFSET_X(uint32_t val)
+#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_SP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
{
- return ((val) << A6XX_X3_WINDOW_OFFSET_X__SHIFT) & A6XX_X3_WINDOW_OFFSET_X__MASK;
+ return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
}
-#define A6XX_X3_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X3_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X3_WINDOW_OFFSET_Y(uint32_t val)
+#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
{
- return ((val) << A6XX_X3_WINDOW_OFFSET_Y__SHIFT) & A6XX_X3_WINDOW_OFFSET_Y__MASK;
+ return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
}
-#define REG_A6XX_X1_BIN_SIZE 0x000080a1
-#define A6XX_X1_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X1_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X1_BIN_SIZE_WIDTH(uint32_t val)
+#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
+#define A6XX_SP_TP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
{
- return ((val >> 5) << A6XX_X1_BIN_SIZE_WIDTH__SHIFT) & A6XX_X1_BIN_SIZE_WIDTH__MASK;
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
}
-#define A6XX_X1_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X1_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X1_BIN_SIZE_HEIGHT(uint32_t val)
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
{
- return ((val >> 4) << A6XX_X1_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X1_BIN_SIZE_HEIGHT__MASK;
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
}
-#define REG_A6XX_X2_BIN_SIZE 0x00008800
-#define A6XX_X2_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X2_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X2_BIN_SIZE_WIDTH(uint32_t val)
+#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
+#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x000000ff
+#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
{
- return ((val >> 5) << A6XX_X2_BIN_SIZE_WIDTH__SHIFT) & A6XX_X2_BIN_SIZE_WIDTH__MASK;
+ return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
}
-#define A6XX_X2_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X2_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X2_BIN_SIZE_HEIGHT(uint32_t val)
+#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x0001ff00
+#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
{
- return ((val >> 4) << A6XX_X2_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X2_BIN_SIZE_HEIGHT__MASK;
+ return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
}
+#define A6XX_GRAS_BIN_CONTROL_BINNING_PASS 0x00040000
+#define A6XX_GRAS_BIN_CONTROL_USE_VIZ 0x00200000
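
[Editor's note: the pre-shift in the `BINW`/`BINH` packers means the register stores bin width in 32-pixel units (`val >> 5`) and height in 16-pixel units (`val >> 4`), so callers pass pixel dimensions and the low bits are dropped. A minimal sketch (for illustration only — in practice this state register is emitted through the command stream rather than written directly):

    /* 128x64-pixel bins for the binning pass:
     * BINW -> 128 >> 5 = 4, BINH -> 64 >> 4 = 4. */
    u32 bin_cntl = A6XX_GRAS_BIN_CONTROL_BINW(128) |
                   A6XX_GRAS_BIN_CONTROL_BINH(64) |
                   A6XX_GRAS_BIN_CONTROL_BINNING_PASS;
]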
-#define REG_A6XX_X3_BIN_SIZE 0x000088d3
-#define A6XX_X3_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X3_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X3_BIN_SIZE_WIDTH(uint32_t val)
+#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3
+#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x000000ff
+#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
{
- return ((val >> 5) << A6XX_X3_BIN_SIZE_WIDTH__SHIFT) & A6XX_X3_BIN_SIZE_WIDTH__MASK;
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
}
-#define A6XX_X3_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X3_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X3_BIN_SIZE_HEIGHT(uint32_t val)
+#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x0001ff00
+#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
{
- return ((val >> 4) << A6XX_X3_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X3_BIN_SIZE_HEIGHT__MASK;
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
}
#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
@@ -2182,11 +2654,19 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
}
-#define REG_A6XX_VSC_XXX_ADDRESS_LO 0x00000c30
+#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO 0x00000c30
-#define REG_A6XX_VSC_XXX_ADDRESS_HI 0x00000c31
+#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_HI 0x00000c31
-#define REG_A6XX_VSC_XXX_PITCH 0x00000c32
+#define REG_A6XX_VSC_PIPE_DATA2_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PIPE_DATA2_ARRAY_PITCH 0x00000c33
+#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK;
+}
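
[Editor's note: the same convention applies here — `ARRAY_PITCH` is recorded in 16-byte granules (`val >> 4`), so the byte pitch handed to the packer must be 16-byte aligned or the low bits are silently discarded. A hedged sketch (`bin_count`/`bin_size` are illustrative names):

    /* Keep the per-pipe array pitch 16-byte aligned before packing. */
    u32 array_pitch = ALIGN(bin_count * bin_size, 16);
    u32 reg = A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(array_pitch);
]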
#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34
@@ -2194,18 +2674,29 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
#define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36
+#define REG_A6XX_VSC_PIPE_DATA_ARRAY_PITCH 0x00000c37
+#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_DATA_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK;
+}
+
static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+#define REG_A6XX_GRAS_UNKNOWN_8000 0x00008000
+
#define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001
#define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004
#define REG_A6XX_GRAS_CNTL 0x00008005
#define A6XX_GRAS_CNTL_VARYING 0x00000001
+#define A6XX_GRAS_CNTL_UNK3 0x00000008
#define A6XX_GRAS_CNTL_XCOORD 0x00000040
#define A6XX_GRAS_CNTL_YCOORD 0x00000080
#define A6XX_GRAS_CNTL_ZCOORD 0x00000100
@@ -2308,6 +2799,9 @@ static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
}
+#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
@@ -2344,6 +2838,8 @@ static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_dep
#define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b
+#define REG_A6XX_GRAS_UNKNOWN_80A0 0x000080a0
+
#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -2464,6 +2960,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
+
#define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102
#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0
@@ -2494,6 +2992,10 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
+#define REG_A6XX_GRAS_UNKNOWN_8109 0x00008109
+
+#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
+
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
@@ -2590,6 +3092,33 @@ static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val)
#define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600
+#define REG_A6XX_RB_BIN_CONTROL 0x00008800
+#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x000000ff
+#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x0001ff00
+#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_BINNING_PASS 0x00040000
+#define A6XX_RB_BIN_CONTROL_USE_VIZ 0x00200000
+
+#define REG_A6XX_RB_RENDER_CNTL 0x00008801
+#define A6XX_RB_RENDER_CNTL_UNK4 0x00000010
+#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080
+#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+
#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -2615,6 +3144,7 @@ static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
#define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_UNK3 0x00000008
#define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
#define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
#define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
@@ -2747,6 +3277,10 @@ static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dithe
#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+#define REG_A6XX_RB_UNKNOWN_8810 0x00008810
+
+#define REG_A6XX_RB_UNKNOWN_8811 0x00008811
+
#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
@@ -2837,7 +3371,6 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
-#define A6XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
@@ -2923,6 +3456,9 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
}
+#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870
+#define A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
#define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
@@ -3053,6 +3589,12 @@ static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
{
return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
}
+#define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00
+#define A6XX_RB_STENCILREF_BFREF__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK;
+}
#define REG_A6XX_RB_STENCILMASK 0x00008888
#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
@@ -3061,6 +3603,12 @@ static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
{
return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
}
+#define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK;
+}
#define REG_A6XX_RB_STENCILWRMASK 0x00008889
#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
@@ -3069,6 +3617,12 @@ static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
{
return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
}
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK;
+}
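
[Editor's note: the three `BF*` fields added above expose back-face stencil state in bits [15:8] of RB_STENCILREF, RB_STENCILMASK and RB_STENCILWRMASK, alongside the existing front-face fields in bits [7:0] — two-sided stencil in a single register each. A minimal sketch of composing one of them (illustrative; such state is normally emitted via the command stream):

    /* Front reference in [7:0], back-face reference in [15:8]. */
    u32 stencilref = A6XX_RB_STENCILREF_REF(front_ref) |
                     A6XX_RB_STENCILREF_BFREF(back_ref);
]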
#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
@@ -3177,14 +3731,14 @@ static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_RB_BLIT_INFO 0x000088e3
#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
-#define A6XX_RB_BLIT_INFO_FAST_CLEAR 0x00000002
+#define A6XX_RB_BLIT_INFO_GMEM 0x00000002
#define A6XX_RB_BLIT_INFO_INTEGER 0x00000004
-#define A6XX_RB_BLIT_INFO_UNK3 0x00000008
-#define A6XX_RB_BLIT_INFO_MASK__MASK 0x000000f0
-#define A6XX_RB_BLIT_INFO_MASK__SHIFT 4
-static inline uint32_t A6XX_RB_BLIT_INFO_MASK(uint32_t val)
+#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val)
{
- return ((val) << A6XX_RB_BLIT_INFO_MASK__SHIFT) & A6XX_RB_BLIT_INFO_MASK__MASK;
+ return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK;
}
#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
@@ -3274,12 +3828,16 @@ static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+#define REG_A6XX_RB_UNKNOWN_8E04 0x00008e04
+
#define REG_A6XX_RB_CCU_CNTL 0x00008e07
#define REG_A6XX_VPC_UNKNOWN_9101 0x00009101
#define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104
+#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
+
#define REG_A6XX_VPC_UNKNOWN_9108 0x00009108
static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
@@ -3385,6 +3943,9 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+#define REG_A6XX_VPC_SO_OVERRIDE 0x00009306
+#define A6XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
+
#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
@@ -3397,8 +3958,14 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
#define REG_A6XX_PC_UNKNOWN_9805 0x00009805
+#define REG_A6XX_PC_UNKNOWN_9806 0x00009806
+
+#define REG_A6XX_PC_UNKNOWN_9980 0x00009980
+
#define REG_A6XX_PC_UNKNOWN_9981 0x00009981
+#define REG_A6XX_PC_UNKNOWN_9990 0x00009990
+
#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
@@ -3410,6 +3977,7 @@ static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val)
{
return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
}
+#define A6XX_PC_PRIMITIVE_CNTL_1_PSIZE 0x00000100
#define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06
@@ -3488,6 +4056,8 @@ static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
+#define REG_A6XX_VFD_UNKNOWN_A009 0x0000a009
+
#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
@@ -3640,6 +4210,8 @@ static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
+#define REG_A6XX_SP_UNKNOWN_A81B 0x0000a81b
+
#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
#define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d
@@ -3884,6 +4456,8 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+#define REG_A6XX_SP_UNKNOWN_A982 0x0000a982
+
#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
#define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984
@@ -3979,7 +4553,8 @@ static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
}
#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
-#define A6XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
+
+#define REG_A6XX_SP_UNKNOWN_A99E 0x0000a99e
#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
@@ -4066,14 +4641,20 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+#define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20
+
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+#define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03
+
#define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04
#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
+
#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -4097,6 +4678,8 @@ static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples
#define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304
+#define REG_A6XX_SP_TP_UNKNOWN_B309 0x0000b309
+
#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
@@ -4162,6 +4745,8 @@ static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
}
+#define REG_A6XX_HLSQ_UNKNOWN_B980 0x0000b980
+
#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
@@ -4537,11 +5122,11 @@ static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
}
#define REG_A6XX_TEX_CONST_8 0x00000008
-#define A6XX_TEX_CONST_8_BASE_HI__MASK 0x0001ffff
-#define A6XX_TEX_CONST_8_BASE_HI__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
+#define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
{
- return ((val) << A6XX_TEX_CONST_8_BASE_HI__SHIFT) & A6XX_TEX_CONST_8_BASE_HI__MASK;
+ return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK;
}
#define REG_A6XX_TEX_CONST_9 0x00000009
@@ -4558,5 +5143,227 @@ static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
#define REG_A6XX_TEX_CONST_15 0x0000000f
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
+
#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index bbb8126ec5c5..d4e98e5876bc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
-#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
@@ -42,9 +41,6 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
- if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
- tasklet_schedule(&gmu->hfi_tasklet);
-
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
@@ -65,12 +61,14 @@ static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
-static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
+ int ret;
+
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
- ((index << 24) & 0xff) | (3 & 0xf));
+ ((3 & 0xf) << 28) | index);
/*
* Send an invalid index as a vote for the bus bandwidth and let the
@@ -82,7 +80,37 @@ static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
- return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+ ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+ if (ret)
+ dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
+
+ gmu->freq = gmu->gpu_freqs[index];
+}
+
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 perf_index = 0;
+
+ if (freq == gmu->freq)
+ return;
+
+ for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+ if (freq == gmu->gpu_freqs[perf_index])
+ break;
+
+ __a6xx_gmu_set_freq(gmu, perf_index);
+}
+
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return gmu->freq;
}
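
The corrected REG_A6XX_GMU_DCVS_PERF_SETTING expression packs two fields: the bus-vote mode (the constant 3) into bits 31:28 and the GPU performance index into the low bits; the old expression masked the shifted index away entirely. A minimal user-space sketch of the index lookup and the encoding, with a hypothetical frequency table standing in for gmu->gpu_freqs[]:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical ascending OPP table standing in for gmu->gpu_freqs[]. */
    static const unsigned long gpu_freqs[] = {
            257000000, 342000000, 414000000, 710000000,
    };
    #define NR_GPU_FREQS 4

    /* Same lookup as a6xx_gmu_set_freq(): an exact match wins; otherwise
     * the scan runs off the end and picks the highest index. */
    static uint32_t freq_to_perf_index(unsigned long freq)
    {
            uint32_t i;

            for (i = 0; i < NR_GPU_FREQS - 1; i++)
                    if (freq == gpu_freqs[i])
                            break;
            return i;
    }

    /* Value written to REG_A6XX_GMU_DCVS_PERF_SETTING: bus-vote mode
     * (fixed at 3) in bits 31:28, performance index in the low bits. */
    static uint32_t dcvs_perf_setting(uint32_t index)
    {
            return ((3 & 0xf) << 28) | index;
    }

    int main(void)
    {
            uint32_t idx = freq_to_perf_index(414000000);

            /* Prints "index=2 setting=0x30000002". */
            printf("index=%u setting=0x%08x\n", idx, dcvs_perf_setting(idx));
            return 0;
    }
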
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
@@ -135,9 +163,6 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
u32 val;
int ret;
- gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
- A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
-
gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
@@ -348,8 +373,23 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
+static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
+{
+ return msm_writel(value, ptr + (offset << 2));
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name);
+
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
+ struct platform_device *pdev = to_platform_device(gmu->dev);
+ void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+ void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+
+ if (!pdcptr || !seqptr)
+ goto err;
+
/* Disable SDE clock gating */
gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
@@ -374,44 +414,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
/* Load PDC sequencer uCode for power up and power down sequence */
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
/* Set TCS commands used by PDC sequence for low power modes */
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
/* ensure no writes happen before the uCode is fully written */
wmb();
+
+err:
+ devm_iounmap(gmu->dev, pdcptr);
+ devm_iounmap(gmu->dev, seqptr);
}
/*
@@ -547,8 +591,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
}
#define A6XX_HFI_IRQ_MASK \
- (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
- A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+ (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
#define A6XX_GMU_IRQ_MASK \
(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
@@ -626,7 +669,7 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
/* Set the GPU back to the highest power frequency */
- a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
out:
if (ret)
@@ -665,7 +708,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
ret = a6xx_hfi_start(gmu, status);
/* Set the GPU to the highest power frequency */
- a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
out:
/* Make sure to turn off the boot OOB request on error */
@@ -1140,7 +1183,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, false);
+ of_dma_configure(gmu->dev, node, true);
 /* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1170,11 +1213,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
-
- /* Map the GPU power domain controller registers */
- gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
-
- if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
+ if (IS_ERR(gmu->mmio))
goto err;
/* Get the HFI and GMU interrupts */
@@ -1184,9 +1223,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
goto err;
- /* Set up a tasklet to handle GMU HFI responses */
- tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
-
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index d9a386c18799..35f765afae45 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -4,6 +4,7 @@
#ifndef _A6XX_GMU_H_
#define _A6XX_GMU_H_
+#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"
@@ -47,7 +48,6 @@ struct a6xx_gmu {
struct device *dev;
void * __iomem mmio;
- void * __iomem pdc_mmio;
int hfi_irq;
int gmu_irq;
@@ -74,6 +74,8 @@ struct a6xx_gmu {
unsigned long gmu_freqs[4];
u32 cx_arc_votes[4];
+ unsigned long freq;
+
struct a6xx_hfi_queue queues[2];
struct tasklet_struct hfi_tasklet;
@@ -89,11 +91,6 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
return msm_writel(value, gmu->mmio + (offset << 2));
}
-static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
-{
- return msm_writel(value, gmu->pdc_mmio + (offset << 2));
-}
-
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
u32 val = gmu_read(gmu, reg);
@@ -103,6 +100,16 @@ static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
gmu_write(gmu, reg, val | or);
}
+static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ val = (u64) msm_readl(gmu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
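
gmu_poll_timeout() applies the same dword-to-byte scaling before handing off to readl_poll_timeout(), and gmu_read64() above composes a 64-bit counter from two 32-bit reads. A rough, self-contained sketch of the poll loop's shape (usleep() standing in for the kernel's delay primitives; the real macro accepts an arbitrary condition, not just a mask test):

    #include <stdint.h>
    #include <errno.h>
    #include <unistd.h>

    /* Approximate shape of readl_poll_timeout(): re-read the register
     * until the condition holds, sleeping interval_us between reads,
     * and give up with -ETIMEDOUT once timeout_us has elapsed. */
    static int poll32_timeout(const volatile uint32_t *reg, uint32_t mask,
                              unsigned long interval_us, unsigned long timeout_us)
    {
            unsigned long waited = 0;

            for (;;) {
                    if (*reg & mask)
                            return 0;
                    if (waited >= timeout_us)
                            return -ETIMEDOUT;
                    usleep(interval_us);
                    waited += interval_us;
            }
    }

The HFI ack path below uses this with a 100 us interval and a 5000 us timeout.
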
@@ -157,6 +164,4 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
-void a6xx_hfi_task(unsigned long data);
-
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index ef68098d2adc..db56f263ed77 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -167,8 +167,8 @@ static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_
#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c629f742a1d1..631257c297fd 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -7,6 +7,8 @@
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+#include <linux/devfreq.h>
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -438,10 +440,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
- A6XX_PROTECT_RDONLY(0x8d0, 0x23));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(25),
A6XX_PROTECT_RDONLY(0x980, 0x4));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
@@ -682,6 +682,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
gpu->needs_hw_init = true;
+ msm_gpu_resume_devfreq(gpu);
+
return ret;
}
@@ -690,6 +692,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ devfreq_suspend_device(gpu->devfreq.devfreq);
+
/*
* Make sure the GMU is idle before continuing (because some transitions
* may use VBIF
@@ -744,7 +748,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
if (a6xx_gpu->sqe_bo) {
if (a6xx_gpu->sqe_iova)
msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo);
+ drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
a6xx_gmu_remove(a6xx_gpu);
@@ -753,6 +757,27 @@ static void a6xx_destroy(struct msm_gpu *gpu)
kfree(a6xx_gpu);
}
+static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u64 busy_cycles, busy_time;
+
+ busy_cycles = gmu_read64(&a6xx_gpu->gmu,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
+
+ busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
+ do_div(busy_time, 192);
+
+ gpu->devfreq.busy_cycles = busy_cycles;
+
+ if (WARN_ON(busy_time > ~0LU))
+ return ~0LU;
+
+ return (unsigned long)busy_time;
+}
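
a6xx_gpu_busy() feeds devfreq by converting a delta of GMU XOCLK power-counter cycles into busy time. The counter ticks at the 19.2 MHz crystal rate, hence microseconds = cycles / 19.2, done in integer math as cycles * 10 / 192 by the do_div() above. A standalone check of the conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* XO-clock cycles to microseconds: 19.2 MHz counter, so
     * us = cycles / 19.2 == cycles * 10 / 192 in integer math. */
    static uint64_t xo_cycles_to_us(uint64_t cycles)
    {
            return (cycles * 10) / 192;
    }

    int main(void)
    {
            /* 19,200,000 cycles == one fully busy second == 1000000 us. */
            printf("%llu us\n", (unsigned long long)xo_cycles_to_us(19200000));
            return 0;
    }
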
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -768,6 +793,9 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a6xx_show,
#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gmu_set_freq,
},
.get_timestamp = a6xx_get_timestamp,
};
@@ -799,7 +827,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
}
/* Check if there is a GMU phandle and set it up */
- node = of_parse_phandle(pdev->dev.of_node, "gmu", 0);
+ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
/* FIXME: How do we gracefully handle this? */
BUG_ON(!node);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index dd69e5b0e692..4127dcebc202 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -56,5 +56,6 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
-
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index f19ef4cb6ea4..6ff9baec2658 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -79,83 +79,72 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return 0;
}
-struct a6xx_hfi_response {
- u32 id;
- u32 seqnum;
- struct list_head node;
- struct completion complete;
-
- u32 error;
- u32 payload[16];
-};
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ u32 val;
+ int ret;
-/*
- * Incoming HFI ack messages can come in out of order so we need to store all
- * the pending messages on a list until they are handled.
- */
-static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
-static LIST_HEAD(hfi_ack_list);
+ /* Wait for a response */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
-static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
- struct a6xx_hfi_msg_response *msg)
-{
- struct a6xx_hfi_response *resp;
- u32 id, seqnum;
-
- /* msg->ret_header contains the header of the message being acked */
- id = HFI_HEADER_ID(msg->ret_header);
- seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
-
- spin_lock(&hfi_ack_lock);
- list_for_each_entry(resp, &hfi_ack_list, node) {
- if (resp->id == id && resp->seqnum == seqnum) {
- resp->error = msg->error;
- memcpy(resp->payload, msg->payload,
- sizeof(resp->payload));
-
- complete(&resp->complete);
- spin_unlock(&hfi_ack_lock);
- return;
- }
+ if (ret) {
+ dev_err(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ return -ETIMEDOUT;
}
- spin_unlock(&hfi_ack_lock);
- dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
-}
+ /* Clear the interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
-static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
- struct a6xx_hfi_msg_response *msg)
-{
- struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
+ for (;;) {
+ struct a6xx_hfi_msg_response resp;
- dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
-}
+ /* Get the next packet */
+ ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ sizeof(resp) >> 2);
-void a6xx_hfi_task(unsigned long data)
-{
- struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
- struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
- struct a6xx_hfi_msg_response resp;
+ /* If the queue is empty our response never made it */
+ if (!ret) {
+ dev_err(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
- for (;;) {
- u32 id;
- int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
- sizeof(resp) >> 2);
+ return -ENOENT;
+ }
+
+ if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
+ struct a6xx_hfi_msg_error *error =
+ (struct a6xx_hfi_msg_error *) &resp;
- /* Returns the number of bytes copied or negative on error */
- if (ret <= 0) {
- if (ret < 0)
- dev_err(gmu->dev,
- "Unable to read the HFI message queue\n");
- break;
+ dev_err(gmu->dev, "GMU firmware error %d\n",
+ error->code);
+ continue;
+ }
+
+ if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
+ dev_err(gmu->dev,
+ "Unexpected message id %d on the response queue\n",
+ HFI_HEADER_SEQNUM(resp.ret_header));
+ continue;
+ }
+
+ if (resp.error) {
+ dev_err(gmu->dev,
+ "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
}
- id = HFI_HEADER_ID(resp.header);
+ /* All is well, copy over the buffer */
+ if (payload && payload_size)
+ memcpy(payload, resp.payload,
+ min_t(u32, payload_size, sizeof(resp.payload)));
- if (id == HFI_F2H_MSG_ACK)
- a6xx_hfi_handle_ack(gmu, &resp);
- else if (id == HFI_F2H_MSG_ERROR)
- a6xx_hfi_handle_error(gmu, &resp);
+ return 0;
}
}
@@ -163,7 +152,6 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
void *data, u32 size, u32 *payload, u32 payload_size)
{
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
- struct a6xx_hfi_response resp = { 0 };
int ret, dwords = size >> 2;
u32 seqnum;
@@ -173,53 +161,14 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
(dwords << 8) | id;
- init_completion(&resp.complete);
- resp.id = id;
- resp.seqnum = seqnum;
-
- spin_lock_bh(&hfi_ack_lock);
- list_add_tail(&resp.node, &hfi_ack_list);
- spin_unlock_bh(&hfi_ack_lock);
-
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
dev_err(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
- goto out;
- }
-
- /* Wait up to 5 seconds for the response */
- ret = wait_for_completion_timeout(&resp.complete,
- msecs_to_jiffies(5000));
- if (!ret) {
- dev_err(gmu->dev,
- "Message %s id %d timed out waiting for response\n",
- a6xx_hfi_msg_id[id], seqnum);
- ret = -ETIMEDOUT;
- } else
- ret = 0;
-
-out:
- spin_lock_bh(&hfi_ack_lock);
- list_del(&resp.node);
- spin_unlock_bh(&hfi_ack_lock);
-
- if (ret)
return ret;
-
- if (resp.error) {
- dev_err(gmu->dev, "Message %s id %d returned error %d\n",
- a6xx_hfi_msg_id[id], seqnum, resp.error);
- return -EINVAL;
}
- if (payload && payload_size) {
- int copy = min_t(u32, payload_size, sizeof(resp.payload));
-
- memcpy(payload, resp.payload, copy);
- }
-
- return 0;
+ return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}
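
With the ack list and tasklet gone, the send path is synchronous end to end: pack a header carrying a sequence number, write the command queue, then let a6xx_hfi_wait_for_ack() poll for the MSGQ interrupt and drain responses until the matching seqnum turns up (logging and skipping firmware error packets along the way). A sketch of the header layout implied by the packing above; the extraction masks are inferred from the shift positions, and HFI_MSG_CMD = 0 is assumed purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define HFI_MSG_CMD 0   /* assumed type value, for illustration only */

    /* Header layout from a6xx_hfi_send_msg(): bits 7:0 message id,
     * 15:8 size in dwords, 19:16 type, 31:20 sequence number. */
    static uint32_t hfi_pack_header(uint32_t seqnum, uint32_t dwords, uint32_t id)
    {
            return (seqnum << 20) | (HFI_MSG_CMD << 16) | (dwords << 8) | id;
    }

    static uint32_t hfi_header_id(uint32_t header)
    {
            return header & 0xff;
    }

    static uint32_t hfi_header_seqnum(uint32_t header)
    {
            return (header >> 20) & 0xfff;
    }

    int main(void)
    {
            uint32_t hdr = hfi_pack_header(42, 5, 3);

            /* Prints "id=3 seq=42". */
            printf("id=%u seq=%u\n", hfi_header_id(hdr), hfi_header_seqnum(hdr));
            return 0;
    }
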
static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 5dace1350810..1318959d504d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7d3e9a129ac7..86abdb2b3a9c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -120,6 +120,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 03a91e10b310..15eb03bed984 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -237,7 +237,7 @@ enum adreno_pm4_type3_packets {
CP_UNK_A6XX_14 = 20,
CP_UNK_A6XX_36 = 54,
CP_UNK_A6XX_55 = 85,
- UNK_A6XX_6D = 109,
+ CP_REG_WRITE = 109,
};
enum adreno_state_block {
@@ -968,19 +968,19 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
}
#define REG_CP_SET_BIN_DATA5_5 0x00000005
-#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val)
+#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO(uint32_t val)
{
- return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK;
+ return ((val) << CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK;
}
#define REG_CP_SET_BIN_DATA5_6 0x00000006
-#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val)
+#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO(uint32_t val)
{
- return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK;
+ return ((val) << CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK;
}
#define REG_CP_REG_TO_MEM_0 0x00000000
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 80cbf75bc2ff..d4530d60767b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -47,237 +47,17 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
-#define MISR_BUFF_SIZE 256
-
-static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
-{
- struct msm_drm_private *priv;
-
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return NULL;
- }
- priv = crtc->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid kms\n");
- return NULL;
- }
-
- return to_dpu_kms(priv->kms);
-}
-
-static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
-{
- struct drm_crtc *crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!dpu_crtc) {
- DPU_ERROR("invalid dpu crtc\n");
- return -EINVAL;
- }
-
- crtc = &dpu_crtc->base;
- if (!crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid drm device\n");
- return -EINVAL;
- }
-
- priv = crtc->dev->dev_private;
- if (!priv->kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
-
- if (enable)
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- else
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- return 0;
-}
-
-/**
- * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
- * @rp: Pointer to resource pool
- * return: Pointer to drm crtc if success; null otherwise
- */
-static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
-{
- if (!rp)
- return NULL;
-
- return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
-}
-
-/**
- * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
- * @rp: Pointer to resource pool
- * @force: True to reclaim all resources; otherwise, reclaim only unused ones
- * return: None
- */
-static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
+ struct drm_display_mode *mode)
{
- struct dpu_crtc_res *res, *next;
- struct drm_crtc *crtc;
-
- crtc = _dpu_crtc_rp_to_crtc(rp);
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
- force ? "destroy" : "free_unused");
-
- list_for_each_entry_safe(res, next, &rp->res_list, list) {
- if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
- continue;
- DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
- crtc->base.id, rp->sequence_id,
- res->type, res->tag, res->val,
- atomic_read(&res->refcount));
- list_del(&res->list);
- if (res->ops.put)
- res->ops.put(res->val);
- kfree(res);
- }
+ return mode->hdisplay / cstate->num_mixers;
}
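
The new helper replaces dpu_crtc_get_mixer_width() and simply splits the mode horizontally across the active layer mixers, matching the per-mixer lm_bounds set up in _dpu_crtc_setup_lm_bounds(). A trivial check of the split:

    #include <stdio.h>

    /* Same arithmetic as _dpu_crtc_get_mixer_width(): each layer mixer
     * drives an equal horizontal slice of the adjusted mode. */
    static int mixer_width(int hdisplay, int num_mixers)
    {
            return hdisplay / num_mixers;
    }

    int main(void)
    {
            /* A 3840-pixel-wide mode on dual mixers: 1920 px per LM. */
            printf("%d px per mixer\n", mixer_width(3840, 2));
            return 0;
    }
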
-/**
- * _dpu_crtc_rp_free_unused - free unused resource in pool
- * @rp: Pointer to resource pool
- * return: none
- */
-static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
-{
- mutex_lock(rp->rp_lock);
- _dpu_crtc_rp_reclaim(rp, false);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_rp_destroy - destroy resource pool
- * @rp: Pointer to resource pool
- * return: None
- */
-static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
-{
- mutex_lock(rp->rp_lock);
- list_del_init(&rp->rp_list);
- _dpu_crtc_rp_reclaim(rp, true);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_hw_blk_get - get callback for hardware block
- * @val: Resource handle
- * @type: Resource type
- * @tag: Search tag for given resource
- * return: Resource handle
- */
-static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
-{
- DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
- return dpu_hw_blk_get(val, type, tag);
-}
-
-/**
- * _dpu_crtc_hw_blk_put - put callback for hardware block
- * @val: Resource handle
- * return: None
- */
-static void _dpu_crtc_hw_blk_put(void *val)
-{
- DPU_DEBUG("res://%pK\n", val);
- dpu_hw_blk_put(val);
-}
-
-/**
- * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
- * @rp: Pointer to original resource pool
- * @dup_rp: Pointer to duplicated resource pool
- * return: None
- */
-static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
- struct dpu_crtc_respool *dup_rp)
-{
- struct dpu_crtc_res *res, *dup_res;
- struct drm_crtc *crtc;
-
- if (!rp || !dup_rp || !rp->rp_head) {
- DPU_ERROR("invalid resource pool\n");
- return;
- }
-
- crtc = _dpu_crtc_rp_to_crtc(rp);
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
-
- mutex_lock(rp->rp_lock);
- dup_rp->sequence_id = rp->sequence_id + 1;
- INIT_LIST_HEAD(&dup_rp->res_list);
- dup_rp->ops = rp->ops;
- list_for_each_entry(res, &rp->res_list, list) {
- dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
- if (!dup_res) {
- mutex_unlock(rp->rp_lock);
- return;
- }
- INIT_LIST_HEAD(&dup_res->list);
- atomic_set(&dup_res->refcount, 0);
- dup_res->type = res->type;
- dup_res->tag = res->tag;
- dup_res->val = res->val;
- dup_res->ops = res->ops;
- dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
- DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
- crtc->base.id, dup_rp->sequence_id,
- dup_res->type, dup_res->tag, dup_res->val,
- atomic_read(&dup_res->refcount));
- list_add_tail(&dup_res->list, &dup_rp->res_list);
- if (dup_res->ops.get)
- dup_res->ops.get(dup_res->val, 0, -1);
- }
-
- dup_rp->rp_lock = rp->rp_lock;
- dup_rp->rp_head = rp->rp_head;
- INIT_LIST_HEAD(&dup_rp->rp_list);
- list_add_tail(&dup_rp->rp_list, rp->rp_head);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_rp_reset - reset resource pool after allocation
- * @rp: Pointer to original resource pool
- * @rp_lock: Pointer to serialization resource pool lock
- * @rp_head: Pointer to crtc resource pool head
- * return: None
- */
-static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
- struct mutex *rp_lock, struct list_head *rp_head)
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
- if (!rp || !rp_lock || !rp_head) {
- DPU_ERROR("invalid resource pool\n");
- return;
- }
+ struct msm_drm_private *priv = crtc->dev->dev_private;
- mutex_lock(rp_lock);
- rp->rp_lock = rp_lock;
- rp->rp_head = rp_head;
- INIT_LIST_HEAD(&rp->rp_list);
- rp->sequence_id = 0;
- INIT_LIST_HEAD(&rp->res_list);
- rp->ops.get = _dpu_crtc_hw_blk_get;
- rp->ops.put = _dpu_crtc_hw_blk_put;
- list_add_tail(&rp->rp_list, rp->rp_head);
- mutex_unlock(rp_lock);
+ return to_dpu_kms(priv->kms);
}
static void dpu_crtc_destroy(struct drm_crtc *crtc)
@@ -297,14 +77,29 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
}
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
- struct dpu_plane_state *pstate)
+ struct dpu_plane_state *pstate, struct dpu_format *format)
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
+ uint32_t blend_op;
+ struct drm_format_name_buf format_name;
/* default to opaque blending */
- lm->ops.setup_blend_config(lm, pstate->stage, 0XFF, 0,
- DPU_BLEND_FG_ALPHA_FG_CONST |
- DPU_BLEND_BG_ALPHA_BG_CONST);
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST;
+
+ if (format->alpha_enable) {
+ /* coverage blending */
+ blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ DPU_BLEND_BG_INV_ALPHA;
+ }
+
+ lm->ops.setup_blend_config(lm, pstate->stage,
+ 0xFF, 0, blend_op);
+
+ DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
+ drm_get_format_name(format->base.pixel_format, &format_name),
+ format->alpha_enable, blend_op);
}
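
Blend setup now keys off the framebuffer format: constant (opaque) blending stays the default, while formats with an alpha channel switch to per-pixel coverage blending, i.e. classic source-over with the background weighted by the inverted foreground alpha. A numeric sketch of that blend equation, on normalized values and ignoring premultiplication:

    #include <stdio.h>

    /* Source-over with per-pixel foreground alpha, the behaviour the
     * DPU_BLEND_FG_ALPHA_FG_PIXEL | DPU_BLEND_BG_ALPHA_FG_PIXEL |
     * DPU_BLEND_BG_INV_ALPHA combination selects above. */
    static double src_over(double fg, double bg, double fg_alpha)
    {
            return fg_alpha * fg + (1.0 - fg_alpha) * bg;
    }

    int main(void)
    {
            /* A 60%-opaque white plane over a black layer: result 0.6. */
            printf("%.1f\n", src_over(1.0, 0.0, 0.6));
            return 0;
    }
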
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
@@ -317,9 +112,9 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
crtc_state = to_dpu_crtc_state(crtc->state);
lm_horiz_position = 0;
- for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
- struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
struct dpu_hw_mixer_cfg cfg;
if (!lm_roi || !drm_rect_visible(lm_roi))
@@ -339,28 +134,17 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct drm_plane *plane;
struct drm_framebuffer *fb;
struct drm_plane_state *state;
- struct dpu_crtc_state *cstate;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_plane_state *pstate = NULL;
struct dpu_format *format;
- struct dpu_hw_ctl *ctl;
- struct dpu_hw_mixer *lm;
- struct dpu_hw_stage_cfg *stage_cfg;
+ struct dpu_hw_ctl *ctl = mixer->lm_ctl;
+ struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
u32 flush_mask;
uint32_t stage_idx, lm_idx;
int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
bool bg_alpha_enable = false;
- if (!dpu_crtc || !mixer) {
- DPU_ERROR("invalid dpu_crtc or mixer\n");
- return;
- }
-
- ctl = mixer->hw_ctl;
- lm = mixer->hw_lm;
- stage_cfg = &dpu_crtc->stage_cfg;
- cstate = to_dpu_crtc_state(crtc->state);
-
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -379,10 +163,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
state->fb ? state->fb->base.id : -1);
format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
- if (!format) {
- DPU_ERROR("invalid format\n");
- return;
- }
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
@@ -400,8 +180,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
fb ? fb->modifier : 0);
/* blend config update */
- for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
- _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+ for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx,
+ pstate, format);
mixer[lm_idx].flush_mask |= flush_mask;
@@ -422,38 +203,25 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
*/
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *dpu_crtc_state;
- struct dpu_crtc_mixer *mixer;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
-
int i;
- if (!crtc)
- return;
-
- dpu_crtc = to_dpu_crtc(crtc);
- dpu_crtc_state = to_dpu_crtc_state(crtc->state);
- mixer = dpu_crtc->mixers;
-
DPU_DEBUG("%s\n", dpu_crtc->name);
- if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
- DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
- return;
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
- if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ for (i = 0; i < cstate->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
DPU_ERROR("invalid lm or ctl assigned to mixer\n");
return;
}
mixer[i].mixer_op_mode = 0;
mixer[i].flush_mask = 0;
- if (mixer[i].hw_ctl->ops.clear_all_blendstages)
- mixer[i].hw_ctl->ops.clear_all_blendstages(
- mixer[i].hw_ctl);
+ if (mixer[i].lm_ctl->ops.clear_all_blendstages)
+ mixer[i].lm_ctl->ops.clear_all_blendstages(
+ mixer[i].lm_ctl);
}
/* initialize stage cfg */
@@ -461,8 +229,8 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
- ctl = mixer[i].hw_ctl;
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
lm = mixer[i].hw_lm;
lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
@@ -543,34 +311,13 @@ static void dpu_crtc_vblank_cb(void *data)
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
- struct msm_drm_private *priv;
- struct dpu_crtc_frame_event *fevent;
- struct drm_crtc *crtc;
- struct dpu_crtc *dpu_crtc;
- struct dpu_kms *dpu_kms;
+ struct dpu_crtc_frame_event *fevent = container_of(work,
+ struct dpu_crtc_frame_event, work);
+ struct drm_crtc *crtc = fevent->crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
unsigned long flags;
bool frame_done = false;
- if (!work) {
- DPU_ERROR("invalid work handle\n");
- return;
- }
-
- fevent = container_of(work, struct dpu_crtc_frame_event, work);
- if (!fevent->crtc || !fevent->crtc->state) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- crtc = fevent->crtc;
- dpu_crtc = to_dpu_crtc(crtc);
-
- dpu_kms = _dpu_crtc_get_kms(crtc);
- if (!dpu_kms) {
- DPU_ERROR("invalid kms handle\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
DPU_ATRACE_BEGIN("crtc_frame_event");
DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
@@ -636,11 +383,6 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
unsigned long flags;
u32 crtc_id;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
/* Nothing to do on idle event */
if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
return;
@@ -683,7 +425,7 @@ static void _dpu_crtc_setup_mixer_for_encoder(
struct drm_crtc *crtc,
struct drm_encoder *enc)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
struct dpu_rm *rm = &dpu_kms->rm;
struct dpu_crtc_mixer *mixer;
@@ -695,8 +437,8 @@ static void _dpu_crtc_setup_mixer_for_encoder(
dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
/* Set up all the mixers and ctls reserved by this encoder */
- for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
- mixer = &dpu_crtc->mixers[i];
+ for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
+ mixer = &cstate->mixers[i];
if (!dpu_rm_get_hw(rm, &lm_iter))
break;
@@ -706,14 +448,14 @@ static void _dpu_crtc_setup_mixer_for_encoder(
if (!dpu_rm_get_hw(rm, &ctl_iter)) {
DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
mixer->hw_lm->idx - LM_0);
- mixer->hw_ctl = last_valid_ctl;
+ mixer->lm_ctl = last_valid_ctl;
} else {
- mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
- last_valid_ctl = mixer->hw_ctl;
+ mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+ last_valid_ctl = mixer->lm_ctl;
}
/* Shouldn't happen, mixers are always >= ctls */
- if (!mixer->hw_ctl) {
+ if (!mixer->lm_ctl) {
DPU_ERROR("no valid ctls found for lm %d\n",
mixer->hw_lm->idx - LM_0);
return;
@@ -721,11 +463,11 @@ static void _dpu_crtc_setup_mixer_for_encoder(
mixer->encoder = enc;
- dpu_crtc->num_mixers++;
+ cstate->num_mixers++;
DPU_DEBUG("setup mixer %d: lm %d\n",
i, mixer->hw_lm->idx - LM_0);
DPU_DEBUG("setup mixer %d: ctl %d\n",
- i, mixer->hw_ctl->idx - CTL_0);
+ i, mixer->lm_ctl->idx - CTL_0);
}
}
@@ -734,10 +476,6 @@ static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *enc;
- dpu_crtc->num_mixers = 0;
- dpu_crtc->mixers_swapped = false;
- memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
-
mutex_lock(&dpu_crtc->crtc_lock);
/* Check for mixers on all encoders attached to this crtc */
list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
@@ -753,24 +491,13 @@ static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
- struct drm_display_mode *adj_mode;
- u32 crtc_split_width;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ struct drm_display_mode *adj_mode = &state->adjusted_mode;
+ u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
int i;
- if (!crtc || !state) {
- DPU_ERROR("invalid args\n");
- return;
- }
-
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = to_dpu_crtc_state(state);
-
- adj_mode = &state->adjusted_mode;
- crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
-
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ for (i = 0; i < cstate->num_mixers; i++) {
struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
r->y1 = 0;
@@ -787,6 +514,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
struct drm_encoder *encoder;
struct drm_device *dev;
unsigned long flags;
@@ -806,10 +534,11 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
DPU_DEBUG("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
dev = crtc->dev;
smmu_state = &dpu_crtc->smmu_state;
- if (!dpu_crtc->num_mixers) {
+ if (!cstate->num_mixers) {
_dpu_crtc_setup_mixers(crtc);
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
}
@@ -836,7 +565,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
_dpu_crtc_blend_setup(crtc);
@@ -861,11 +590,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
unsigned long flags;
struct dpu_crtc_state *cstate;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
if (!crtc->state->enable) {
DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
crtc->base.id, crtc->state->enable);
@@ -900,7 +624,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
/*
@@ -951,8 +675,6 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
DPU_DEBUG("crtc%d\n", crtc->base.id);
- _dpu_crtc_rp_destroy(&cstate->rp);
-
__drm_atomic_helper_crtc_destroy_state(state);
kfree(cstate);
@@ -960,15 +682,9 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
int ret, rc = 0;
- if (!crtc) {
- DPU_ERROR("invalid argument\n");
- return -EINVAL;
- }
- dpu_crtc = to_dpu_crtc(crtc);
-
if (!atomic_read(&dpu_crtc->frame_pending)) {
DPU_DEBUG("no frames pending\n");
return 0;
@@ -989,35 +705,18 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- struct drm_device *dev;
- struct dpu_crtc *dpu_crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- struct dpu_crtc_state *cstate;
+ struct drm_device *dev = crtc->dev;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
int ret;
- if (!crtc) {
- DPU_ERROR("invalid argument\n");
- return;
- }
- dev = crtc->dev;
- dpu_crtc = to_dpu_crtc(crtc);
- dpu_kms = _dpu_crtc_get_kms(crtc);
-
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid argument\n");
- return;
- }
-
- priv = dpu_kms->dev->dev_private;
- cstate = to_dpu_crtc_state(crtc->state);
-
/*
 * If no mixers have been allocated in dpu_crtc_atomic_check(),
* it means we are trying to start a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
DPU_ATRACE_BEGIN("crtc_commit");
@@ -1072,33 +771,19 @@ end:
* _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
* @dpu_crtc: Pointer to dpu crtc structure
* @enable: Whether to enable/disable vblanks
- *
- * @Return: error code
*/
-static int _dpu_crtc_vblank_enable_no_lock(
+static void _dpu_crtc_vblank_enable_no_lock(
struct dpu_crtc *dpu_crtc, bool enable)
{
- struct drm_device *dev;
- struct drm_crtc *crtc;
+ struct drm_crtc *crtc = &dpu_crtc->base;
+ struct drm_device *dev = crtc->dev;
struct drm_encoder *enc;
- if (!dpu_crtc) {
- DPU_ERROR("invalid crtc\n");
- return -EINVAL;
- }
-
- crtc = &dpu_crtc->base;
- dev = crtc->dev;
-
if (enable) {
- int ret;
-
/* drop lock since power crtc cb may try to re-acquire lock */
mutex_unlock(&dpu_crtc->crtc_lock);
- ret = _dpu_crtc_power_enable(dpu_crtc, true);
+ pm_runtime_get_sync(dev->dev);
mutex_lock(&dpu_crtc->crtc_lock);
- if (ret)
- return ret;
list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
if (enc->crtc != crtc)
@@ -1125,11 +810,9 @@ static int _dpu_crtc_vblank_enable_no_lock(
/* drop lock since power crtc cb may try to re-acquire lock */
mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
+ pm_runtime_put_sync(dev->dev);
mutex_lock(&dpu_crtc->crtc_lock);
}
-
- return 0;
}
/**
@@ -1139,23 +822,7 @@ static int _dpu_crtc_vblank_enable_no_lock(
*/
static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
{
- struct dpu_crtc *dpu_crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- int ret = 0;
-
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
- dpu_crtc = to_dpu_crtc(crtc);
- priv = crtc->dev->dev_private;
-
- if (!priv->kms) {
- DPU_ERROR("invalid crtc kms\n");
- return;
- }
- dpu_kms = to_dpu_kms(priv->kms);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
@@ -1170,10 +837,7 @@ static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
crtc->base.id, enable);
else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+ _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
}
dpu_crtc->suspend = enable;
@@ -1206,8 +870,6 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
/* duplicate base helper */
__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
- _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
-
return &cstate->base;
}
@@ -1244,9 +906,6 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
return;
}
- _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
- &dpu_crtc->rp_head);
-
cstate->base.crtc = crtc;
crtc->state = &cstate->base;
}
@@ -1254,62 +913,19 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
{
struct drm_crtc *crtc = arg;
- struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *encoder;
- struct dpu_crtc_mixer *m;
- u32 i, misr_status;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
- dpu_crtc = to_dpu_crtc(crtc);
mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
- switch (event_type) {
- case DPU_POWER_EVENT_POST_ENABLE:
- /* restore encoder; crtc will be programmed during commit */
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_virt_restore(encoder);
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
- !dpu_crtc->misr_enable)
- continue;
-
- m->hw_lm->ops.setup_misr(m->hw_lm, true,
- dpu_crtc->misr_frame_count);
- }
- break;
- case DPU_POWER_EVENT_PRE_DISABLE:
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
- !dpu_crtc->misr_enable)
- continue;
+ /* restore encoder; crtc will be programmed during commit */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
- misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
- dpu_crtc->misr_data[i] = misr_status ? misr_status :
- dpu_crtc->misr_data[i];
- }
- break;
- case DPU_POWER_EVENT_POST_DISABLE:
- /**
- * Nothing to do. All the planes on the CRTC will be
- * programmed for every frame
- */
- break;
- default:
- DPU_DEBUG("event:%d not handled\n", event_type);
- break;
+ dpu_encoder_virt_restore(encoder);
}
mutex_unlock(&dpu_crtc->crtc_lock);
@@ -1322,7 +938,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
struct drm_display_mode *mode;
struct drm_encoder *encoder;
struct msm_drm_private *priv;
- int ret;
unsigned long flags;
if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
@@ -1353,10 +968,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
if (dpu_crtc->enabled && !dpu_crtc->suspend &&
dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+ _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
}
dpu_crtc->enabled = false;
@@ -1379,9 +991,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
dpu_power_handle_unregister_event(dpu_crtc->phandle,
dpu_crtc->power_event);
- memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
- dpu_crtc->num_mixers = 0;
- dpu_crtc->mixers_swapped = false;
+ memset(cstate->mixers, 0, sizeof(cstate->mixers));
+ cstate->num_mixers = 0;
/* disable clk & bw control until clk & bw properties are set */
cstate->bw_control = false;
@@ -1403,7 +1014,6 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
struct msm_drm_private *priv;
- int ret;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
DPU_ERROR("invalid crtc\n");
@@ -1425,10 +1035,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+ _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
}
dpu_crtc->enabled = true;
@@ -1438,9 +1045,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
drm_crtc_vblank_on(crtc);
dpu_crtc->power_event = dpu_power_handle_register_event(
- dpu_crtc->phandle,
- DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
- DPU_POWER_EVENT_PRE_DISABLE,
+ dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
}
@@ -1496,7 +1101,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
- mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+ mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
_dpu_crtc_setup_lm_bounds(crtc, state);
@@ -1535,8 +1140,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
cnt++;
dst = drm_plane_state_dest(pstate);
- if (!drm_rect_intersect(&clip, &dst) ||
- !drm_rect_equals(&clip, &dst)) {
+ if (!drm_rect_intersect(&clip, &dst)) {
DPU_ERROR("invalid vertical/horizontal destination\n");
DPU_ERROR("display: " DRM_RECT_FMT " plane: "
DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
@@ -1679,7 +1283,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
end:
- _dpu_crtc_rp_free_unused(&cstate->rp);
kfree(pstates);
return rc;
}
@@ -1687,7 +1290,6 @@ end:
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
struct dpu_crtc *dpu_crtc;
- int ret;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
@@ -1698,10 +1300,7 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
if (dpu_crtc->enabled && !dpu_crtc->suspend) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+ _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
}
dpu_crtc->vblank_requested = en;
mutex_unlock(&dpu_crtc->crtc_lock);
@@ -1730,26 +1329,28 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
dpu_crtc = s->private;
crtc = &dpu_crtc->base;
+
+ drm_modeset_lock_all(crtc->dev);
cstate = to_dpu_crtc_state(crtc->state);
mutex_lock(&dpu_crtc->crtc_lock);
mode = &crtc->state->adjusted_mode;
- out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+ out_width = _dpu_crtc_get_mixer_width(cstate, mode);
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
seq_puts(s, "\n");
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
+ for (i = 0; i < cstate->num_mixers; ++i) {
+ m = &cstate->mixers[i];
if (!m->hw_lm)
seq_printf(s, "\tmixer[%d] has no lm\n", i);
- else if (!m->hw_ctl)
+ else if (!m->lm_ctl)
seq_printf(s, "\tmixer[%d] has no ctl\n", i);
else
seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
- m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+ m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
out_width, mode->vdisplay);
}
@@ -1822,6 +1423,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
mutex_unlock(&dpu_crtc->crtc_lock);
+ drm_modeset_unlock_all(crtc->dev);
return 0;
}
@@ -1831,113 +1433,6 @@ static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}
-static ssize_t _dpu_crtc_misr_setup(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_mixer *m;
- int i = 0, rc;
- char buf[MISR_BUFF_SIZE + 1];
- u32 frame_count, enable;
- size_t buff_copy;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_crtc = file->private_data;
- buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
- if (copy_from_user(buf, user_buf, buff_copy)) {
- DPU_ERROR("buffer copy failed\n");
- return -EINVAL;
- }
-
- buf[buff_copy] = 0; /* end of string */
-
- if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
- return -EINVAL;
-
- rc = _dpu_crtc_power_enable(dpu_crtc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_crtc->crtc_lock);
- dpu_crtc->misr_enable = enable;
- dpu_crtc->misr_frame_count = frame_count;
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- dpu_crtc->misr_data[i] = 0;
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
- continue;
-
- m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
- }
- mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
-
- return count;
-}
-
-static ssize_t _dpu_crtc_misr_read(struct file *file,
- char __user *user_buff, size_t count, loff_t *ppos)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_mixer *m;
- int i = 0, rc;
- u32 misr_status;
- ssize_t len = 0;
- char buf[MISR_BUFF_SIZE + 1] = {'\0'};
-
- if (*ppos)
- return 0;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_crtc = file->private_data;
- rc = _dpu_crtc_power_enable(dpu_crtc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_crtc->crtc_lock);
- if (!dpu_crtc->misr_enable) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "disabled\n");
- goto buff_check;
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
- continue;
-
- misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
- dpu_crtc->misr_data[i] = misr_status ? misr_status :
- dpu_crtc->misr_data[i];
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
- m->hw_lm->idx - LM_0);
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
- dpu_crtc->misr_data[i]);
- }
-
-buff_check:
- if (count <= len) {
- len = 0;
- goto end;
- }
-
- if (copy_to_user(user_buff, buf, len)) {
- len = -EFAULT;
- goto end;
- }
-
- *ppos += len; /* increase offset */
-
-end:
- mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
- return len;
-}
-
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
@@ -1955,8 +1450,6 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct dpu_crtc_res *res;
- struct dpu_crtc_respool *rp;
int i;
seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
@@ -1973,17 +1466,6 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
dpu_crtc->cur_perf.max_per_pipe_ib[i]);
}
- mutex_lock(&dpu_crtc->rp_lock);
- list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
- seq_printf(s, "rp.%d: ", rp->sequence_id);
- list_for_each_entry(res, &rp->res_list, list)
- seq_printf(s, "0x%x/0x%llx/%pK/%d ",
- res->type, res->tag, res->val,
- atomic_read(&res->refcount));
- seq_puts(s, "\n");
- }
- mutex_unlock(&dpu_crtc->rp_lock);
-
return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
@@ -1999,19 +1481,12 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
.llseek = seq_lseek,
.release = single_release,
};
- static const struct file_operations debugfs_misr_fops = {
- .open = simple_open,
- .read = _dpu_crtc_misr_read,
- .write = _dpu_crtc_misr_setup,
- };
if (!crtc)
return -EINVAL;
dpu_crtc = to_dpu_crtc(crtc);
dpu_kms = _dpu_crtc_get_kms(crtc);
- if (!dpu_kms)
- return -EINVAL;
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
@@ -2026,8 +1501,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
dpu_crtc->debugfs_root,
&dpu_crtc->base,
&dpu_crtc_debugfs_state_fops);
- debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
- dpu_crtc, &debugfs_misr_fops);
return 0;
}
@@ -2082,7 +1555,8 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
};
/* initialize crtc */
-struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor)
{
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
@@ -2104,9 +1578,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
spin_lock_init(&dpu_crtc->spin_lock);
atomic_set(&dpu_crtc->frame_pending, 0);
- mutex_init(&dpu_crtc->rp_lock);
- INIT_LIST_HEAD(&dpu_crtc->rp_head);
-
init_completion(&dpu_crtc->frame_done_comp);
INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
@@ -2119,7 +1590,7 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
dpu_crtc_frame_event_work);
}
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index e87109e608e9..3723b4830335 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -83,14 +83,14 @@ struct dpu_crtc_smmu_state_data {
/**
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
- * @hw_ctl: CTL Path HW driver context
+ * @lm_ctl: CTL Path HW driver context
* @encoder: Encoder attached to this lm & ctl
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
- struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_ctl *lm_ctl;
struct drm_encoder *encoder;
u32 mixer_op_mode;
u32 flush_mask;
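
A minimal sketch of how a caller walks this map after the rename, assuming the usual DPU headers are in scope; flush_state_mixers() is a hypothetical name, while update_pending_flush() is the ctl op already used elsewhere in this series:

static void flush_state_mixers(struct dpu_crtc_state *cstate)
{
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct dpu_crtc_mixer *m = &cstate->mixers[i];

		/* each entry pairs an LM with the CTL path driving it */
		if (!m->hw_lm || !m->lm_ctl)
			continue;

		/* lm_ctl (formerly hw_ctl) carries this mixer's flush mask */
		m->lm_ctl->ops.update_pending_flush(m->lm_ctl, m->flush_mask);
	}
}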
@@ -121,11 +121,6 @@ struct dpu_crtc_frame_event {
* struct dpu_crtc - virtualized CRTC data structure
* @base : Base drm crtc structure
* @name : ASCII description of this crtc
- * @num_ctls : Number of ctl paths in use
- * @num_mixers : Number of mixers in use
- * @mixers_swapped: Whether the mixers have been swapped for left/right update
- * especially in the case of DSC Merge.
- * @mixers : List of active mixers
* @event : Pointer to last received drm vblank event. If there is a
* pending vblank event, this will be non-null.
* @vsync_count : Running count of received vsync events
@@ -156,27 +151,14 @@ struct dpu_crtc_frame_event {
* @event_thread : Pointer to event handler thread
* @event_worker : Event worker queue
* @event_lock : Spinlock around event handling code
- * @misr_enable : boolean entry indicates misr enable/disable status.
- * @misr_frame_count : misr frame count provided by client
- * @misr_data : store misr data before turning off the clocks.
* @phandle: Pointer to power handler
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
- * @rp_lock : serialization lock for resource pool
- * @rp_head : list of active resource pool
- * @scl3_cfg_lut : qseed3 lut config
*/
struct dpu_crtc {
struct drm_crtc base;
char name[DPU_CRTC_NAME_SIZE];
- /* HW Resources reserved for the crtc */
- u32 num_ctls;
- u32 num_mixers;
- bool mixers_swapped;
- struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
- struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
-
struct drm_pending_vblank_event *event;
u32 vsync_count;
@@ -206,77 +188,20 @@ struct dpu_crtc {
/* for handling internal event thread */
spinlock_t event_lock;
- bool misr_enable;
- u32 misr_frame_count;
- u32 misr_data[CRTC_DUAL_MIXERS];
struct dpu_power_handle *phandle;
struct dpu_power_event *power_event;
struct dpu_core_perf_params cur_perf;
- struct mutex rp_lock;
- struct list_head rp_head;
-
struct dpu_crtc_smmu_state_data smmu_state;
};
#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
/**
- * struct dpu_crtc_res_ops - common operations for crtc resources
- * @get: get given resource
- * @put: put given resource
- */
-struct dpu_crtc_res_ops {
- void *(*get)(void *val, u32 type, u64 tag);
- void (*put)(void *val);
-};
-
-#define DPU_CRTC_RES_FLAG_FREE BIT(0)
-
-/**
- * struct dpu_crtc_res - definition of crtc resources
- * @list: list of crtc resource
- * @type: crtc resource type
- * @tag: unique identifier per type
- * @refcount: reference/usage count
- * @ops: callback operations
- * @val: resource handle associated with type/tag
- * @flags: customization flags
- */
-struct dpu_crtc_res {
- struct list_head list;
- u32 type;
- u64 tag;
- atomic_t refcount;
- struct dpu_crtc_res_ops ops;
- void *val;
- u32 flags;
-};
-
-/**
- * dpu_crtc_respool - crtc resource pool
- * @rp_lock: pointer to serialization lock
- * @rp_head: pointer to head of active resource pools of this crtc
- * @rp_list: list of crtc resource pool
- * @sequence_id: sequence identifier, incremented per state duplication
- * @res_list: list of resource managed by this resource pool
- * @ops: resource operations for parent resource pool
- */
-struct dpu_crtc_respool {
- struct mutex *rp_lock;
- struct list_head *rp_head;
- struct list_head rp_list;
- u32 sequence_id;
- struct list_head res_list;
- struct dpu_crtc_res_ops ops;
-};
-
-/**
* struct dpu_crtc_state - dpu container for atomic crtc state
* @base: Base drm crtc state structure
- * @is_ppsplit : Whether current topology requires PPSplit special handling
* @bw_control : true if bw/clk controlled by core bw/clk properties
* @bw_split_vote : true if bw controlled by llcc/dram bw properties
* @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
@@ -285,41 +210,41 @@ struct dpu_crtc_respool {
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @new_perf: new performance state being requested
+ * @num_mixers : Number of mixers in use
+ * @mixers : List of active mixers
+ * @num_ctls : Number of ctl paths in use
+ * @hw_ctls : List of active ctl paths
*/
struct dpu_crtc_state {
struct drm_crtc_state base;
bool bw_control;
bool bw_split_vote;
-
- bool is_ppsplit;
struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
uint64_t input_fence_timeout_ns;
struct dpu_core_perf_params new_perf;
- struct dpu_crtc_respool rp;
+
+ /* HW Resources reserved for the crtc */
+ u32 num_mixers;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+
+ u32 num_ctls;
+ struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
};
#define to_dpu_crtc_state(x) \
container_of(x, struct dpu_crtc_state, base)
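
A short sketch, assuming a bound crtc->state, of how reserved resources are now reached through the duplicated atomic state rather than struct dpu_crtc; crtc_state_ctl_count() is an illustrative name, not a helper this patch adds:

static u32 crtc_state_ctl_count(struct drm_crtc *crtc)
{
	/* HW resources now travel with the atomic state, not the CRTC */
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	return cstate->num_ctls;
}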
/**
- * dpu_crtc_get_mixer_width - get the mixer width
- * Mixer width will be same as panel width(/2 for split)
+ * dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
+ * @cstate: Pointer to dpu crtc state
+ * @Return: true - has two mixers, false - has one mixer
*/
-static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
- struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
{
- u32 mixer_width;
-
- if (!dpu_crtc || !cstate || !mode)
- return 0;
-
- mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
- mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
-
- return mixer_width;
+ return cstate->num_mixers == CRTC_DUAL_MIXERS;
}
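
A sketch of the arithmetic the removed dpu_crtc_get_mixer_width() used to do, rephrased with the new predicate; mixer_width() here is illustrative only:

static inline int mixer_width(struct dpu_crtc_state *cstate,
			      struct drm_display_mode *mode)
{
	/* stereo (dual-mixer) state splits the panel width across two LMs */
	if (dpu_crtc_state_is_stereo(cstate))
		return mode->hdisplay / CRTC_DUAL_MIXERS;

	return mode->hdisplay;
}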
/**
@@ -375,9 +300,11 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc,
* dpu_crtc_init - create a new crtc object
* @dev: dpu device
* @plane: base plane
+ * @cursor: cursor plane
* @Return: new crtc object or error
*/
-struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor);
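
A hedged usage sketch of the widened constructor; primary_plane and cursor_plane are placeholders for whatever the KMS init code has already created:

	struct drm_crtc *crtc;

	crtc = dpu_crtc_init(dev, primary_plane, cursor_plane);
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);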
/**
* dpu_crtc_register_custom_event - api for enabling/disabling crtc event
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 1b4de3486ef9..96cdf06e7da2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -65,8 +65,6 @@
#define MAX_CHANNELS_PER_ENC 2
-#define MISR_BUFF_SIZE 256
-
#define IDLE_SHORT_TIMEOUT 1
#define MAX_VDISPLAY_SPLIT 1080
@@ -161,8 +159,6 @@ enum dpu_enc_rc_states {
* @frame_done_timer: watchdog timer for frame done event
* @vsync_event_timer: vsync timer
* @disp_info: local copy of msm_display_info struct
- * @misr_enable: misr enable/disable status
- * @misr_frame_count: misr frame count before start capturing the data
* @idle_pc_supported: indicates if idle power collapse is supported
* @rc_lock: resource control mutex lock to protect
* virt encoder over various state changes
@@ -179,11 +175,10 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
- uint32_t display_num_of_h_tiles;
-
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
+ struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
bool intfs_swapped;
@@ -202,8 +197,6 @@ struct dpu_encoder_virt {
struct timer_list vsync_event_timer;
struct msm_display_info disp_info;
- bool misr_enable;
- u32 misr_frame_count;
bool idle_pc_supported;
struct mutex rc_lock;
@@ -443,30 +436,22 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
}
void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i = 0;
- if (!hw_res || !drm_enc || !conn_state) {
- DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
- drm_enc != 0, hw_res != 0, conn_state != 0);
- return;
- }
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
/* Query resources used by phys encs, expected to be without overlap */
memset(hw_res, 0, sizeof(*hw_res));
- hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys && phys->ops.get_hw_resources)
- phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ phys->ops.get_hw_resources(phys, hw_res);
}
}
@@ -525,7 +510,7 @@ void dpu_encoder_helper_split_config(
hw_mdptop = phys_enc->hw_mdptop;
disp_info = &dpu_enc->disp_info;
- if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+ if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
return;
/**
@@ -660,7 +645,7 @@ static int dpu_encoder_virt_atomic_check(
if (drm_atomic_crtc_needs_modeset(crtc_state)
&& dpu_enc->mode_set_complete) {
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
- conn_state, topology, true);
+ topology, true);
dpu_enc->mode_set_complete = false;
}
}
@@ -1016,9 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
- struct dpu_rm_hw_iter pp_iter;
+ struct dpu_rm_hw_iter pp_iter, ctl_iter;
struct msm_display_topology topology;
- enum dpu_rm_topology_name topology_name;
+ struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
int i = 0, ret;
if (!drm_enc) {
@@ -1051,7 +1036,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
- conn->state, topology, false);
+ topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
"failed to reserve hw resources, %d\n", ret);
@@ -1066,19 +1051,33 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
}
- topology_name = dpu_rm_get_topology_name(topology);
+ dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
+ break;
+ hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
+ }
+
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys) {
if (!dpu_enc->hw_pp[i]) {
- DPU_ERROR_ENC(dpu_enc,
- "invalid pingpong block for the encoder\n");
+ DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
+ "at idx: %d\n", i);
return;
}
+
+ if (!hw_ctl[i]) {
+ DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
+ "at idx: %d\n", i);
+ return;
+ }
+
phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_ctl = hw_ctl[i];
+
phys->connector = conn->state->connector;
- phys->topology_name = topology_name;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
@@ -1111,12 +1110,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
return;
}
- if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
- dpu_enc->cur_master->hw_mdptop &&
- dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
- dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
- dpu_enc->cur_master->hw_mdptop);
-
if (dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
@@ -1153,7 +1146,7 @@ void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
- int i, ret = 0;
+ int ret = 0;
struct drm_display_mode *cur_mode = NULL;
if (!drm_enc) {
@@ -1166,21 +1159,12 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
- dpu_enc->cur_master = NULL;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ /* always enable slave encoder before master */
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
+ dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
- if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
- DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
- dpu_enc->cur_master = phys;
- break;
- }
- }
-
- if (!dpu_enc->cur_master) {
- DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
- return;
- }
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
if (ret) {
@@ -1189,26 +1173,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
return;
}
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys)
- continue;
-
- if (phys != dpu_enc->cur_master) {
- if (phys->ops.enable)
- phys->ops.enable(phys);
- }
-
- if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
- MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
- phys->ops.setup_misr(phys, true,
- dpu_enc->misr_frame_count);
- }
-
- if (dpu_enc->cur_master->ops.enable)
- dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
-
_dpu_encoder_virt_enable_helper(drm_enc);
}
@@ -1266,8 +1230,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_enc->phys_encs[i]->connector = NULL;
}
- dpu_enc->cur_master = NULL;
-
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
@@ -1397,9 +1359,9 @@ static void dpu_encoder_frame_done_callback(
/* One of the physical encoders has become idle */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] == ready_phys) {
- clear_bit(i, dpu_enc->frame_busy_mask);
trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
dpu_enc->frame_busy_mask[0]);
+ clear_bit(i, dpu_enc->frame_busy_mask);
}
}
@@ -1480,7 +1442,8 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
ret = ctl->ops.get_pending_flush(ctl);
trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
- pending_kickoff_cnt, ctl->idx, ret);
+ pending_kickoff_cnt, ctl->idx,
+ extra_flush_bits, ret);
}
/**
@@ -1879,7 +1842,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
phys->ops.handle_post_kickoff(phys);
}
- if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
!_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
trace_dpu_enc_early_kickoff(DRMID(drm_enc),
ktime_to_ms(wakeup_time));
@@ -1955,113 +1918,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
return single_open(file, _dpu_encoder_status_show, inode->i_private);
}
-static ssize_t _dpu_encoder_misr_setup(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dpu_encoder_virt *dpu_enc;
- int i = 0, rc;
- char buf[MISR_BUFF_SIZE + 1];
- size_t buff_copy;
- u32 frame_count, enable;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_enc = file->private_data;
-
- buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
- if (copy_from_user(buf, user_buf, buff_copy))
- return -EINVAL;
-
- buf[buff_copy] = 0; /* end of string */
-
- if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
- return -EINVAL;
-
- rc = _dpu_encoder_power_enable(dpu_enc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_enc->enc_lock);
- dpu_enc->misr_enable = enable;
- dpu_enc->misr_frame_count = frame_count;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys || !phys->ops.setup_misr)
- continue;
-
- phys->ops.setup_misr(phys, enable, frame_count);
- }
- mutex_unlock(&dpu_enc->enc_lock);
- _dpu_encoder_power_enable(dpu_enc, false);
-
- return count;
-}
-
-static ssize_t _dpu_encoder_misr_read(struct file *file,
- char __user *user_buff, size_t count, loff_t *ppos)
-{
- struct dpu_encoder_virt *dpu_enc;
- int i = 0, len = 0;
- char buf[MISR_BUFF_SIZE + 1] = {'\0'};
- int rc;
-
- if (*ppos)
- return 0;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_enc = file->private_data;
-
- rc = _dpu_encoder_power_enable(dpu_enc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_enc->enc_lock);
- if (!dpu_enc->misr_enable) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "disabled\n");
- goto buff_check;
- } else if (dpu_enc->disp_info.capabilities &
- ~MSM_DISPLAY_CAP_VID_MODE) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "unsupported\n");
- goto buff_check;
- }
-
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys || !phys->ops.collect_misr)
- continue;
-
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "Intf idx:%d\n", phys->intf_idx - INTF_0);
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
- phys->ops.collect_misr(phys));
- }
-
-buff_check:
- if (count <= len) {
- len = 0;
- goto end;
- }
-
- if (copy_to_user(user_buff, buf, len)) {
- len = -EFAULT;
- goto end;
- }
-
- *ppos += len; /* increase offset */
-
-end:
- mutex_unlock(&dpu_enc->enc_lock);
- _dpu_encoder_power_enable(dpu_enc, false);
- return len;
-}
-
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -2076,12 +1932,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
.release = single_release,
};
- static const struct file_operations debugfs_misr_fops = {
- .open = simple_open,
- .read = _dpu_encoder_misr_read,
- .write = _dpu_encoder_misr_setup,
- };
-
char name[DPU_NAME_SIZE];
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
@@ -2105,9 +1955,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
debugfs_create_file("status", 0600,
dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
- debugfs_create_file("misr_data", 0600,
- dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
-
for (i = 0; i < dpu_enc->num_phys_encs; i++)
if (dpu_enc->phys_encs[i] &&
dpu_enc->phys_encs[i]->ops.late_register)
@@ -2195,6 +2042,11 @@ static int dpu_encoder_virt_add_phys_encs(
++dpu_enc->num_phys_encs;
}
+ if (params->split_role == ENC_ROLE_SLAVE)
+ dpu_enc->cur_slave = enc;
+ else
+ dpu_enc->cur_master = enc;
+
return 0;
}
@@ -2206,8 +2058,7 @@ static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
- struct msm_display_info *disp_info,
- int *drm_enc_mode)
+ struct msm_display_info *disp_info)
{
int ret = 0;
int i = 0;
@@ -2220,6 +2071,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
return -EINVAL;
}
+ dpu_enc->cur_master = NULL;
+
memset(&phys_params, 0, sizeof(phys_params));
phys_params.dpu_kms = dpu_kms;
phys_params.parent = &dpu_enc->base;
@@ -2228,24 +2081,17 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
DPU_DEBUG("\n");
- if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
- *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ switch (disp_info->intf_type) {
+ case DRM_MODE_ENCODER_DSI:
intf_type = INTF_DSI;
- } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
- *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
- intf_type = INTF_HDMI;
- } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
- *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
- intf_type = INTF_DP;
- } else {
+ break;
+ default:
DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
return -EINVAL;
}
WARN_ON(disp_info->num_of_h_tiles < 1);
- dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
-
DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
@@ -2358,25 +2204,22 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *drm_enc = NULL;
struct dpu_encoder_virt *dpu_enc = NULL;
- int drm_enc_mode = DRM_MODE_ENCODER_NONE;
int ret = 0;
dpu_enc = to_dpu_encoder_virt(enc);
mutex_init(&dpu_enc->enc_lock);
- ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
- &drm_enc_mode);
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
- dpu_enc->cur_master = NULL;
spin_lock_init(&dpu_enc->enc_spinlock);
atomic_set(&dpu_enc->frame_done_timeout, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
- if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+ if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 60f809fc7c13..9dbf38f446d9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -32,15 +32,9 @@
/**
* Encoder functions and data types
* @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
- * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
- * @display_num_of_h_tiles: Number of horizontal tiles in case of split
- * interface
- * @topology: Topology of the display
*/
struct dpu_encoder_hw_resources {
enum dpu_intf_mode intfs[INTF_MAX];
- bool needs_cdm;
- u32 display_num_of_h_tiles;
};
/**
@@ -56,11 +50,9 @@ struct dpu_encoder_kickoff_params {
* dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer
* @hw_res: resource table to populate with encoder required resources
- * @conn_state: report hw reqs based on this proposed connector state
*/
void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state);
+ struct dpu_encoder_hw_resources *hw_res);
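
A sketch of a caller under the trimmed signature; encoder_active_intfs() is an illustrative name, while INTF_MAX and INTF_MODE_NONE come from the existing intf mode table:

static int encoder_active_intfs(struct drm_encoder *encoder)
{
	struct dpu_encoder_hw_resources hw_res;
	int i, count = 0;

	/* connector state is no longer needed; the encoder fills the table */
	dpu_encoder_get_hw_resources(encoder, &hw_res);

	for (i = 0; i < INTF_MAX; i++)
		if (hw_res.intfs[i] != INTF_MODE_NONE)
			count++;

	return count;
}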
/**
* dpu_encoder_register_vblank_callback - provide callback to encoder that
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index c7df8aad6613..964efcc757a4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -22,8 +22,8 @@
#include "dpu_hw_pingpong.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
-#include "dpu_hw_cdm.h"
#include "dpu_encoder.h"
+#include "dpu_crtc.h"
#define DPU_ENCODER_NAME_MAX 16
@@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
* @handle_post_kickoff: Do any work necessary post-kickoff
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
- * @setup_misr: Sets up MISR, enable and disables based on sysfs
- * @collect_misr: Collects MISR data on frame update
* @hw_reset: Issue HW recovery such as CTL reset and clear
* DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
@@ -143,8 +141,7 @@ struct dpu_encoder_phys_ops {
struct drm_connector_state *conn_state);
void (*destroy)(struct dpu_encoder_phys *encoder);
void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state);
+ struct dpu_encoder_hw_resources *hw_res);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
@@ -154,10 +151,6 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
-
- void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
- bool enable, u32 frame_count);
- u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
@@ -210,8 +203,6 @@ struct dpu_encoder_irq {
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @hw_mdptop: Hardware interface to the top registers
* @hw_ctl: Hardware interface to the ctl registers
- * @hw_cdm: Hardware interface to the cdm registers
- * @cdm_cfg: Chroma-down hardware configuration
* @hw_pp: Hardware interface to the ping pong registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
@@ -219,7 +210,6 @@ struct dpu_encoder_irq {
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
- * @topology_name: topology selected for the display
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
@@ -241,15 +231,12 @@ struct dpu_encoder_phys {
const struct dpu_encoder_virt_ops *parent_ops;
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
- struct dpu_hw_cdm *hw_cdm;
- struct dpu_hw_cdm_cfg cdm_cfg;
struct dpu_hw_pingpong *hw_pp;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
- enum dpu_rm_topology_name topology_name;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
@@ -367,11 +354,15 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
+ struct dpu_crtc_state *dpu_cstate;
+
if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
return BLEND_3D_NONE;
+ dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
+
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+ dpu_crtc_state_is_stereo(dpu_cstate))
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 3084675ed425..b2d7f0ded24c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -196,9 +196,6 @@ static void dpu_encoder_phys_cmd_mode_set(
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
- struct dpu_rm_hw_iter iter;
- int i, instance;
if (!phys_enc || !mode || !adj_mode) {
DPU_ERROR("invalid args\n");
@@ -208,22 +205,6 @@ static void dpu_encoder_phys_cmd_mode_set(
DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
drm_mode_debug_printmodeline(adj_mode);
- instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
- /* Retrieve previously allocated HW Resources. Shouldn't fail */
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i <= instance; i++) {
- if (dpu_rm_get_hw(rm, &iter))
- phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
- }
-
- if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
- DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
- PTR_ERR(phys_enc->hw_ctl));
- phys_enc->hw_ctl = NULL;
- return;
- }
-
_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
}
@@ -618,23 +599,8 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_cmd_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
- struct dpu_encoder_phys_cmd *cmd_enc =
- to_dpu_encoder_phys_cmd(phys_enc);
-
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
- if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
- DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
- return;
- }
-
- DPU_DEBUG_CMDENC(cmd_enc, "\n");
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
@@ -823,7 +789,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
- struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -836,14 +801,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
goto fail;
}
phys_enc = &cmd_enc->base;
-
- hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
- if (IS_ERR_OR_NULL(hw_mdp)) {
- ret = PTR_ERR(hw_mdp);
- DPU_ERROR("failed to get mdptop\n");
- goto fail_mdp_init;
- }
- phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
@@ -898,8 +856,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
return phys_enc;
-fail_mdp_init:
- kfree(cmd_enc);
fail:
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 14fc7c2a6bb7..84de385a9f62 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -355,13 +355,14 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
{
+ struct dpu_crtc_state *dpu_cstate;
+
if (!phys_enc)
return false;
- if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
- return true;
+ dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
- return false;
+ return dpu_cstate->num_ctls > 1;
}
static bool dpu_encoder_phys_vid_needs_single_flush(
@@ -395,9 +396,6 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct dpu_rm *rm;
- struct dpu_rm_hw_iter iter;
- int i, instance;
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc || !phys_enc->dpu_kms) {
@@ -405,7 +403,6 @@ static void dpu_encoder_phys_vid_mode_set(
return;
}
- rm = &phys_enc->dpu_kms->rm;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (adj_mode) {
@@ -414,21 +411,6 @@ static void dpu_encoder_phys_vid_mode_set(
DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
}
- instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
- /* Retrieve previously allocated HW Resources. Shouldn't fail */
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i <= instance; i++) {
- if (dpu_rm_get_hw(rm, &iter))
- phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
- }
- if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
- DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
- PTR_ERR(phys_enc->hw_ctl));
- phys_enc->hw_ctl = NULL;
- return;
- }
-
_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
}
@@ -481,7 +463,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
struct dpu_encoder_phys_vid *vid_enc;
- struct dpu_hw_intf *intf;
+ struct dpu_rm_hw_iter iter;
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
@@ -493,11 +475,20 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
priv = phys_enc->parent->dev->dev_private;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- intf = vid_enc->hw_intf;
ctl = phys_enc->hw_ctl;
- if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
- DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF);
+ while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) {
+ struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+ if (hw_intf->idx == phys_enc->intf_idx) {
+ vid_enc->hw_intf = hw_intf;
+ break;
+ }
+ }
+
+ if (!vid_enc->hw_intf) {
+ DPU_ERROR("hw_intf not assigned\n");
return;
}
@@ -519,7 +510,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
- ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, vid_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
@@ -547,25 +538,9 @@ static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_vid_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc || !hw_res) {
- DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
- phys_enc != 0, hw_res != 0, conn_state != 0);
- return;
- }
-
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf) {
- DPU_ERROR("invalid arg(s), hw_intf\n");
- return;
- }
-
- DPU_DEBUG_VIDENC(vid_enc, "\n");
- hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+ hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO;
}
static int _dpu_encoder_phys_vid_wait_for_vblank(
@@ -756,32 +731,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
}
}
-static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
- bool enable, u32 frame_count)
-{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc)
- return;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
- if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
- vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
- enable, frame_count);
-}
-
-static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc)
- return 0;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
- return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
- vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
-}
-
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
@@ -817,8 +766,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
- ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
- ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}
@@ -828,8 +775,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_vid *vid_enc = NULL;
- struct dpu_rm_hw_iter iter;
- struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -846,35 +791,9 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
phys_enc = &vid_enc->base;
- hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
- if (IS_ERR_OR_NULL(hw_mdp)) {
- ret = PTR_ERR(hw_mdp);
- DPU_ERROR("failed to get mdptop\n");
- goto fail;
- }
- phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
- /**
- * hw_intf resource permanently assigned to this encoder
- * Other resources allocated at atomic commit time by use case
- */
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
- while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
- struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
-
- if (hw_intf->idx == p->intf_idx) {
- vid_enc->hw_intf = hw_intf;
- break;
- }
- }
-
- if (!vid_enc->hw_intf) {
- ret = -EINVAL;
- DPU_ERROR("failed to get hw_intf\n");
- goto fail;
- }
-
DPU_DEBUG_VIDENC(vid_enc, "\n");
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 44ee06398b1d..512ac0834d2b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -29,6 +29,9 @@
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+#define DMA_CURSOR_SDM845_MASK \
+ (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
@@ -71,7 +74,6 @@ static struct dpu_mdp_cfg sdm845_mdp[] = {
.base = 0x0, .len = 0x45C,
.features = 0,
.highest_bank_bit = 0x2,
- .has_dest_scaler = true,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
@@ -174,45 +176,35 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
-#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
- { \
- .name = _name, .id = _id, \
- .base = _base, .len = 0x1c8, \
- .features = VIG_SDM845_MASK, \
- .sblk = &_sblk, \
- .xin_id = _xinid, \
- .type = SSPP_TYPE_VIG, \
- .clk_ctrl = _clkctrl \
- }
-
-#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+#define SSPP_BLK(_name, _id, _base, _features, \
+ _sblk, _xinid, _type, _clkctrl) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x1c8, \
- .features = DMA_SDM845_MASK, \
+ .features = _features, \
.sblk = &_sblk, \
.xin_id = _xinid, \
- .type = SSPP_TYPE_DMA, \
+ .type = _type, \
.clk_ctrl = _clkctrl \
}
static struct dpu_sspp_cfg sdm845_sspp[] = {
- SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
- sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
- SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
- sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
- SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
- sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
- SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
- sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
- SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
- sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
- SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
- sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
- SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
- sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
- SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
- sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
/*************************************************************
@@ -227,48 +219,23 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _pp, _lmpair) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
.features = MIXER_SDM845_MASK, \
.sblk = &sdm845_lm_sblk, \
- .ds = _ds, \
.pingpong = _pp, \
.lm_pair_mask = (1 << _lmpair) \
}
static struct dpu_lm_cfg sdm845_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
- LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
- LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
- LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
- LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
- LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
-};
-
-/*************************************************************
- * DS sub blocks config
- *************************************************************/
-static const struct dpu_ds_top_cfg sdm845_ds_top = {
- .name = "ds_top_0", .id = DS_TOP,
- .base = 0x60000, .len = 0xc,
- .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
- .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
- .maxupscale = MAX_UPSCALE_RATIO,
-};
-
-#define DS_BLK(_name, _id, _base) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = 0x800, \
- .features = DPU_SSPP_SCALER_QSEED3, \
- .top = &sdm845_ds_top \
- }
-
-static struct dpu_ds_cfg sdm845_ds[] = {
- DS_BLK("ds_0", DS_0, 0x800),
- DS_BLK("ds_1", DS_1, 0x1000),
+ LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2),
};
/*************************************************************
@@ -328,18 +295,6 @@ static struct dpu_intf_cfg sdm845_intf[] = {
};
/*************************************************************
- * CDM sub blocks config
- *************************************************************/
-static struct dpu_cdm_cfg sdm845_cdm[] = {
- {
- .name = "cdm_0", .id = CDM_0,
- .base = 0x79200, .len = 0x224,
- .features = 0,
- .intf_connect = BIT(INTF_3),
- },
-};
-
-/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
@@ -461,12 +416,8 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.sspp = sdm845_sspp,
.mixer_count = ARRAY_SIZE(sdm845_lm),
.mixer = sdm845_lm,
- .ds_count = ARRAY_SIZE(sdm845_ds),
- .ds = sdm845_ds,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
- .cdm_count = ARRAY_SIZE(sdm845_cdm),
- .cdm = sdm845_cdm,
.intf_count = ARRAY_SIZE(sdm845_intf),
.intf = sdm845_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index f0cb0d4fc80e..dc060e7358e4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -428,7 +428,6 @@ struct dpu_clk_ctrl_reg {
* @highest_bank_bit: UBWC parameter
* @ubwc_static: ubwc static configuration
* @ubwc_swizzle: ubwc default swizzle setting
- * @has_dest_scaler: indicates support of destination scaler
* @clk_ctrls clock control register definition
*/
struct dpu_mdp_cfg {
@@ -436,7 +435,6 @@ struct dpu_mdp_cfg {
u32 highest_bank_bit;
u32 ubwc_static;
u32 ubwc_swizzle;
- bool has_dest_scaler;
struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
};
@@ -474,50 +472,16 @@ struct dpu_sspp_cfg {
* @features bit mask identifying sub-blocks/features
* @sblk: LM Sub-blocks information
* @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
- * @ds: ID of connected DS, DS_MAX if unsupported
* @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
- u32 ds;
unsigned long lm_pair_mask;
};
/**
- * struct dpu_ds_top_cfg - information of dest scaler top
- * @id enum identifying this block
- * @base register offset of this block
- * @features bit mask identifying features
- * @version hw version of dest scaler
- * @maxinputwidth maximum input line width
- * @maxoutputwidth maximum output line width
- * @maxupscale maximum upscale ratio
- */
-struct dpu_ds_top_cfg {
- DPU_HW_BLK_INFO;
- u32 version;
- u32 maxinputwidth;
- u32 maxoutputwidth;
- u32 maxupscale;
-};
-
-/**
- * struct dpu_ds_cfg - information of dest scaler blocks
- * @id enum identifying this block
- * @base register offset wrt DS top offset
- * @features bit mask identifying features
- * @version hw version of the qseed block
- * @top DS top information
- */
-struct dpu_ds_cfg {
- DPU_HW_BLK_INFO;
- u32 version;
- const struct dpu_ds_top_cfg *top;
-};
-
-/**
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
@@ -530,18 +494,6 @@ struct dpu_pingpong_cfg {
};
/**
- * struct dpu_cdm_cfg - information of chroma down blocks
- * @id enum identifying this block
- * @base register offset of this block
- * @features bit mask identifying sub-blocks/features
- * @intf_connect Bitmask of INTF IDs this CDM can connect to
- */
-struct dpu_cdm_cfg {
- DPU_HW_BLK_INFO;
- unsigned long intf_connect;
-};
-
-/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
@@ -728,15 +680,9 @@ struct dpu_mdss_cfg {
u32 mixer_count;
struct dpu_lm_cfg *mixer;
- u32 ds_count;
- struct dpu_ds_cfg *ds;
-
u32 pingpong_count;
struct dpu_pingpong_cfg *pingpong;
- u32 cdm_count;
- struct dpu_cdm_cfg *cdm;
-
u32 intf_count;
struct dpu_intf_cfg *intf;
@@ -771,9 +717,7 @@ struct dpu_mdss_hw_cfg_handler {
#define BLK_DMA(s) ((s)->dma)
#define BLK_CURSOR(s) ((s)->cursor)
#define BLK_MIXER(s) ((s)->mixer)
-#define BLK_DS(s) ((s)->ds)
#define BLK_PINGPONG(s) ((s)->pingpong)
-#define BLK_CDM(s) ((s)->cdm)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
deleted file mode 100644
index 554874ba0c3b..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "dpu_hw_mdss.h"
-#include "dpu_hwio.h"
-#include "dpu_hw_catalog.h"
-#include "dpu_hw_cdm.h"
-#include "dpu_dbg.h"
-#include "dpu_kms.h"
-
-#define CDM_CSC_10_OPMODE 0x000
-#define CDM_CSC_10_BASE 0x004
-
-#define CDM_CDWN2_OP_MODE 0x100
-#define CDM_CDWN2_CLAMP_OUT 0x104
-#define CDM_CDWN2_PARAMS_3D_0 0x108
-#define CDM_CDWN2_PARAMS_3D_1 0x10C
-#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
-#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
-#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
-#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
-#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
-#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
-#define CDM_CDWN2_COEFF_COSITE_V 0x128
-#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
-#define CDM_CDWN2_OUT_SIZE 0x130
-
-#define CDM_HDMI_PACK_OP_MODE 0x200
-#define CDM_CSC_10_MATRIX_COEFF_0 0x004
-
-/**
- * Horizontal coefficients for cosite chroma downscale
- * s13 representation of coefficients
- */
-static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
-
-/**
- * Horizontal coefficients for offsite chroma downscale
- */
-static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
-
-/**
- * Vertical coefficients for cosite chroma downscale
- */
-static u32 cosite_v_coeff[] = {0x00080004};
-/**
- * Vertical coefficients for offsite chroma downscale
- */
-static u32 offsite_v_coeff[] = {0x00060002};
-
-/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
-static struct dpu_csc_cfg rgb2yuv_cfg = {
- {
- 0x0083, 0x0102, 0x0032,
- 0x1fb5, 0x1f6c, 0x00e1,
- 0x00e1, 0x1f45, 0x1fdc
- },
- { 0x00, 0x00, 0x00 },
- { 0x0040, 0x0200, 0x0200 },
- { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
- { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
-};
-
-static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
- struct dpu_mdss_cfg *m,
- void __iomem *addr,
- struct dpu_hw_blk_reg_map *b)
-{
- int i;
-
- for (i = 0; i < m->cdm_count; i++) {
- if (cdm == m->cdm[i].id) {
- b->base_off = addr;
- b->blk_off = m->cdm[i].base;
- b->length = m->cdm[i].len;
- b->hwversion = m->hwversion;
- b->log_mask = DPU_DBG_MASK_CDM;
- return &m->cdm[i];
- }
- }
-
- return ERR_PTR(-EINVAL);
-}
-
-static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
- struct dpu_csc_cfg *data)
-{
- dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
-
- return 0;
-}
-
-static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
- struct dpu_hw_cdm_cfg *cfg)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 opmode = 0;
- u32 out_size = 0;
-
- if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
- opmode &= ~BIT(7);
- else
- opmode |= BIT(7);
-
- /* ENABLE DWNS_H bit */
- opmode |= BIT(1);
-
- switch (cfg->h_cdwn_type) {
- case CDM_CDWN_DISABLE:
- /* CLEAR METHOD_H field */
- opmode &= ~(0x18);
- /* CLEAR DWNS_H bit */
- opmode &= ~BIT(1);
- break;
- case CDM_CDWN_PIXEL_DROP:
- /* Clear METHOD_H field (pixel drop is 0) */
- opmode &= ~(0x18);
- break;
- case CDM_CDWN_AVG:
- /* Clear METHOD_H field (Average is 0x1) */
- opmode &= ~(0x18);
- opmode |= (0x1 << 0x3);
- break;
- case CDM_CDWN_COSITE:
- /* Clear METHOD_H field (Average is 0x2) */
- opmode &= ~(0x18);
- opmode |= (0x2 << 0x3);
- /* Co-site horizontal coefficients */
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
- cosite_h_coeff[0]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
- cosite_h_coeff[1]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
- cosite_h_coeff[2]);
- break;
- case CDM_CDWN_OFFSITE:
- /* Clear METHOD_H field (Average is 0x3) */
- opmode &= ~(0x18);
- opmode |= (0x3 << 0x3);
-
- /* Off-site horizontal coefficients */
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
- offsite_h_coeff[0]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
- offsite_h_coeff[1]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
- offsite_h_coeff[2]);
- break;
- default:
- pr_err("%s invalid horz down sampling type\n", __func__);
- return -EINVAL;
- }
-
- /* ENABLE DWNS_V bit */
- opmode |= BIT(2);
-
- switch (cfg->v_cdwn_type) {
- case CDM_CDWN_DISABLE:
- /* CLEAR METHOD_V field */
- opmode &= ~(0x60);
- /* CLEAR DWNS_V bit */
- opmode &= ~BIT(2);
- break;
- case CDM_CDWN_PIXEL_DROP:
- /* Clear METHOD_V field (pixel drop is 0) */
- opmode &= ~(0x60);
- break;
- case CDM_CDWN_AVG:
- /* Clear METHOD_V field (Average is 0x1) */
- opmode &= ~(0x60);
- opmode |= (0x1 << 0x5);
- break;
- case CDM_CDWN_COSITE:
-		/* Clear METHOD_V field (Cosite is 0x2) */
- opmode &= ~(0x60);
- opmode |= (0x2 << 0x5);
- /* Co-site vertical coefficients */
- DPU_REG_WRITE(c,
- CDM_CDWN2_COEFF_COSITE_V,
- cosite_v_coeff[0]);
- break;
- case CDM_CDWN_OFFSITE:
-		/* Clear METHOD_V field (Offsite is 0x3) */
- opmode &= ~(0x60);
- opmode |= (0x3 << 0x5);
-
- /* Off-site vertical coefficients */
- DPU_REG_WRITE(c,
- CDM_CDWN2_COEFF_OFFSITE_V,
- offsite_v_coeff[0]);
- break;
- default:
- return -EINVAL;
- }
-
- if (cfg->v_cdwn_type || cfg->h_cdwn_type)
- opmode |= BIT(0); /* EN CDWN module */
- else
- opmode &= ~BIT(0);
-
- out_size = (cfg->output_width & 0xFFFF) |
- ((cfg->output_height & 0xFFFF) << 16);
- DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
- DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
- DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
- ((0x3FF << 16) | 0x0));
-
- return 0;
-}
-
-static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
- struct dpu_hw_cdm_cfg *cdm)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- const struct dpu_format *fmt = cdm->output_fmt;
- struct cdm_output_cfg cdm_cfg = { 0 };
- u32 opmode = 0;
- u32 csc = 0;
-
- if (!DPU_FORMAT_IS_YUV(fmt))
- return -EINVAL;
-
- if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
- if (fmt->chroma_sample != DPU_CHROMA_H1V2)
-			return -EINVAL; /* unsupported format */
- opmode = BIT(0);
- opmode |= (fmt->chroma_sample << 1);
- cdm_cfg.intf_en = true;
- }
-
- csc |= BIT(2);
- csc &= ~BIT(1);
- csc |= BIT(0);
-
- if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
- ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
-
- DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
- DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
- return 0;
-}
-
-static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
-{
- struct cdm_output_cfg cdm_cfg = { 0 };
-
- if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
- ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
-}
-
-static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
- unsigned long features)
-{
- ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
- ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
- ops->enable = dpu_hw_cdm_enable;
- ops->disable = dpu_hw_cdm_disable;
-}
-
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
-
-struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
- void __iomem *addr,
- struct dpu_mdss_cfg *m,
- struct dpu_hw_mdp *hw_mdp)
-{
- struct dpu_hw_cdm *c;
- struct dpu_cdm_cfg *cfg;
- int rc;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return ERR_PTR(-ENOMEM);
-
- cfg = _cdm_offset(idx, m, addr, &c->hw);
- if (IS_ERR_OR_NULL(cfg)) {
- kfree(c);
- return ERR_PTR(-EINVAL);
- }
-
- c->idx = idx;
- c->caps = cfg;
- _setup_cdm_ops(&c->ops, c->caps->features);
- c->hw_mdp = hw_mdp;
-
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
-
-	/*
-	 * Perform any default initialization for the chroma down module:
-	 * set up the default CSC coefficients
-	 */
- dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
-
- return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
-}
-
-void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
-{
- if (cdm)
- dpu_hw_blk_destroy(&cdm->base);
- kfree(cdm);
-}
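
For reference while reading the removal above: dpu_hw_cdm_setup_cdwn() scattered the CDM_CDWN2_OP_MODE bit layout across two switch statements. A minimal sketch of the same composition in one place, assuming nothing beyond the code above (the helper name is hypothetical, not part of the driver):

	/*
	 * Hypothetical helper, not part of the driver. Bit assignments are
	 * taken from dpu_hw_cdm_setup_cdwn() above:
	 *   BIT(0) module enable, BIT(1) DWNS_H, BIT(2) DWNS_V,
	 *   bits [4:3] METHOD_H, bits [6:5] METHOD_V, BIT(7) 8-bit output.
	 * The enum order (PIXEL_DROP, AVG, COSITE, OFFSITE) matches the
	 * 0x0..0x3 METHOD encodings, hence the "- 1" below.
	 */
	static u32 cdwn2_compose_opmode(enum dpu_hw_cdwn_type h,
					enum dpu_hw_cdwn_type v,
					bool out_8bit)
	{
		u32 opmode = 0;

		if (h != CDM_CDWN_DISABLE)
			opmode |= BIT(1) | ((h - 1) << 3);
		if (v != CDM_CDWN_DISABLE)
			opmode |= BIT(2) | ((v - 1) << 5);
		if (opmode)
			opmode |= BIT(0);	/* EN CDWN module */
		if (out_8bit)
			opmode |= BIT(7);

		return opmode;
	}
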
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
deleted file mode 100644
index 5cceb1ecb8e0..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _DPU_HW_CDM_H
-#define _DPU_HW_CDM_H
-
-#include "dpu_hw_mdss.h"
-#include "dpu_hw_top.h"
-#include "dpu_hw_blk.h"
-
-struct dpu_hw_cdm;
-
-struct dpu_hw_cdm_cfg {
- u32 output_width;
- u32 output_height;
- u32 output_bit_depth;
- u32 h_cdwn_type;
- u32 v_cdwn_type;
- const struct dpu_format *output_fmt;
- u32 output_type;
- int flags;
-};
-
-enum dpu_hw_cdwn_type {
- CDM_CDWN_DISABLE,
- CDM_CDWN_PIXEL_DROP,
- CDM_CDWN_AVG,
- CDM_CDWN_COSITE,
- CDM_CDWN_OFFSITE,
-};
-
-enum dpu_hw_cdwn_output_type {
- CDM_CDWN_OUTPUT_HDMI,
- CDM_CDWN_OUTPUT_WB,
-};
-
-enum dpu_hw_cdwn_output_bit_depth {
- CDM_CDWN_OUTPUT_8BIT,
- CDM_CDWN_OUTPUT_10BIT,
-};
-
-/**
- * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
- * Assumption is these functions will be called after
- * clocks are enabled
- * @setup_csc: Programs the csc matrix
- * @setup_cdwn: Sets up the chroma down sub module
- * @enable: Enables the output to interface and programs the
- * output packer
- * @disable: Puts the cdm in bypass mode
- */
-struct dpu_hw_cdm_ops {
- /**
-	 * Programs the CSC matrix for conversion from RGB space to YUV space.
-	 * Calling this is optional: a default matrix is programmed
-	 * automatically during initialization, so it only needs to be called
-	 * to program a matrix other than the default.
-	 * @cdm: Pointer to the chroma down context structure
-	 * @data: Pointer to CSC configuration data
- * return: 0 if success; error code otherwise
- */
- int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
- struct dpu_csc_cfg *data);
-
- /**
- * Programs the Chroma downsample part.
-	 * @cdm: Pointer to chroma down context
- */
- int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
- struct dpu_hw_cdm_cfg *cfg);
-
- /**
- * Enable the CDM module
-	 * @cdm: Pointer to chroma down context
- */
- int (*enable)(struct dpu_hw_cdm *cdm,
- struct dpu_hw_cdm_cfg *cfg);
-
- /**
- * Disable the CDM module
-	 * @cdm: Pointer to chroma down context
- */
- void (*disable)(struct dpu_hw_cdm *cdm);
-};
-
-struct dpu_hw_cdm {
- struct dpu_hw_blk base;
- struct dpu_hw_blk_reg_map hw;
-
- /* chroma down */
- const struct dpu_cdm_cfg *caps;
- enum dpu_cdm idx;
-
- /* mdp top hw driver */
- struct dpu_hw_mdp *hw_mdp;
-
- /* ops */
- struct dpu_hw_cdm_ops ops;
-};
-
-/**
- * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_cdm, base);
-}
-
-/**
- * dpu_hw_cdm_init - initializes the cdm hw driver object.
- * Should be called once per CDM before it is accessed.
- * @idx: cdm index for which driver object is required
- * @addr: mapped register io address of MDP
- * @m : pointer to mdss catalog data
- * @hw_mdp: pointer to mdp top hw driver object
- */
-struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
- void __iomem *addr,
- struct dpu_mdss_cfg *m,
- struct dpu_hw_mdp *hw_mdp);
-
-/**
- * dpu_hw_cdm_destroy - destroys CDM driver context
- * @cdm: pointer to CDM driver context
- */
-void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
-
-#endif /*_DPU_HW_CDM_H */
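
Putting the removed interface above to work, a caller on this path would have looked roughly like the following (a sketch assembled from these declarations only; the cfg values are illustrative and error handling is trimmed):

	struct dpu_hw_cdm_cfg cfg = {
		.output_width	  = 1920,	/* illustrative values */
		.output_height	  = 1080,
		.output_bit_depth = CDM_CDWN_OUTPUT_8BIT,
		.h_cdwn_type	  = CDM_CDWN_COSITE,
		.v_cdwn_type	  = CDM_CDWN_OFFSITE,
		.output_fmt	  = fmt,	/* a YUV dpu_format */
		.output_type	  = CDM_CDWN_OUTPUT_HDMI,
	};
	struct dpu_hw_cdm *cdm = dpu_hw_cdm_init(CDM_0, mmio, catalog, hw_mdp);

	if (!IS_ERR(cdm)) {
		cdm->ops.setup_cdwn(cdm, &cfg);	/* program chroma downscale */
		cdm->ops.enable(cdm, &cfg);	/* output packer + intf mux */
		/* scanout happens here */
		cdm->ops.disable(cdm);		/* back to bypass */
		dpu_hw_cdm_destroy(cdm);
	}
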
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 06be7cf7ce50..eec1051f2afc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -224,19 +224,6 @@ static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
-static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
- u32 *flushbits, enum dpu_cdm cdm)
-{
- switch (cdm) {
- case CDM_0:
- *flushbits |= BIT(26);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -310,7 +297,7 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
int i, j;
- u8 stages;
+ int stages;
int pipes_per_stage;
stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
@@ -485,7 +472,6 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
- ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
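
With the CDM flush bit gone, encoders collect CTL flush bits only through the remaining get_bitmask_* ops. A rough usage sketch given a struct dpu_hw_ctl *ctl, paraphrasing callers elsewhere in the driver (note the sspp helper returns its bit, per dpu_plane_get_ctl_flush() later in this patch, while the intf helper ORs into the caller's mask):

	u32 flushbits = 0;

	flushbits |= ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0);
	ctl->ops.get_bitmask_intf(ctl, &flushbits, INTF_1);
	/* CDM_0's BIT(26) can no longer be requested from this table */
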
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index c66a71f8b839..6f313faca43e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -142,10 +142,6 @@ struct dpu_hw_ctl_ops {
u32 *flushbits,
enum dpu_intf blk);
- int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
- u32 *flushbits,
- enum dpu_cdm blk);
-
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index d280df5613c9..9c6bba0ac7c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -65,9 +65,6 @@
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
-#define INTF_MISR_CTRL 0x180
-#define INTF_MISR_SIGNATURE 0x184
-
static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -246,30 +243,6 @@ static void dpu_hw_intf_get_status(
}
}
-static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
- bool enable, u32 frame_count)
-{
- struct dpu_hw_blk_reg_map *c = &intf->hw;
- u32 config = 0;
-
- DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
- /* clear misr data */
- wmb();
-
- if (enable)
- config = (frame_count & MISR_FRAME_COUNT_MASK) |
- MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
-}
-
-static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
-{
- struct dpu_hw_blk_reg_map *c = &intf->hw;
-
- return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
-}
-
static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c;
@@ -289,8 +262,6 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
- ops->setup_misr = dpu_hw_intf_setup_misr;
- ops->collect_misr = dpu_hw_intf_collect_misr;
ops->get_line_count = dpu_hw_intf_get_line_count;
}
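
For the record, the MISR programming that both this file and dpu_hw_lm.c are dropping followed one fixed sequence: clear the latched signature, barrier, then optionally re-arm in free-run mode with a frame count. Condensed from the removed functions above:

	DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
	wmb();	/* the clear must land before re-enabling */
	if (enable)
		DPU_REG_WRITE(c, INTF_MISR_CTRL,
			      (frame_count & MISR_FRAME_COUNT_MASK) |
			      MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK);
	/* later, collect the checksum for the configured frame window */
	u32 signature = DPU_REG_READ(c, INTF_MISR_SIGNATURE);
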
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index a79d735da68d..3b77df460dea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -59,8 +59,6 @@ struct intf_status {
 * @ setup_prg_fetch : enables/disables the programmable fetch logic
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
- * @ setup_misr: enables/disables MISR in HW register
- * @ collect_misr: reads and stores MISR data from HW register
* @ get_line_count: reads current vertical line counter
*/
struct dpu_hw_intf_ops {
@@ -77,11 +75,6 @@ struct dpu_hw_intf_ops {
void (*get_status)(struct dpu_hw_intf *intf,
struct intf_status *status);
- void (*setup_misr)(struct dpu_hw_intf *intf,
- bool enable, u32 frame_count);
-
- u32 (*collect_misr)(struct dpu_hw_intf *intf);
-
u32 (*get_line_count)(struct dpu_hw_intf *intf);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 4ab72b0f07a5..acb8dc8acaa5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -34,9 +34,6 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
-#define LM_MISR_CTRL 0x310
-#define LM_MISR_SIGNATURE 0x314
-
static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -171,30 +168,6 @@ static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
{
}
-static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
- bool enable, u32 frame_count)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 config = 0;
-
- DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
- /* clear misr data */
- wmb();
-
- if (enable)
- config = (frame_count & MISR_FRAME_COUNT_MASK) |
- MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, LM_MISR_CTRL, config);
-}
-
-static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
-
- return DPU_REG_READ(c, LM_MISR_SIGNATURE);
-}
-
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
@@ -207,8 +180,6 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
ops->setup_gc = dpu_hw_lm_gc;
- ops->setup_misr = dpu_hw_lm_setup_misr;
- ops->collect_misr = dpu_hw_lm_collect_misr;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index e29e5dab31bf..5b036aca8340 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -66,13 +66,6 @@ struct dpu_hw_lm_ops {
*/
void (*setup_gc)(struct dpu_hw_mixer *mixer,
void *cfg);
-
- /* setup_misr: enables/disables MISR in HW register */
- void (*setup_misr)(struct dpu_hw_mixer *ctx,
- bool enable, u32 frame_count);
-
- /* collect_misr: reads and stores MISR data from HW register */
- u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
};
struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 35e6bf930924..68c54d2c9677 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -100,7 +100,6 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_SSPP,
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
- DPU_HW_BLK_CDM,
DPU_HW_BLK_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
@@ -173,13 +172,6 @@ enum dpu_dspp {
DSPP_MAX
};
-enum dpu_ds {
- DS_TOP,
- DS_0,
- DS_1,
- DS_MAX
-};
-
enum dpu_ctl {
CTL_0 = 1,
CTL_1,
@@ -189,12 +181,6 @@ enum dpu_ctl {
CTL_MAX
};
-enum dpu_cdm {
- CDM_0 = 1,
- CDM_1,
- CDM_MAX
-};
-
enum dpu_pingpong {
PINGPONG_0 = 1,
PINGPONG_1,
@@ -246,12 +232,6 @@ enum dpu_wb {
WB_MAX
};
-enum dpu_ad {
- AD_0 = 0x1,
- AD_1,
- AD_MAX
-};
-
enum dpu_cwb {
CWB_0 = 0x1,
CWB_1,
@@ -451,15 +431,14 @@ struct dpu_mdss_color {
* Define bit masks for h/w logging.
*/
#define DPU_DBG_MASK_NONE (1 << 0)
-#define DPU_DBG_MASK_CDM (1 << 1)
-#define DPU_DBG_MASK_INTF (1 << 2)
-#define DPU_DBG_MASK_LM (1 << 3)
-#define DPU_DBG_MASK_CTL (1 << 4)
-#define DPU_DBG_MASK_PINGPONG (1 << 5)
-#define DPU_DBG_MASK_SSPP (1 << 6)
-#define DPU_DBG_MASK_WB (1 << 7)
-#define DPU_DBG_MASK_TOP (1 << 8)
-#define DPU_DBG_MASK_VBIF (1 << 9)
-#define DPU_DBG_MASK_ROT (1 << 10)
+#define DPU_DBG_MASK_INTF (1 << 1)
+#define DPU_DBG_MASK_LM (1 << 2)
+#define DPU_DBG_MASK_CTL (1 << 3)
+#define DPU_DBG_MASK_PINGPONG (1 << 4)
+#define DPU_DBG_MASK_SSPP (1 << 5)
+#define DPU_DBG_MASK_WB (1 << 6)
+#define DPU_DBG_MASK_TOP (1 << 7)
+#define DPU_DBG_MASK_VBIF (1 << 8)
+#define DPU_DBG_MASK_ROT (1 << 9)
#endif /* _DPU_HW_MDSS_H */
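
The renumbering above only closes the hole left by DPU_DBG_MASK_CDM; consumption of the masks is unchanged. Each reg map carries a class mask (e.g. b->log_mask = DPU_DBG_MASK_LM) that is tested against the module-static dpu_hw_util_log_mask visible in the dpu_hw_util.c hunk below. Roughly (a sketch; the debug macro name is assumed):

	void dpu_reg_write(struct dpu_hw_blk_reg_map *c, u32 reg_off, u32 val,
			   const char *name)
	{
		/* log only register classes selected at runtime
		 * (DPU_DEBUG_DRIVER name assumed) */
		if (c->log_mask & dpu_hw_util_log_mask)
			DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
					 name, c->blk_off + reg_off, val);
		writel_relaxed(val, c->base_off + c->blk_off + reg_off);
	}
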
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index db2798e862fc..b8781256e21b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -98,23 +98,6 @@ static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
-static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
- struct cdm_output_cfg *cfg)
-{
- struct dpu_hw_blk_reg_map *c;
- u32 out_ctl = 0;
-
- if (!mdp || !cfg)
- return;
-
- c = &mdp->hw;
-
- if (cfg->intf_en)
- out_ctl |= BIT(19);
-
- DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
-}
-
static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
@@ -307,7 +290,6 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
unsigned long cap)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
- ops->setup_cdm_output = dpu_hw_setup_cdm_output;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 899925aaa6d7..192e338f20bb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -52,14 +52,6 @@ struct split_pipe_cfg {
};
/**
- * struct cdm_output_cfg: output configuration for cdm
- * @intf_en : enable/disable interface output
- */
-struct cdm_output_cfg {
- bool intf_en;
-};
-
-/**
* struct dpu_danger_safe_status: danger and safe status signals
* @mdp: top level status
* @sspp: source pipe status
@@ -89,7 +81,6 @@ struct dpu_vsync_source_cfg {
* Assumption is these functions will be called after clocks are enabled.
* @setup_split_pipe : Programs the pipe control registers
* @setup_pp_split : Programs the pp split control registers
- * @setup_cdm_output : programs cdm control
* @setup_traffic_shaper : programs traffic shaper control
*/
struct dpu_hw_mdp_ops {
@@ -102,14 +93,6 @@ struct dpu_hw_mdp_ops {
struct split_pipe_cfg *p);
/**
- * setup_cdm_output() : Setup selection control of the cdm data path
- * @mdp : mdp top context driver
- * @cfg : cdm output configuration
- */
- void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
- struct cdm_output_cfg *cfg);
-
- /**
* setup_traffic_shaper() : Setup traffic shaper control
* @mdp : mdp top context driver
* @cfg : traffic shaper configuration
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 4cabae480a7b..cb5c0170374b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -50,9 +50,6 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
#define QSEED3_CLK_CTRL0 0x54
#define QSEED3_CLK_CTRL1 0x58
#define QSEED3_CLK_STATUS 0x5C
-#define QSEED3_MISR_CTRL 0x70
-#define QSEED3_MISR_SIGNATURE_0 0x74
-#define QSEED3_MISR_SIGNATURE_1 0x78
#define QSEED3_PHASE_INIT_Y_H 0x90
#define QSEED3_PHASE_INIT_Y_V 0x94
#define QSEED3_PHASE_INIT_UV_H 0x98
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 1240f505ca53..321fc64ddd0e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -148,16 +148,6 @@ struct dpu_hw_scaler3_cfg {
struct dpu_hw_scaler3_de_cfg de;
};
-struct dpu_hw_scaler3_lut_cfg {
- bool is_configured;
- u32 *dir_lut;
- size_t dir_len;
- u32 *cir_lut;
- size_t cir_len;
- u32 *sep_lut;
- size_t sep_len;
-};
-
/**
* struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
* @num_ext_pxls_lr: Number of total horizontal pixels
@@ -325,12 +315,6 @@ int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
-#define MISR_FRAME_COUNT_MASK 0xFF
-#define MISR_CTRL_ENABLE BIT(8)
-#define MISR_CTRL_STATUS BIT(9)
-#define MISR_CTRL_STATUS_CLEAR BIT(10)
-#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
-
void *dpu_hw_util_get_dir(void);
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 7dd6bd2d6d37..0a683e65a9f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -450,7 +450,7 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev,
int i, rc;
 	/* TODO: Support two independent DSI connectors */
- encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
if (IS_ERR_OR_NULL(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return;
@@ -531,12 +531,13 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
struct drm_device *dev;
struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
struct drm_crtc *crtc;
struct msm_drm_private *priv;
struct dpu_mdss_cfg *catalog;
- int primary_planes_idx = 0, i, ret;
+ int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
@@ -556,16 +557,24 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
- /* Create the planes */
+ /* Create the planes, keeping track of one primary/cursor per crtc */
for (i = 0; i < catalog->sspp_count; i++) {
- bool primary = true;
-
- if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
- || primary_planes_idx >= max_crtc_count)
- primary = false;
-
- plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
- (1UL << max_crtc_count) - 1, 0);
+ enum drm_plane_type type;
+
+ if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
+ && cursor_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
+ type, catalog->sspp[i].features,
+ catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
+ (1UL << max_crtc_count) - 1, 0);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -573,7 +582,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
}
priv->planes[priv->num_planes++] = plane;
- if (primary)
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ cursor_planes[cursor_planes_idx++] = plane;
+ else if (type == DRM_PLANE_TYPE_PRIMARY)
primary_planes[primary_planes_idx++] = plane;
}
@@ -581,7 +592,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
/* Create one CRTC per encoder */
for (i = 0; i < max_crtc_count; i++) {
- crtc = dpu_crtc_init(dev, primary_planes[i]);
+ crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
goto fail;
@@ -956,8 +967,7 @@ static void dpu_kms_handle_power_event(u32 event_type, void *usr)
if (!dpu_kms)
return;
- if (event_type == DPU_POWER_EVENT_POST_ENABLE)
- dpu_vbif_init_memtypes(dpu_kms);
+ dpu_vbif_init_memtypes(dpu_kms);
}
static int dpu_kms_hw_init(struct msm_kms *kms)
@@ -1144,10 +1154,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
/*
* Handle (re)initializations during power enable
*/
- dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+ dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
dpu_kms->power_event = dpu_power_handle_register_event(
- &dpu_kms->phandle,
- DPU_POWER_EVENT_POST_ENABLE,
+ &dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
dpu_kms_handle_power_event, dpu_kms, "kms");
pm_runtime_put_sync(&dpu_kms->pdev->dev);
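
A worked example of the allocation loop above: with max_crtc_count = 2 and a hypothetical catalog ordering of VIG0, VIG1, DMA0, CURSOR0, CURSOR1, the pipes come out as

	/* sspp     cursor feature?  slot check             -> type    */
	/* VIG0     no               primary 0 < 2          PRIMARY    */
	/* VIG1     no               primary 1 < 2          PRIMARY    */
	/* DMA0     no               primary 2 == 2 (full)  OVERLAY    */
	/* CURSOR0  yes              cursor  0 < 2          CURSOR     */
	/* CURSOR1  yes              cursor  1 < 2          CURSOR     */

so each CRTC i is then built from primary_planes[i] plus cursor_planes[i], the latter staying NULL on catalogs with no cursor pipes.
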
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 9e533b86682c..2235ef8129f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -158,6 +158,8 @@ static void dpu_mdss_destroy(struct drm_device *dev)
_dpu_mdss_irq_domain_fini(dpu_mdss);
+ free_irq(platform_get_irq(pdev, 0), dpu_mdss);
+
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
@@ -215,7 +217,7 @@ int dpu_mdss_init(struct drm_device *dev)
if (ret)
goto irq_domain_error;
- ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ ret = request_irq(platform_get_irq(pdev, 0),
dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
if (ret) {
DPU_ERROR("failed to init irq: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index ec959f847d5f..d77a8cb15404 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -125,26 +125,11 @@ struct dpu_plane {
static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
{
- struct msm_drm_private *priv;
+ struct msm_drm_private *priv = plane->dev->dev_private;
- if (!plane || !plane->dev)
- return NULL;
- priv = plane->dev->dev_private;
- if (!priv)
- return NULL;
return to_dpu_kms(priv->kms);
}
-static bool dpu_plane_enabled(struct drm_plane_state *state)
-{
- return state && state->fb && state->crtc;
-}
-
-static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
-{
- return state && state->crtc;
-}
-
/**
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
* @plane: Pointer to drm plane
@@ -160,7 +145,7 @@ static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
u32 fixed_buff_size;
u32 total_fl;
- if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+ if (!fmt || !plane->state || !src_width || !fmt->bpp) {
DPU_ERROR("invalid arguments\n");
return 0;
}
@@ -170,7 +155,7 @@ static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
- if (!dpu_plane_enabled(tmp->base.state))
+ if (!tmp->base.state->visible)
continue;
DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
pdpu->base.base.id, tmp->base.base.id,
@@ -241,26 +226,11 @@ static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
u64 qos_lut;
u32 total_fl = 0, lut_usage;
- if (!plane || !fb) {
- DPU_ERROR("invalid arguments plane %d fb %d\n",
- plane != 0, fb != 0);
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
- return;
- }
-
if (!pdpu->is_rt_pipe) {
lut_usage = DPU_QOS_LUT_USAGE_NRT;
} else {
@@ -302,24 +272,10 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
u32 danger_lut, safe_lut;
- if (!plane || !fb) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
- return;
- }
-
if (!pdpu->is_rt_pipe) {
danger_lut = pdpu->catalog->perf.danger_lut_tbl
[DPU_QOS_LUT_USAGE_NRT];
@@ -373,21 +329,7 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
bool enable, u32 flags)
{
- struct dpu_plane *pdpu;
-
- if (!plane) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
- return;
- }
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
@@ -423,35 +365,17 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
&pdpu->pipe_qos_cfg);
}
-int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
{
- struct dpu_plane *pdpu;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
if (!pdpu->is_rt_pipe)
- goto end;
+ return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
-end:
- return 0;
}
/**
@@ -462,29 +386,9 @@ end:
static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
struct drm_crtc *crtc)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev || !crtc) {
- DPU_ERROR("invalid arguments plane %d crtc %d\n",
- plane != 0, crtc != 0);
- return;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR("invalid pipe reference\n");
- return;
- }
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
@@ -506,28 +410,9 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
*/
static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_qos_params qos_params;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR("invalid pipe reference\n");
- return;
- }
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = VBIF_RT;
@@ -548,27 +433,12 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
/**
* _dpu_plane_get_aspace: gets the address space
*/
-static int _dpu_plane_get_aspace(
- struct dpu_plane *pdpu,
- struct dpu_plane_state *pstate,
- struct msm_gem_address_space **aspace)
+static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
+ struct dpu_plane *pdpu)
{
- struct dpu_kms *kms;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
- if (!pdpu || !pstate || !aspace) {
- DPU_ERROR("invalid parameters\n");
- return -EINVAL;
- }
-
- kms = _dpu_plane_get_kms(&pdpu->base);
- if (!kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- *aspace = kms->base.aspace;
-
- return 0;
+ return kms->base.aspace;
}
static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
@@ -576,29 +446,10 @@ static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
- struct msm_gem_address_space *aspace = NULL;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
int ret;
- if (!plane || !pstate || !pipe_cfg || !fb) {
- DPU_ERROR(
- "invalid arg(s), plane %d state %d cfg %d fb %d\n",
- plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
- return;
- }
-
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
- return;
- }
-
- ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
- return;
- }
-
ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
@@ -622,15 +473,6 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
{
uint32_t i;
- if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
- !chroma_subsmpl_v) {
- DPU_ERROR(
- "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
- !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
- chroma_subsmpl_v);
- return;
- }
-
memset(scale_cfg, 0, sizeof(*scale_cfg));
memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
@@ -734,17 +576,8 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
const struct dpu_format *fmt, bool color_fill)
{
- struct dpu_hw_pixel_ext *pe;
uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
- if (!pdpu || !fmt || !pstate) {
- DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
- pdpu != 0, fmt != 0, pstate != 0);
- return;
- }
-
- pe = &pstate->pixel_ext;
-
/* don't chroma subsample if decimating */
chroma_subsmpl_h =
drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
@@ -772,21 +605,8 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
uint32_t color, uint32_t alpha)
{
const struct dpu_format *fmt;
- const struct drm_plane *plane;
- struct dpu_plane_state *pstate;
-
- if (!pdpu || !pdpu->base.state) {
- DPU_ERROR("invalid plane\n");
- return -EINVAL;
- }
-
- if (!pdpu->pipe_hw) {
- DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
- return -EINVAL;
- }
-
- plane = &pdpu->base;
- pstate = to_dpu_plane_state(plane->state);
+ const struct drm_plane *plane = &pdpu->base;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
DPU_DEBUG_PLANE(pdpu, "\n");
@@ -837,12 +657,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
{
- struct dpu_plane_state *pstate;
-
- if (!drm_state)
- return;
-
- pstate = to_dpu_plane_state(drm_state);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(drm_state);
pstate->multirect_index = DPU_SSPP_RECT_SOLO;
pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
@@ -973,15 +788,6 @@ done:
void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
u32 *flush_sspp)
{
- struct dpu_plane_state *pstate;
-
- if (!plane || !flush_sspp) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- pstate = to_dpu_plane_state(plane->state);
-
*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
}
@@ -995,7 +801,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
struct dma_fence *fence;
- struct msm_gem_address_space *aspace;
+ struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
int ret;
if (!new_state->fb)
@@ -1003,12 +809,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
- ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
- return ret;
- }
-
/* cache aspace */
pstate->aspace = aspace;
@@ -1078,33 +878,30 @@ static bool dpu_plane_validate_src(struct drm_rect *src,
drm_rect_equals(fb_rect, src);
}
-static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- int ret = 0;
- struct dpu_plane *pdpu;
- struct dpu_plane_state *pstate;
+ int ret = 0, min_scale;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ const struct drm_crtc_state *crtc_state = NULL;
const struct dpu_format *fmt;
struct drm_rect src, dst, fb_rect = { 0 };
- uint32_t max_upscale = 1, max_downscale = 1;
uint32_t min_src_size, max_linewidth;
- int hscale = 1, vscale = 1;
- if (!plane || !state) {
- DPU_ERROR("invalid arg(s), plane %d state %d\n",
- plane != 0, state != 0);
- ret = -EINVAL;
- goto exit;
- }
-
- pdpu = to_dpu_plane(plane);
- pstate = to_dpu_plane_state(state);
+ if (state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state->state,
+ state->crtc);
- if (!pdpu->pipe_sblk) {
- DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
- ret = -EINVAL;
- goto exit;
+ min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale,
+ pdpu->pipe_sblk->maxupscale << 16,
+ true, true);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+ return ret;
}
+ if (!state->visible)
+ return 0;
src.x1 = state->src_x >> 16;
src.y1 = state->src_y >> 16;
@@ -1118,25 +915,6 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
- if (pdpu->features & DPU_SSPP_SCALER) {
- max_downscale = pdpu->pipe_sblk->maxdwnscale;
- max_upscale = pdpu->pipe_sblk->maxupscale;
- }
- if (drm_rect_width(&src) < drm_rect_width(&dst))
- hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
- else
- hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
- if (drm_rect_height(&src) < drm_rect_height(&dst))
- vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
- else
- vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
-
- DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
- dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
-
- if (!dpu_plane_enabled(state))
- goto exit;
-
fmt = to_dpu_format(msm_framebuffer_format(state->fb));
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -1147,13 +925,13 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
| BIT(DPU_SSPP_CSC_10BIT))))) {
DPU_ERROR_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
- ret = -EINVAL;
+ return -EINVAL;
/* check src bounds */
} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
- ret = -E2BIG;
+ return -E2BIG;
/* valid yuv image */
} else if (DPU_FORMAT_IS_YUV(fmt) &&
@@ -1162,41 +940,22 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
drm_rect_height(&src) & 0x1)) {
DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
- ret = -EINVAL;
+ return -EINVAL;
/* min dst support */
} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&dst));
- ret = -EINVAL;
+ return -EINVAL;
/* check decimated source width */
} else if (drm_rect_width(&src) > max_linewidth) {
DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&src), max_linewidth);
- ret = -E2BIG;
-
- /* check scaler capability */
- } else if (hscale < 0 || vscale < 0) {
- DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
- DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
- ret = -E2BIG;
+ return -E2BIG;
}
-exit:
- return ret;
-}
-
-static int dpu_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- if (!state->fb)
- return 0;
-
- DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
-
- return dpu_plane_sspp_atomic_check(plane, state);
+ return 0;
}
void dpu_plane_flush(struct drm_plane *plane)
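
The scale limits handed to drm_atomic_helper_check_plane_state() above are 16.16 fixed point. Assuming the usual FRAC_16_16(mult, div) = ((mult) << 16) / (div) definition and illustrative catalog limits maxdwnscale = 4, maxupscale = 20:

	/* FRAC_16_16 definition and limit values assumed for illustration */
	min_scale = FRAC_16_16(1, 4);	/* 0x4000: may shrink to 1/4 */
	max_scale = 20 << 16;		/* 0x140000: may grow 20x    */
	/* the helper rejects src/dst ratios outside [min_scale, max_scale]
	 * and also computes state->visible, which the new checks rely on */
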
@@ -1245,46 +1004,16 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error)
pdpu->is_error = error;
}
-static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
{
- uint32_t nplanes, src_flags;
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
- struct dpu_plane_state *pstate;
- struct dpu_plane_state *old_pstate;
- const struct dpu_format *fmt;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- struct drm_rect src, dst;
-
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return -EINVAL;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return -EINVAL;
- } else if (!old_state) {
- DPU_ERROR("invalid old state\n");
- return -EINVAL;
- }
-
- pdpu = to_dpu_plane(plane);
- state = plane->state;
-
- pstate = to_dpu_plane_state(state);
-
- old_pstate = to_dpu_plane_state(old_state);
-
- crtc = state->crtc;
- fb = state->fb;
- if (!crtc || !fb) {
- DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
- crtc != 0, fb != 0);
- return -EINVAL;
- }
- fmt = to_dpu_format(msm_framebuffer_format(fb));
- nplanes = fmt->num_planes;
+ uint32_t src_flags;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ const struct dpu_format *fmt =
+ to_dpu_format(msm_framebuffer_format(fb));
memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
@@ -1295,28 +1024,27 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
- src.x1 = state->src_x >> 16;
- src.y1 = state->src_y >> 16;
- src.x2 = src.x1 + (state->src_w >> 16);
- src.y2 = src.y1 + (state->src_h >> 16);
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
+ crtc->base.id, DRM_RECT_ARG(&state->dst),
+ (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
- dst = drm_plane_state_dest(state);
+ pdpu->pipe_cfg.src_rect = state->src;
- DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
- ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
- crtc->base.id, DRM_RECT_ARG(&dst),
- (char *)&fmt->base.pixel_format,
- DPU_FORMAT_IS_UBWC(fmt));
+ /* state->src is 16.16, src_rect is not */
+ pdpu->pipe_cfg.src_rect.x1 >>= 16;
+ pdpu->pipe_cfg.src_rect.x2 >>= 16;
+ pdpu->pipe_cfg.src_rect.y1 >>= 16;
+ pdpu->pipe_cfg.src_rect.y2 >>= 16;
- pdpu->pipe_cfg.src_rect = src;
- pdpu->pipe_cfg.dst_rect = dst;
+ pdpu->pipe_cfg.dst_rect = state->dst;
_dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
/* override for color fill */
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
/* skip remaining processing on color fill */
- return 0;
+ return;
}
if (pdpu->pipe_hw->ops.setup_rects) {
@@ -1387,30 +1115,13 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
}
_dpu_plane_set_qos_remap(plane);
- return 0;
}
-static void _dpu_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
- struct dpu_plane_state *pstate;
-
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return;
- } else if (!old_state) {
- DPU_ERROR("invalid old state\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
- state = plane->state;
- pstate = to_dpu_plane_state(state);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
pstate->multirect_mode);
@@ -1426,31 +1137,17 @@ static void _dpu_plane_atomic_disable(struct drm_plane *plane,
static void dpu_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
-
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return;
- }
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
- pdpu = to_dpu_plane(plane);
pdpu->is_error = false;
- state = plane->state;
DPU_DEBUG_PLANE(pdpu, "\n");
- if (!dpu_plane_sspp_enabled(state)) {
- _dpu_plane_atomic_disable(plane, old_state);
+ if (!state->visible) {
+ _dpu_plane_atomic_disable(plane);
} else {
- int ret;
-
- ret = dpu_plane_sspp_atomic_update(plane, old_state);
- /* atomic_check should have ensured that this doesn't fail */
- WARN_ON(ret < 0);
+ dpu_plane_sspp_atomic_update(plane);
}
}
@@ -1485,8 +1182,7 @@ static void dpu_plane_destroy(struct drm_plane *plane)
/* this will destroy the states as well */
drm_plane_cleanup(plane);
- if (pdpu->pipe_hw)
- dpu_hw_sspp_destroy(pdpu->pipe_hw);
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
kfree(pdpu);
}
@@ -1505,9 +1201,7 @@ static void dpu_plane_destroy_state(struct drm_plane *plane,
pstate = to_dpu_plane_state(state);
- /* remove ref count for frame buffers */
- if (state->fb)
- drm_framebuffer_put(state->fb);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(pstate);
}
@@ -1827,40 +1521,17 @@ bool is_dpu_plane_virtual(struct drm_plane *plane)
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, bool primary_plane,
+ uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id)
{
struct drm_plane *plane = NULL, *master_plane = NULL;
const struct dpu_format_extended *format_list;
struct dpu_plane *pdpu;
- struct msm_drm_private *priv;
- struct dpu_kms *kms;
- enum drm_plane_type type;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
int ret = -EINVAL;
- if (!dev) {
- DPU_ERROR("[%u]device is NULL\n", pipe);
- goto exit;
- }
-
- priv = dev->dev_private;
- if (!priv) {
- DPU_ERROR("[%u]private data is NULL\n", pipe);
- goto exit;
- }
-
- if (!priv->kms) {
- DPU_ERROR("[%u]invalid KMS reference\n", pipe);
- goto exit;
- }
- kms = to_dpu_kms(priv->kms);
-
- if (!kms->catalog) {
- DPU_ERROR("[%u]invalid catalog reference\n", pipe);
- goto exit;
- }
-
/* create and zero local structure */
pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
if (!pdpu) {
@@ -1916,12 +1587,6 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- if (pdpu->features & BIT(DPU_SSPP_CURSOR))
- type = DRM_PLANE_TYPE_CURSOR;
- else if (primary_plane)
- type = DRM_PLANE_TYPE_PRIMARY;
- else
- type = DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
pdpu->formats, pdpu->nformats,
NULL, type, NULL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index f6fe6ddc7a3a..7fed0b627708 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -122,7 +122,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
* dpu_plane_init - create new dpu plane for the given pipe
* @dev: Pointer to DRM device
* @pipe: dpu hardware pipe identifier
- * @primary_plane: true if this pipe is primary plane for crtc
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
* @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
* a regular plane initialization. A non-zero primary plane
@@ -130,7 +130,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, bool primary_plane,
+ uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id);
/**
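
Callers now pass the DRM plane type straight through; the loop in _dpu_kms_drm_obj_init() earlier in this patch is the in-tree example (DRM_PLANE_TYPE_PRIMARY shown here for concreteness, error handling trimmed):

	plane = dpu_plane_init(dev, catalog->sspp[i].id, DRM_PLANE_TYPE_PRIMARY,
			       (1UL << max_crtc_count) - 1, 0);
	if (IS_ERR(plane))
		return PTR_ERR(plane);
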
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
index a75eebca2f37..fc14116789f2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -145,6 +145,7 @@ int dpu_power_resource_enable(struct dpu_power_handle *phandle,
bool changed = false;
u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
struct dpu_power_client *client;
+ u32 event_type;
if (!phandle || !pclient) {
pr_err("invalid input argument\n");
@@ -181,19 +182,9 @@ int dpu_power_resource_enable(struct dpu_power_handle *phandle,
if (!changed)
goto end;
- if (enable) {
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_PRE_ENABLE);
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_POST_ENABLE);
-
- } else {
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_PRE_DISABLE);
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_POST_DISABLE);
- }
+ event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
+ dpu_power_event_trigger_locked(phandle, event_type);
end:
mutex_unlock(&phandle->phandle_lock);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
index 344f74464eca..a65b7a297f21 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -23,17 +23,9 @@
#include "dpu_io_util.h"
-/* event will be triggered before power handler disable */
-#define DPU_POWER_EVENT_PRE_DISABLE 0x1
-
-/* event will be triggered after power handler disable */
-#define DPU_POWER_EVENT_POST_DISABLE 0x2
-
-/* event will be triggered before power handler enable */
-#define DPU_POWER_EVENT_PRE_ENABLE 0x4
-
-/* event will be triggered after power handler enable */
-#define DPU_POWER_EVENT_POST_ENABLE 0x8
+/* events will be triggered on power handler enable/disable */
+#define DPU_POWER_EVENT_DISABLE BIT(0)
+#define DPU_POWER_EVENT_ENABLE BIT(1)
/**
* mdss_bus_vote_type: register bus vote type
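
With only two event bits left, a client registers for exactly the transitions it wants and the handler no longer needs pre/post disambiguation; the dpu_kms.c hunk earlier in this patch is the one in-tree user:

	dpu_kms->power_event = dpu_power_handle_register_event(
			&dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
			dpu_kms_handle_power_event, dpu_kms, "kms");
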
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 13c0a36d4ef9..bdb117709674 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -16,7 +16,6 @@
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
-#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
@@ -25,38 +24,13 @@
#define RESERVED_BY_OTHER(h, r) \
((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
-#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
-#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
-#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
-#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
- (t).num_comp_enc == (r).num_enc && \
- (t).num_intf == (r).num_intf)
-
-struct dpu_rm_topology_def {
- enum dpu_rm_topology_name top_name;
- int num_lm;
- int num_comp_enc;
- int num_intf;
- int num_ctl;
- int needs_split_display;
-};
-
-static const struct dpu_rm_topology_def g_top_table[] = {
- { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
- { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
- { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
- { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
-};
-
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
- * @top_ctrl: topology control preference from kernel client
- * @top: selected topology for the display
+ * @topology: selected topology for the display
* @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
- uint64_t top_ctrl;
- const struct dpu_rm_topology_def *topology;
+ struct msm_display_topology topology;
struct dpu_encoder_hw_resources hw_res;
};
@@ -72,13 +46,11 @@ struct dpu_rm_requirements {
* @enc_id: Reservations are tracked by Encoder DRM object ID.
* CRTCs may be connected to multiple Encoders.
* An encoder or connector id identifies the display path.
- * @topology DRM<->HW topology use case
*/
struct dpu_rm_rsvp {
struct list_head list;
uint32_t seq;
uint32_t enc_id;
- enum dpu_rm_topology_name topology;
};
/**
@@ -122,8 +94,8 @@ static void _dpu_rm_print_rsvps(
DPU_DEBUG("%d\n", stage);
list_for_each_entry(rsvp, &rm->rsvps, list) {
- DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
- rsvp->enc_id, rsvp->topology);
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq,
+ rsvp->enc_id);
}
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
@@ -146,18 +118,6 @@ struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
return rm->hw_mdp;
}
-enum dpu_rm_topology_name
-dpu_rm_get_topology_name(struct msm_display_topology topology)
-{
- int i;
-
- for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
- if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
- return g_top_table[i].top_name;
-
- return DPU_RM_TOPOLOGY_NONE;
-}
-
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
@@ -229,9 +189,6 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
case DPU_HW_BLK_CTL:
dpu_hw_ctl_destroy(hw);
break;
- case DPU_HW_BLK_CDM:
- dpu_hw_cdm_destroy(hw);
- break;
case DPU_HW_BLK_PINGPONG:
dpu_hw_pingpong_destroy(hw);
break;
@@ -305,9 +262,6 @@ static int _dpu_rm_hw_blk_create(
case DPU_HW_BLK_CTL:
hw = dpu_hw_ctl_init(id, mmio, cat);
break;
- case DPU_HW_BLK_CDM:
- hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
- break;
case DPU_HW_BLK_PINGPONG:
hw = dpu_hw_pingpong_init(id, mmio, cat);
break;
@@ -438,15 +392,6 @@ int dpu_rm_init(struct dpu_rm *rm,
}
}
- for (i = 0; i < cat->cdm_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
- cat->cdm[i].id, &cat->cdm[i]);
- if (rc) {
- DPU_ERROR("failed: cdm hw not available\n");
- goto fail;
- }
- }
-
return 0;
fail:
@@ -455,6 +400,11 @@ fail:
return rc;
}
+static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
+{
+ return top->num_intf > 1;
+}
+
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
@@ -538,14 +488,14 @@ static int _dpu_rm_reserve_lms(
int lm_count = 0;
int i, rc = 0;
- if (!reqs->topology->num_lm) {
- DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+ if (!reqs->topology.num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
return -EINVAL;
}
/* Find a primary mixer */
dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology->num_lm &&
+ while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&pp, 0, sizeof(pp));
@@ -563,7 +513,7 @@ static int _dpu_rm_reserve_lms(
/* Valid primary mixer found, find matching peers */
dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology->num_lm &&
+ while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
@@ -578,7 +528,7 @@ static int _dpu_rm_reserve_lms(
}
}
- if (lm_count != reqs->topology->num_lm) {
+ if (lm_count != reqs->topology.num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@@ -600,14 +550,20 @@ static int _dpu_rm_reserve_lms(
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
- const struct dpu_rm_topology_def *top)
+ const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
struct dpu_rm_hw_iter iter;
- int i = 0;
+ int i = 0, num_ctls = 0;
+ bool needs_split_display = false;
memset(&ctls, 0, sizeof(ctls));
+	/* each hw_intf needs its own hw_ctl to program its control path */
+ num_ctls = top->num_intf;
+
+ needs_split_display = _dpu_rm_needs_split_display(top);
+
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
while (_dpu_rm_get_hw_locked(rm, &iter)) {
const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
@@ -621,20 +577,20 @@ static int _dpu_rm_reserve_ctls(
DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
- if (top->needs_split_display != has_split_display)
+ if (needs_split_display != has_split_display)
continue;
ctls[i] = iter.blk;
DPU_DEBUG("ctl %d match\n", iter.blk->id);
- if (++i == top->num_ctl)
+ if (++i == num_ctls)
break;
}
- if (i != top->num_ctl)
+ if (i != num_ctls)
return -ENAVAIL;
- for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
ctls[i]->rsvp_nxt = rsvp;
trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
rsvp->enc_id);
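
Since every interface now needs its own CTL, the topology alone fixes the demand. Tracing a dual-DSI split display, msm_display_topology { .num_lm = 2, .num_enc = 0, .num_intf = 2 }, through the code above:

	num_ctls = top->num_intf;				/* = 2 */
	needs_split_display = _dpu_rm_needs_split_display(top);	/* 2 > 1: true */
	/* the iterator must then find two unreserved CTLs whose features
	 * advertise split-display support, or reservation fails -ENAVAIL */
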
@@ -643,55 +599,11 @@ static int _dpu_rm_reserve_ctls(
return 0;
}
-static int _dpu_rm_reserve_cdm(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- uint32_t id,
- enum dpu_hw_blk_type type)
-{
- struct dpu_rm_hw_iter iter;
-
- DRM_DEBUG_KMS("type %d id %d\n", type, id);
-
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
- const struct dpu_cdm_cfg *caps = cdm->caps;
- bool match = false;
-
- if (RESERVED_BY_OTHER(iter.blk, rsvp))
- continue;
-
- if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
- match = test_bit(id, &caps->intf_connect);
-
- DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
- iter.blk->type, iter.blk->id, rsvp->enc_id,
- caps->intf_connect, match);
-
- if (!match)
- continue;
-
- trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
- rsvp->enc_id);
- iter.blk->rsvp_nxt = rsvp;
- break;
- }
-
- if (!iter.hw) {
- DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
- return -ENAVAIL;
- }
-
- return 0;
-}
-
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
uint32_t id,
- enum dpu_hw_blk_type type,
- bool needs_cdm)
+ enum dpu_hw_blk_type type)
{
struct dpu_rm_hw_iter iter;
int ret = 0;
@@ -719,9 +631,6 @@ static int _dpu_rm_reserve_intf(
return -EINVAL;
}
- if (needs_cdm)
- ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
-
return ret;
}
@@ -738,7 +647,7 @@ static int _dpu_rm_reserve_intf_related_hw(
continue;
id = i + INTF_0;
ret = _dpu_rm_reserve_intf(rm, rsvp, id,
- DPU_HW_BLK_INTF, hw_res->needs_cdm);
+ DPU_HW_BLK_INTF);
if (ret)
return ret;
}
@@ -750,17 +659,14 @@ static int _dpu_rm_make_next_rsvp(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct dpu_rm_rsvp *rsvp,
struct dpu_rm_requirements *reqs)
{
int ret;
- struct dpu_rm_topology_def topology;
/* Create reservation info, tag reserved blocks with it as we go */
rsvp->seq = ++rm->rsvp_next_seq;
rsvp->enc_id = enc->base.id;
- rsvp->topology = reqs->topology->top_name;
list_add_tail(&rsvp->list, &rm->rsvps);
ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
@@ -769,23 +675,12 @@ static int _dpu_rm_make_next_rsvp(
return ret;
}
- /*
- * Do assignment preferring to give away low-resource CTLs first:
- * - Check mixers without Split Display
- * - Only then allow to grab from CTLs with split display capability
- */
- _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
- if (ret && !reqs->topology->needs_split_display) {
- memcpy(&topology, reqs->topology, sizeof(topology));
- topology.needs_split_display = true;
- _dpu_rm_reserve_ctls(rm, rsvp, &topology);
- }
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
if (ret)
return ret;
@@ -797,44 +692,16 @@ static int _dpu_rm_populate_requirements(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
- int i;
+ dpu_encoder_get_hw_resources(enc, &reqs->hw_res);
- memset(reqs, 0, sizeof(*reqs));
+ reqs->topology = req_topology;
- dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
-
- for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
- if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
- req_topology)) {
- reqs->topology = &g_top_table[i];
- break;
- }
- }
-
- if (!reqs->topology) {
- DPU_ERROR("invalid topology for the display\n");
- return -EINVAL;
- }
-
- /**
- * Set the requirement based on caps if not set from user space
- * This will ensure to select LM tied with DS blocks
- * Currently, DS blocks are tied with LM 0 and LM 1 (primary display)
- */
- if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
- conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
- reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
-
- DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
- reqs->hw_res.display_num_of_h_tiles);
- DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
- reqs->topology->num_lm, reqs->topology->num_ctl,
- reqs->topology->top_name,
- reqs->topology->needs_split_display);
+ DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
+ reqs->topology.num_lm, reqs->topology.num_enc,
+ reqs->topology.num_intf);
return 0;
}
@@ -860,29 +727,12 @@ static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
return NULL;
}
-static struct drm_connector *_dpu_rm_get_connector(
- struct drm_encoder *enc)
-{
- struct drm_connector *conn = NULL;
- struct list_head *connector_list =
- &enc->dev->mode_config.connector_list;
-
- list_for_each_entry(conn, connector_list, head)
- if (conn->encoder == enc)
- return conn;
-
- return NULL;
-}
-
/**
* _dpu_rm_release_rsvp - release resources and release a reservation
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
-static void _dpu_rm_release_rsvp(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct drm_connector *conn)
+static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
struct dpu_rm_hw_blk *blk;
@@ -923,7 +773,6 @@ static void _dpu_rm_release_rsvp(
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
struct dpu_rm_rsvp *rsvp;
- struct drm_connector *conn;
if (!rm || !enc) {
DPU_ERROR("invalid params\n");
@@ -938,25 +787,15 @@ void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
goto end;
}
- conn = _dpu_rm_get_connector(enc);
- if (!conn) {
- DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
- goto end;
- }
-
- _dpu_rm_release_rsvp(rm, rsvp, conn);
+ _dpu_rm_release_rsvp(rm, rsvp);
end:
mutex_unlock(&rm->rm_lock);
}
-static int _dpu_rm_commit_rsvp(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct drm_connector_state *conn_state)
+static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
- int ret = 0;
/* Swap next rsvp to be the active */
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
@@ -967,19 +806,12 @@ static int _dpu_rm_commit_rsvp(
}
}
}
-
- if (!ret)
- DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
- rsvp->topology);
-
- return ret;
}
int dpu_rm_reserve(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only)
{
@@ -987,25 +819,19 @@ int dpu_rm_reserve(
struct dpu_rm_requirements reqs;
int ret;
- if (!rm || !enc || !crtc_state || !conn_state) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
/* Check if this is just a page-flip */
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
- DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
- conn_state->connector->base.id, enc->base.id,
- crtc_state->crtc->base.id, test_only);
+ DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
+ enc->base.id, crtc_state->crtc->base.id, test_only);
mutex_lock(&rm->rm_lock);
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
- ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
- conn_state, &reqs, topology);
+ ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
+ topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
goto end;
@@ -1030,28 +856,15 @@ int dpu_rm_reserve(
rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
- /*
- * User can request that we clear out any reservation during the
- * atomic_check phase by using this CLEAR bit
- */
- if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
- DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
- rsvp_cur->seq, rsvp_cur->enc_id);
- _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
- rsvp_cur = NULL;
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
- }
-
/* Check the proposed reservation, store it in hw's "next" field */
- ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
- rsvp_nxt, &reqs);
+ ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs);
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
- } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ _dpu_rm_release_rsvp(rm, rsvp_nxt);
+ } else if (test_only) {
/*
* Normally, if test_only, test the reservation and then undo
* However, if the user requests LOCK, then keep the reservation
@@ -1059,15 +872,11 @@ int dpu_rm_reserve(
*/
DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
rsvp_nxt->seq, rsvp_nxt->enc_id);
- _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt);
} else {
- if (test_only && RM_RQ_LOCK(&reqs))
- DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
- rsvp_nxt->seq, rsvp_nxt->enc_id);
-
- _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ _dpu_rm_release_rsvp(rm, rsvp_cur);
- ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ _dpu_rm_commit_rsvp(rm, rsvp_nxt);
}
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
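The dpu_rm.c changes above collapse the driver-private topology table into direct use of the generic struct msm_display_topology: CTL requirements now fall out of the interface count alone, and the CDM block plus the CLEAR/LOCK reservation knobs are gone. A minimal sketch of the new derivation, using the same field names as the code above (an illustration, not the driver source verbatim; required_ctls() is a hypothetical helper):

	/* one CTL per hw_intf; split-display-capable CTLs are required
	 * exactly when more than one interface is driven
	 */
	static bool needs_split_display(const struct msm_display_topology *top)
	{
		return top->num_intf > 1;
	}

	static u32 required_ctls(const struct msm_display_topology *top)
	{
		return top->num_intf;
	}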
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index ffd1841a6067..b8273bd23801 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -21,39 +21,6 @@
#include "dpu_hw_top.h"
/**
- * enum dpu_rm_topology_name - HW resource use case in use by connector
- * @DPU_RM_TOPOLOGY_NONE: No topology in use currently
- * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
- * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
- * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
- */
-enum dpu_rm_topology_name {
- DPU_RM_TOPOLOGY_NONE = 0,
- DPU_RM_TOPOLOGY_SINGLEPIPE,
- DPU_RM_TOPOLOGY_DUALPIPE,
- DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
- DPU_RM_TOPOLOGY_MAX,
-};
-
-/**
- * enum dpu_rm_topology_control - HW resource use case in use by connector
- * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
- * test, reserve the resources for this display.
- * Normal behavior would not impact the reservation
- * list during the AtomicTest phase.
- * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
- * release any reservation held by this display.
- * Normal behavior would not impact the
- * reservation list during the AtomicTest phase.
- * @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities
- */
-enum dpu_rm_topology_control {
- DPU_RM_TOPCTL_RESERVE_LOCK,
- DPU_RM_TOPCTL_RESERVE_CLEAR,
- DPU_RM_TOPCTL_DS,
-};
-
-/**
* struct dpu_rm - DPU dynamic hardware resource manager
* @dev: device handle for event logging purposes
* @rsvps: list of hardware reservations by each crtc->encoder->connector
@@ -125,7 +92,6 @@ int dpu_rm_destroy(struct dpu_rm *rm);
* @rm: DPU Resource Manager handle
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
- * @conn_state: Proposed Atomic DRM Connector State handle
* @topology: Pointer to topology info for the display
* @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
@@ -133,7 +99,6 @@ int dpu_rm_destroy(struct dpu_rm *rm);
int dpu_rm_reserve(struct dpu_rm *rm,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only);
@@ -187,13 +152,4 @@ bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
*/
int dpu_rm_check_property_topctl(uint64_t val);
-/**
- * dpu_rm_get_topology_name - returns the name of the given topology
- * definition
- * @topology: topology definition
- * @Return: name of the topology
- */
-enum dpu_rm_topology_name
-dpu_rm_get_topology_name(struct msm_display_topology topology);
-
#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index ae0ca5076238..e12c4cefb742 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -468,14 +468,16 @@ TRACE_EVENT(dpu_enc_frame_done_cb,
TRACE_EVENT(dpu_enc_trigger_flush,
TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
- int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits,
+ u32 pending_flush_ret),
TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
- pending_flush_ret),
+ extra_flush_bits, pending_flush_ret),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_intf, intf_idx )
__field( int, pending_kickoff_cnt )
__field( int, ctl_idx )
+ __field( u32, extra_flush_bits )
__field( u32, pending_flush_ret )
),
TP_fast_assign(
@@ -483,12 +485,14 @@ TRACE_EVENT(dpu_enc_trigger_flush,
__entry->intf_idx = intf_idx;
__entry->pending_kickoff_cnt = pending_kickoff_cnt;
__entry->ctl_idx = ctl_idx;
+ __entry->extra_flush_bits = extra_flush_bits;
__entry->pending_flush_ret = pending_flush_ret;
),
TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
- "pending_flush_ret=%u", __entry->drm_id,
- __entry->intf_idx, __entry->pending_kickoff_cnt,
- __entry->ctl_idx, __entry->pending_flush_ret)
+ "extra_flush_bits=0x%x pending_flush_ret=0x%x",
+ __entry->drm_id, __entry->intf_idx,
+ __entry->pending_kickoff_cnt, __entry->ctl_idx,
+ __entry->extra_flush_bits, __entry->pending_flush_ret)
);
DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
@@ -682,37 +686,41 @@ TRACE_EVENT(dpu_crtc_setup_mixer,
TP_STRUCT__entry(
__field( uint32_t, crtc_id )
__field( uint32_t, plane_id )
- __field( struct drm_plane_state*,state )
- __field( struct dpu_plane_state*,pstate )
+ __field( uint32_t, fb_id )
+ __field_struct( struct drm_rect, src_rect )
+ __field_struct( struct drm_rect, dst_rect )
__field( uint32_t, stage_idx )
+ __field( enum dpu_stage, stage )
__field( enum dpu_sspp, sspp )
+ __field( uint32_t, multirect_idx )
+ __field( uint32_t, multirect_mode )
__field( uint32_t, pixel_format )
__field( uint64_t, modifier )
),
TP_fast_assign(
__entry->crtc_id = crtc_id;
__entry->plane_id = plane_id;
- __entry->state = state;
- __entry->pstate = pstate;
+ __entry->fb_id = state ? state->fb->base.id : 0;
+ __entry->src_rect = drm_plane_state_src(state);
+ __entry->dst_rect = drm_plane_state_dest(state);
__entry->stage_idx = stage_idx;
+ __entry->stage = pstate->stage;
__entry->sspp = sspp;
+ __entry->multirect_idx = pstate->multirect_index;
+ __entry->multirect_mode = pstate->multirect_mode;
__entry->pixel_format = pixel_format;
__entry->modifier = modifier;
),
- TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
- "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:" DRM_RECT_FP_FMT
+ " dst:" DRM_RECT_FMT " stage_idx:%u stage:%d, sspp:%d "
"multirect_index:%d multirect_mode:%u pix_format:%u "
"modifier:%llu",
- __entry->crtc_id, __entry->plane_id,
- __entry->state->fb ? __entry->state->fb->base.id : -1,
- __entry->state->src_w >> 16, __entry->state->src_h >> 16,
- __entry->state->src_x >> 16, __entry->state->src_y >> 16,
- __entry->state->crtc_w, __entry->state->crtc_h,
- __entry->state->crtc_x, __entry->state->crtc_y,
- __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
- __entry->pstate->multirect_index,
- __entry->pstate->multirect_mode, __entry->pixel_format,
- __entry->modifier)
+ __entry->crtc_id, __entry->plane_id, __entry->fb_id,
+ DRM_RECT_FP_ARG(&__entry->src_rect),
+ DRM_RECT_ARG(&__entry->dst_rect),
+ __entry->stage_idx, __entry->stage, __entry->sspp,
+ __entry->multirect_idx, __entry->multirect_mode,
+ __entry->pixel_format, __entry->modifier)
);
TRACE_EVENT(dpu_crtc_setup_lm_bounds,
@@ -721,15 +729,15 @@ TRACE_EVENT(dpu_crtc_setup_lm_bounds,
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( int, mixer )
- __field( struct drm_rect *, bounds )
+ __field_struct( struct drm_rect, bounds )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->mixer = mixer;
- __entry->bounds = bounds;
+ __entry->bounds = *bounds;
),
TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
- __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+ __entry->mixer, DRM_RECT_ARG(&__entry->bounds))
);
TRACE_EVENT(dpu_crtc_vblank_enable,
@@ -740,21 +748,25 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
__field( uint32_t, drm_id )
__field( uint32_t, enc_id )
__field( bool, enable )
- __field( struct dpu_crtc *, crtc )
+ __field( bool, enabled )
+ __field( bool, suspend )
+ __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enc_id = enc_id;
__entry->enable = enable;
- __entry->crtc = crtc;
+ __entry->enabled = crtc->enabled;
+ __entry->suspend = crtc->suspend;
+ __entry->vblank_requested = crtc->vblank_requested;
),
TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
"vblank_req:%s}",
__entry->drm_id, __entry->enc_id,
__entry->enable ? "true" : "false",
- __entry->crtc->enabled ? "true" : "false",
- __entry->crtc->suspend ? "true" : "false",
- __entry->crtc->vblank_requested ? "true" : "false")
+ __entry->enabled ? "true" : "false",
+ __entry->suspend ? "true" : "false",
+ __entry->vblank_requested ? "true" : "false")
);
DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@@ -763,18 +775,22 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( bool, enable )
- __field( struct dpu_crtc *, crtc )
+ __field( bool, enabled )
+ __field( bool, suspend )
+ __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enable = enable;
- __entry->crtc = crtc;
+ __entry->enabled = crtc->enabled;
+ __entry->suspend = crtc->suspend;
+ __entry->vblank_requested = crtc->vblank_requested;
),
TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
__entry->drm_id, __entry->enable ? "true" : "false",
- __entry->crtc->enabled ? "true" : "false",
- __entry->crtc->suspend ? "true" : "false",
- __entry->crtc->vblank_requested ? "true" : "false")
+ __entry->enabled ? "true" : "false",
+ __entry->suspend ? "true" : "false",
+ __entry->vblank_requested ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@@ -814,24 +830,24 @@ TRACE_EVENT(dpu_plane_set_scanout,
TP_ARGS(index, layout, multirect_index),
TP_STRUCT__entry(
__field( enum dpu_sspp, index )
- __field( struct dpu_hw_fmt_layout*, layout )
+ __field_struct( struct dpu_hw_fmt_layout, layout )
__field( enum dpu_sspp_multirect_index, multirect_index)
),
TP_fast_assign(
__entry->index = index;
- __entry->layout = layout;
+ __entry->layout = *layout;
__entry->multirect_index = multirect_index;
),
TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
- "multirect_index:%d", __entry->index, __entry->layout->width,
- __entry->layout->height, __entry->layout->plane_addr[0],
- __entry->layout->plane_size[0],
- __entry->layout->plane_addr[1],
- __entry->layout->plane_size[1],
- __entry->layout->plane_addr[2],
- __entry->layout->plane_size[2],
- __entry->layout->plane_addr[3],
- __entry->layout->plane_size[3], __entry->multirect_index)
+ "multirect_index:%d", __entry->index, __entry->layout.width,
+ __entry->layout.height, __entry->layout.plane_addr[0],
+ __entry->layout.plane_size[0],
+ __entry->layout.plane_addr[1],
+ __entry->layout.plane_size[1],
+ __entry->layout.plane_addr[2],
+ __entry->layout.plane_size[2],
+ __entry->layout.plane_addr[3],
+ __entry->layout.plane_size[3], __entry->multirect_index)
);
TRACE_EVENT(dpu_plane_disable,
@@ -868,10 +884,6 @@ DECLARE_EVENT_CLASS(dpu_rm_iter_template,
TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
__entry->enc_id)
);
-DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
-);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
TP_ARGS(id, type, enc_id)
@@ -979,16 +991,16 @@ TRACE_EVENT(dpu_core_perf_update_clk,
TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
TP_ARGS(dev, stop_req, clk_rate),
TP_STRUCT__entry(
- __field( struct drm_device *, dev )
+ __string( dev_name, dev->unique )
__field( bool, stop_req )
__field( u64, clk_rate )
),
TP_fast_assign(
- __entry->dev = dev;
+ __assign_str(dev_name, dev->unique);
__entry->stop_req = stop_req;
__entry->clk_rate = clk_rate;
),
- TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __get_str(dev_name),
__entry->stop_req ? "true" : "false", __entry->clk_rate)
);
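A common thread in the dpu_trace.h hunks: TP_fast_assign() runs when the event fires, but TP_printk() formats the record later, when the trace buffer is read, so any raw pointer stored in the entry may be dangling by then. The fix is to snapshot data by value with __field_struct()/__string(). A minimal sketch of the pattern (the event name is hypothetical):

	TRACE_EVENT(example_snapshot_rect,
		TP_PROTO(const struct drm_rect *r),
		TP_ARGS(r),
		TP_STRUCT__entry(
			__field_struct(struct drm_rect, r) /* copy, not pointer */
		),
		TP_fast_assign(
			__entry->r = *r; /* taken while *r is still valid */
		),
		TP_printk("rect:" DRM_RECT_FMT, DRM_RECT_ARG(&__entry->r))
	);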
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index d5e4f0de321a..310459541e48 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -184,7 +184,7 @@ static void mdp5_plane_reset(struct drm_plane *plane)
struct mdp5_plane_state *mdp5_state;
if (plane->state && plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
+ drm_framebuffer_put(plane->state->fb);
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
@@ -227,7 +227,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
if (state->fb)
- drm_framebuffer_unreference(state->fb);
+ drm_framebuffer_put(state->fb);
kfree(pstate);
}
@@ -258,7 +258,6 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
msm_framebuffer_cleanup(fb, kms->aspace);
}
-#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct drm_plane_state *state)
{
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index ff8164cc6738..a9768f823290 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -83,6 +83,7 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
return ERR_PTR(-ENOMEM);
DBG("dsi probed=%p", msm_dsi);
+ msm_dsi->id = -1;
msm_dsi->pdev = pdev;
platform_set_drvdata(pdev, msm_dsi);
@@ -117,8 +118,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
DBG("");
msm_dsi = dsi_init(pdev);
- if (IS_ERR(msm_dsi))
- return PTR_ERR(msm_dsi);
+ if (IS_ERR(msm_dsi)) {
+ /* Don't fail the bind if the dsi port is not connected */
+ if (PTR_ERR(msm_dsi) == -ENODEV)
+ return 0;
+ else
+ return PTR_ERR(msm_dsi);
+ }
priv->dsi[msm_dsi->id] = msm_dsi;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 96fb5f635314..9c6c523eacdc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1750,6 +1750,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
if (ret) {
dev_err(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
+ ret = -EINVAL;
goto err;
}
@@ -1757,6 +1758,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
dev_dbg(dev, "%s: no valid device\n", __func__);
+ ret = -ENODEV;
goto err;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 5224010d90e4..80aa6344185e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -839,6 +839,8 @@ void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
if (msm_dsi->host)
msm_dsi_host_unregister(msm_dsi->host);
- msm_dsim->dsi[msm_dsi->id] = NULL;
+
+ if (msm_dsi->id >= 0)
+ msm_dsim->dsi[msm_dsi->id] = NULL;
}
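These dsi_manager.c lines work as a pair with the dsi.c hunks above: dsi_init() now starts msm_dsi->id at -1, so an instance that fails before an id is assigned can pass through msm_dsi_manager_unregister() without clobbering priv->dsi[0], and the bind path treats -ENODEV as "DSI port not connected" rather than a fatal error, letting boards without DSI still bring up the rest of the display stack.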
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 69dbdba183fe..5e758d95751a 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -338,7 +338,7 @@ static int msm_drm_uninit(struct device *dev)
mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
kfree(priv);
@@ -453,7 +453,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
- goto err_unref_drm_dev;
+ goto err_put_drm_dev;
}
ddev->dev_private = priv;
@@ -654,8 +654,8 @@ err_destroy_mdss:
mdss->funcs->destroy(ddev);
err_free_priv:
kfree(priv);
-err_unref_drm_dev:
- drm_dev_unref(ddev);
+err_put_drm_dev:
+ drm_dev_put(ddev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 8e510d5c758a..9d11f321f5a9 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -62,6 +62,8 @@ struct msm_gem_vma;
#define MAX_BRIDGES 8
#define MAX_CONNECTORS 8
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
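FRAC_16_16() moves from mdp5_plane.c into msm_drv.h so both the mdp5 and dpu paths can share it. It encodes a ratio as 16.16 fixed point: integer part in the high 16 bits, fraction in the low 16. A standalone userspace demonstration:

	#include <stdio.h>

	#define FRAC_16_16(mult, div) (((mult) << 16) / (div))

	int main(void)
	{
		unsigned int half  = FRAC_16_16(1, 2);       /* 0x8000  == 0.5 */
		unsigned int scale = FRAC_16_16(1920, 1280); /* 0x18000 == 1.5 */

		/* mult must be small enough that (mult << 16) does not overflow */
		printf("0.5 -> 0x%x, 1.5 -> %.3f\n", half, scale / 65536.0);
		return 0;
	}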
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 7bd83e0afa97..7a7923e6220d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -144,7 +144,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
goto out_unlock;
}
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
submit->bos[i].obj = msm_obj;
@@ -396,7 +396,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry);
- drm_gem_object_unreference(&msm_obj->base);
+ drm_gem_object_put(&msm_obj->base);
}
ww_acquire_fini(&submit->ticket);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5e808cfec345..11aac8337066 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -41,7 +41,11 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
if (IS_ERR(opp))
return PTR_ERR(opp);
- clk_set_rate(gpu->core_clk, *freq);
+ if (gpu->funcs->gpu_set_freq)
+ gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
+ else
+ clk_set_rate(gpu->core_clk, *freq);
+
dev_pm_opp_put(opp);
return 0;
@@ -51,16 +55,14 @@ static int msm_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
- u64 cycles;
- u32 freq = ((u32) status->current_frequency) / 1000000;
ktime_t time;
- status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
- gpu->funcs->gpu_busy(gpu, &cycles);
-
- status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
+ if (gpu->funcs->gpu_get_freq)
+ status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
+ else
+ status->current_frequency = clk_get_rate(gpu->core_clk);
- gpu->devfreq.busy_cycles = cycles;
+ status->busy_time = gpu->funcs->gpu_busy(gpu);
time = ktime_get();
status->total_time = ktime_us_delta(time, gpu->devfreq.time);
@@ -73,7 +75,10 @@ static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
- *freq = (unsigned long) clk_get_rate(gpu->core_clk);
+ if (gpu->funcs->gpu_get_freq)
+ *freq = gpu->funcs->gpu_get_freq(gpu);
+ else
+ *freq = clk_get_rate(gpu->core_clk);
return 0;
}
@@ -88,7 +93,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
static void msm_devfreq_init(struct msm_gpu *gpu)
{
/* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy || !gpu->core_clk)
+ if (!gpu->funcs->gpu_busy)
return;
msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -105,6 +110,8 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
gpu->devfreq.devfreq = NULL;
}
+
+ devfreq_suspend_device(gpu->devfreq.devfreq);
}
static int enable_pwrrail(struct msm_gpu *gpu)
@@ -184,6 +191,14 @@ static int disable_axi(struct msm_gpu *gpu)
return 0;
}
+void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
+{
+ gpu->devfreq.busy_cycles = 0;
+ gpu->devfreq.time = ktime_get();
+
+ devfreq_resume_device(gpu->devfreq.devfreq);
+}
+
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
int ret;
@@ -202,12 +217,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- if (gpu->devfreq.devfreq) {
- gpu->devfreq.busy_cycles = 0;
- gpu->devfreq.time = ktime_get();
-
- devfreq_resume_device(gpu->devfreq.devfreq);
- }
+ msm_gpu_resume_devfreq(gpu);
gpu->needs_hw_init = true;
@@ -220,8 +230,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- if (gpu->devfreq.devfreq)
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ devfreq_suspend_device(gpu->devfreq.devfreq);
ret = disable_axi(gpu);
if (ret)
@@ -367,8 +376,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
-static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm,
- char *cmd)
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif
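The devfreq rework changes the gpu_busy() contract: the callback now returns accumulated busy time directly instead of a raw cycle count that the core divided by frequency, and the core no longer requires core_clk, so backends that manage clocks elsewhere (via gpu_set_freq()/gpu_get_freq()) can participate. Note also that devfreq is suspended immediately after registration so it only runs while the GPU is powered; msm_gpu_resume_devfreq() rearms it on resume. A sketch of what a backend's callback might look like, where read_hw_busy_counter() and CYCLES_PER_USEC are hypothetical stand-ins for a real counter read and its tick rate:

	static unsigned long example_gpu_busy(struct msm_gpu *gpu)
	{
		u64 cycles = read_hw_busy_counter(gpu); /* hypothetical */
		unsigned long busy_time;

		/* convert the cycle delta to time at the counter's fixed rate */
		busy_time = (cycles - gpu->devfreq.busy_cycles) / CYCLES_PER_USEC;

		gpu->devfreq.busy_cycles = cycles;
		return busy_time;
	}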
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 9122ee6e55e4..f82bac086666 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -70,9 +70,11 @@ struct msm_gpu_funcs {
/* for generation specific debugfs: */
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
- int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ unsigned long (*gpu_busy)(struct msm_gpu *gpu);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
+ unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
+ void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
};
struct msm_gpu {
@@ -264,6 +266,7 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+void msm_gpu_resume_devfreq(struct msm_gpu *gpu);
int msm_gpu_hw_init(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 3aa8a8576abe..cca933458439 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -366,7 +366,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
va_list args;
va_start(args, fmt);
- n = vsnprintf(msg, sizeof(msg), fmt, args);
+ n = vscnprintf(msg, sizeof(msg), fmt, args);
va_end(args);
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
@@ -375,11 +375,11 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
+ n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, task->comm,
pid_nr(submit->pid), submit->seqno);
} else {
- n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
+ n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
pid_nr(submit->pid), submit->seqno);
}
rcu_read_unlock();
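The msm_rd.c switch to the scn variants matters because snprintf()/vsnprintf() return the length the string would have had without truncation, which can exceed the buffer, and msm_rd uses that return value as a byte count for rd_write_section(). scnprintf()/vscnprintf() return the number of characters actually stored. A userspace illustration of the difference:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[8];
		int n = snprintf(buf, sizeof(buf), "%s", "0123456789abcdef");

		/* n is 16, the untruncated length; only 7 chars + NUL landed
		 * in buf. Using n as a copy length out of buf would read past
		 * the valid data, which the kernel's scnprintf() return avoids.
		 */
		printf("returned %d, stored %zu\n", n, strlen(buf));
		return 0;
	}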
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 1dbd1dcdcf15..5f163a025e89 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -36,6 +36,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_scdc_helper.h>
#include <drm/drm_edid.h>
#include <nvif/class.h>
@@ -531,6 +532,7 @@ nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_disp *disp = nv50_disp(encoder->dev);
@@ -548,9 +550,12 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
.pwr.rekey = 56, /* binary driver, and tegra, constant */
};
struct nouveau_connector *nv_connector;
+ struct drm_hdmi_info *hdmi;
u32 max_ac_packet;
union hdmi_infoframe avi_frame;
union hdmi_infoframe vendor_frame;
+ bool scdc_supported, high_tmds_clock_ratio = false, scrambling = false;
+ u8 config;
int ret;
int size;
@@ -558,8 +563,11 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
+ hdmi = &nv_connector->base.display_info.hdmi;
+ scdc_supported = hdmi->scdc.supported;
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
- false);
+ scdc_supported);
if (!ret) {
/* We have an AVI InfoFrame, populate it to the display */
args.pwr.avi_infoframe_length
@@ -582,12 +590,42 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
max_ac_packet -= 18; /* constant from tegra */
args.pwr.max_ac_packet = max_ac_packet / 32;
+ if (hdmi->scdc.scrambling.supported) {
+ high_tmds_clock_ratio = mode->clock > 340000;
+ scrambling = high_tmds_clock_ratio ||
+ hdmi->scdc.scrambling.low_rates;
+ }
+
+ args.pwr.scdc =
+ NV50_DISP_SOR_HDMI_PWR_V0_SCDC_SCRAMBLE * scrambling |
+ NV50_DISP_SOR_HDMI_PWR_V0_SCDC_DIV_BY_4 * high_tmds_clock_ratio;
+
size = sizeof(args.base)
+ sizeof(args.pwr)
+ args.pwr.avi_infoframe_length
+ args.pwr.vendor_infoframe_length;
nvif_mthd(&disp->disp->object, 0, &args, size);
+
nv50_audio_enable(encoder, mode);
+
+ /* If SCDC is supported by the downstream monitor, update
+ * divider / scrambling settings to what we programmed above.
+ */
+ if (!hdmi->scdc.scrambling.supported)
+ return;
+
+ ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &config);
+ if (ret < 0) {
+ NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
+ return;
+ }
+ config &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
+ config |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 * high_tmds_clock_ratio;
+ config |= SCDC_SCRAMBLING_ENABLE * scrambling;
+ ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, config);
+ if (ret < 0)
+ NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
+ config, ret);
}
/******************************************************************************
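Background for the nv50_hdmi_enable() hunk: HDMI 2.0 requires sinks to enable scrambling and to switch the TMDS bit/character clock ratio from 1/10 to 1/40 once the TMDS clock exceeds 340 MHz (and optionally at lower rates, per scdc.scrambling.low_rates); both settings live in the sink's SCDC register block, reached over the DDC I2C channel. The patch open-codes the read-modify-write of SCDC_TMDS_CONFIG; the generic helpers in <drm/drm_scdc_helper.h> perform the same register updates and could express it as:

	/* equivalent to the drm_scdc_readb()/drm_scdc_writeb() sequence above */
	drm_scdc_set_high_tmds_clock_ratio(nv_encoder->i2c, high_tmds_clock_ratio);
	drm_scdc_set_scrambling(nv_encoder->i2c, scrambling);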
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index 7cdf53615d7b..bced81987269 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -69,7 +69,10 @@ struct nv50_disp_sor_hdmi_pwr_v0 {
__u8 rekey;
__u8 avi_infoframe_length;
__u8 vendor_infoframe_length;
- __u8 pad06[2];
+#define NV50_DISP_SOR_HDMI_PWR_V0_SCDC_SCRAMBLE (1 << 0)
+#define NV50_DISP_SOR_HDMI_PWR_V0_SCDC_DIV_BY_4 (1 << 1)
+ __u8 scdc;
+ __u8 pad07[1];
};
struct nv50_disp_sor_lvds_script_v0 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 408b955e5c39..5f5be6368aed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -37,18 +37,19 @@
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
static struct ida bl_ida;
#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
-struct backlight_connector {
- struct list_head head;
+struct nouveau_backlight {
+ struct backlight_device *dev;
int id;
};
static bool
-nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE], struct backlight_connector
- *connector)
+nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
+ struct nouveau_backlight *bl)
{
const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
if (nb < 0 || nb >= 100)
@@ -57,17 +58,18 @@ nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE], struct backlight_c
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
else
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight");
- connector->id = nb;
+ bl->id = nb;
return true;
}
static int
nv40_get_intensity(struct backlight_device *bd)
{
- struct nouveau_drm *drm = bl_get_data(bd);
+ struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+ struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
- NV40_PMC_BACKLIGHT_MASK) >> 16;
+ NV40_PMC_BACKLIGHT_MASK) >> 16;
return val;
}
@@ -75,13 +77,14 @@ nv40_get_intensity(struct backlight_device *bd)
static int
nv40_set_intensity(struct backlight_device *bd)
{
- struct nouveau_drm *drm = bl_get_data(bd);
+ struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+ struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
int val = bd->props.brightness;
int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
nvif_wr32(device, NV40_PMC_BACKLIGHT,
- (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
+ (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
return 0;
}
@@ -93,38 +96,19 @@ static const struct backlight_ops nv40_bl_ops = {
};
static int
-nv40_backlight_init(struct drm_connector *connector)
+nv40_backlight_init(struct nouveau_encoder *encoder,
+ struct backlight_properties *props,
+ const struct backlight_ops **ops)
{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_drm *drm = nouveau_drm(encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
- struct backlight_properties props;
- struct backlight_device *bd;
- struct backlight_connector bl_connector;
- char backlight_name[BL_NAME_SIZE];
if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
- return 0;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = 31;
- if (!nouveau_get_backlight_name(backlight_name, &bl_connector)) {
- NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
- return 0;
- }
- bd = backlight_device_register(backlight_name , connector->kdev, drm,
- &nv40_bl_ops, &props);
-
- if (IS_ERR(bd)) {
- if (bl_connector.id > 0)
- ida_simple_remove(&bl_ida, bl_connector.id);
- return PTR_ERR(bd);
- }
- list_add(&bl_connector.head, &drm->bl_connectors);
- drm->backlight = bd;
- bd->props.brightness = nv40_get_intensity(bd);
- backlight_update_status(bd);
+ return -ENODEV;
+ props->type = BACKLIGHT_RAW;
+ props->max_brightness = 31;
+ *ops = &nv40_bl_ops;
return 0;
}
@@ -154,7 +138,7 @@ nv50_set_intensity(struct backlight_device *bd)
u32 val = (bd->props.brightness * div) / 100;
nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
- NV50_PDISP_SOR_PWM_CTL_NEW | val);
+ NV50_PDISP_SOR_PWM_CTL_NEW | val);
return 0;
}
@@ -194,9 +178,10 @@ nva3_set_intensity(struct backlight_device *bd)
div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
val = (bd->props.brightness * div) / 100;
if (div) {
- nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
- NV50_PDISP_SOR_PWM_CTL_NEW |
- NVA3_PDISP_SOR_PWM_CTL_UNK);
+ nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
+ val |
+ NV50_PDISP_SOR_PWM_CTL_NEW |
+ NVA3_PDISP_SOR_PWM_CTL_UNK);
return 0;
}
@@ -210,110 +195,119 @@ static const struct backlight_ops nva3_bl_ops = {
};
static int
-nv50_backlight_init(struct drm_connector *connector)
+nv50_backlight_init(struct nouveau_encoder *nv_encoder,
+ struct backlight_properties *props,
+ const struct backlight_ops **ops)
{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
- struct nouveau_encoder *nv_encoder;
- struct backlight_properties props;
- struct backlight_device *bd;
- const struct backlight_ops *ops;
- struct backlight_connector bl_connector;
- char backlight_name[BL_NAME_SIZE];
-
- nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
- if (!nv_encoder) {
- nv_encoder = find_encoder(connector, DCB_OUTPUT_DP);
- if (!nv_encoder)
- return -ENODEV;
- }
if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
- return 0;
+ return -ENODEV;
if (drm->client.device.info.chipset <= 0xa0 ||
drm->client.device.info.chipset == 0xaa ||
drm->client.device.info.chipset == 0xac)
- ops = &nv50_bl_ops;
+ *ops = &nv50_bl_ops;
else
- ops = &nva3_bl_ops;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = 100;
- if (!nouveau_get_backlight_name(backlight_name, &bl_connector)) {
- NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
- return 0;
- }
- bd = backlight_device_register(backlight_name , connector->kdev,
- nv_encoder, ops, &props);
+ *ops = &nva3_bl_ops;
- if (IS_ERR(bd)) {
- if (bl_connector.id > 0)
- ida_simple_remove(&bl_ida, bl_connector.id);
- return PTR_ERR(bd);
- }
+ props->type = BACKLIGHT_RAW;
+ props->max_brightness = 100;
- list_add(&bl_connector.head, &drm->bl_connectors);
- drm->backlight = bd;
- bd->props.brightness = bd->ops->get_brightness(bd);
- backlight_update_status(bd);
return 0;
}
int
-nouveau_backlight_init(struct drm_device *dev)
+nouveau_backlight_init(struct drm_connector *connector)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct nouveau_backlight *bl;
+ struct nouveau_encoder *nv_encoder = NULL;
struct nvif_device *device = &drm->client.device;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- INIT_LIST_HEAD(&drm->bl_connectors);
+ char backlight_name[BL_NAME_SIZE];
+ struct backlight_properties props = {0};
+ const struct backlight_ops *ops;
+ int ret;
if (apple_gmux_present()) {
- NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
+ NV_INFO_ONCE(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
return 0;
}
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
- connector->connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
- switch (device->info.family) {
- case NV_DEVICE_INFO_V0_CURIE:
- return nv40_backlight_init(connector);
- case NV_DEVICE_INFO_V0_TESLA:
- case NV_DEVICE_INFO_V0_FERMI:
- case NV_DEVICE_INFO_V0_KEPLER:
- case NV_DEVICE_INFO_V0_MAXWELL:
- return nv50_backlight_init(connector);
- default:
- break;
- }
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ nv_encoder = find_encoder(connector, DCB_OUTPUT_DP);
+ else
+ return 0;
+
+ if (!nv_encoder)
+ return 0;
+
+ switch (device->info.family) {
+ case NV_DEVICE_INFO_V0_CURIE:
+ ret = nv40_backlight_init(nv_encoder, &props, &ops);
+ break;
+ case NV_DEVICE_INFO_V0_TESLA:
+ case NV_DEVICE_INFO_V0_FERMI:
+ case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
+ ret = nv50_backlight_init(nv_encoder, &props, &ops);
+ break;
+ default:
+ return 0;
}
- drm_connector_list_iter_end(&conn_iter);
+
+ if (ret == -ENODEV)
+ return 0;
+ else if (ret)
+ return ret;
+
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+ if (!bl)
+ return -ENOMEM;
+
+ if (!nouveau_get_backlight_name(backlight_name, bl)) {
+ NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
+ goto fail_alloc;
+ }
+
+ bl->dev = backlight_device_register(backlight_name, connector->kdev,
+ nv_encoder, ops, &props);
+ if (IS_ERR(bl->dev)) {
+ if (bl->id >= 0)
+ ida_simple_remove(&bl_ida, bl->id);
+ ret = PTR_ERR(bl->dev);
+ goto fail_alloc;
+ }
+
+ nouveau_connector(connector)->backlight = bl;
+ bl->dev->props.brightness = bl->dev->ops->get_brightness(bl->dev);
+ backlight_update_status(bl->dev);
return 0;
+
+fail_alloc:
+ kfree(bl);
+ return ret;
}
void
-nouveau_backlight_exit(struct drm_device *dev)
+nouveau_backlight_fini(struct drm_connector *connector)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct backlight_connector *connector;
+ struct nouveau_connector *nv_conn = nouveau_connector(connector);
+ struct nouveau_backlight *bl = nv_conn->backlight;
- list_for_each_entry(connector, &drm->bl_connectors, head) {
- if (connector->id >= 0)
- ida_simple_remove(&bl_ida, connector->id);
- }
+ if (!bl)
+ return;
- if (drm->backlight) {
- backlight_device_unregister(drm->backlight);
- drm->backlight = NULL;
- }
+ if (bl->id >= 0)
+ ida_simple_remove(&bl_ida, bl->id);
+
+ backlight_device_unregister(bl->dev);
+ nv_conn->backlight = NULL;
+ kfree(bl);
}
void
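The backlight rework replaces a device-wide, register-once scheme (a single drm->backlight plus a bl_connectors list, populated by walking the connector list at display-create time) with a per-connector struct nouveau_backlight that is registered from the connector's .late_register hook and torn down in .early_unregister, so each backlight's lifetime and its ida-allocated name now track the connector that owns it.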
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index dde8724aa8f5..fd80661dff92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -886,6 +886,22 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
}
static int
+nouveau_connector_late_register(struct drm_connector *connector)
+{
+ int ret;
+
+ ret = nouveau_backlight_init(connector);
+
+ return ret;
+}
+
+static void
+nouveau_connector_early_unregister(struct drm_connector *connector)
+{
+ nouveau_backlight_fini(connector);
+}
+
+static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -953,18 +969,33 @@ nouveau_connector_get_modes(struct drm_connector *connector)
}
static unsigned
-get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
+get_tmds_link_bandwidth(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
+ struct drm_display_info *info = NULL;
+ const unsigned duallink_scale =
+ nouveau_duallink && nv_encoder->dcb->duallink_possible ? 2 : 1;
+
+ if (drm_detect_hdmi_monitor(nv_connector->edid))
+ info = &nv_connector->base.display_info;
- if (hdmi) {
+ if (info) {
if (nouveau_hdmimhz > 0)
return nouveau_hdmimhz * 1000;
/* Note: these limits are conservative, some Fermi's
* can do 297 MHz. Unclear how this can be determined.
*/
+ if (drm->client.device.info.chipset >= 0x120) {
+ const int max_tmds_clock =
+ info->hdmi.scdc.scrambling.supported ?
+ 594000 : 340000;
+ return info->max_tmds_clock ?
+ min(info->max_tmds_clock, max_tmds_clock) :
+ max_tmds_clock;
+ }
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KEPLER)
return 297000;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
@@ -972,13 +1003,13 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
}
if (dcb->location != DCB_LOC_ON_CHIP ||
drm->client.device.info.chipset >= 0x46)
- return 165000;
+ return 165000 * duallink_scale;
else if (drm->client.device.info.chipset >= 0x40)
- return 155000;
+ return 155000 * duallink_scale;
else if (drm->client.device.info.chipset >= 0x18)
- return 135000;
+ return 135000 * duallink_scale;
else
- return 112000;
+ return 112000 * duallink_scale;
}
static enum drm_mode_status
@@ -990,7 +1021,6 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
unsigned min_clock = 25000, max_clock = min_clock;
unsigned clock = mode->clock;
- bool hdmi;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_LVDS:
@@ -1003,11 +1033,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
max_clock = 400000;
break;
case DCB_OUTPUT_TMDS:
- hdmi = drm_detect_hdmi_monitor(nv_connector->edid);
- max_clock = get_tmds_link_bandwidth(connector, hdmi);
- if (!hdmi && nouveau_duallink &&
- nv_encoder->dcb->duallink_possible)
- max_clock *= 2;
+ max_clock = get_tmds_link_bandwidth(connector);
break;
case DCB_OUTPUT_ANALOG:
max_clock = nv_encoder->dcb->crtconf.maxfreq;
@@ -1069,6 +1095,8 @@ nouveau_connector_funcs = {
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
.atomic_set_property = nouveau_conn_atomic_set_property,
.atomic_get_property = nouveau_conn_atomic_get_property,
+ .late_register = nouveau_connector_late_register,
+ .early_unregister = nouveau_connector_early_unregister,
};
static const struct drm_connector_funcs
@@ -1084,6 +1112,8 @@ nouveau_connector_funcs_lvds = {
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
.atomic_set_property = nouveau_conn_atomic_set_property,
.atomic_get_property = nouveau_conn_atomic_get_property,
+ .late_register = nouveau_connector_late_register,
+ .early_unregister = nouveau_connector_early_unregister,
};
static int
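A worked number for the get_tmds_link_bandwidth() rework: a CEA 4K60 mode (3840x2160@60) carries a 594 MHz TMDS clock, well past the 340 MHz HDMI 1.4 ceiling, so on chipsets >= 0x120 (GM20x and later) the new code allows it only when the sink's EDID advertises SCDC scrambling support, and it further honors the sink's declared max_tmds_clock. Dual-link doubling now applies only on the DVI (non-HDMI) path. A condensed restatement of the cap selection above (values in kHz):

	max = info->hdmi.scdc.scrambling.supported ? 594000 : 340000;
	if (info->max_tmds_clock)
		max = min(info->max_tmds_clock, max);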
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 0acc07555bcd..f57ef35b1e5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -39,6 +39,10 @@
struct nvkm_i2c_port;
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+struct nouveau_backlight;
+#endif
+
struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
@@ -55,6 +59,9 @@ struct nouveau_connector {
struct nouveau_encoder *detected_encoder;
struct edid *edid;
struct drm_display_mode *native_mode;
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+ struct nouveau_backlight *backlight;
+#endif
};
static inline struct nouveau_connector *nouveau_connector(
@@ -181,4 +188,30 @@ int nouveau_conn_atomic_get_property(struct drm_connector *,
const struct drm_connector_state *,
struct drm_property *, u64 *);
struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
+
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+extern int nouveau_backlight_init(struct drm_connector *);
+extern void nouveau_backlight_fini(struct drm_connector *);
+extern void nouveau_backlight_ctor(void);
+extern void nouveau_backlight_dtor(void);
+#else
+static inline int
+nouveau_backlight_init(struct drm_connector *connector)
+{
+ return 0;
+}
+
+static inline void
+nouveau_backlight_fini(struct drm_connector *connector) {
+}
+
+static inline void
+nouveau_backlight_ctor(void) {
+}
+
+static inline void
+nouveau_backlight_dtor(void) {
+}
+#endif
+
#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 540c0cbbfcee..f326ffd86766 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -582,7 +582,6 @@ nouveau_display_create(struct drm_device *dev)
goto vblank_err;
}
- nouveau_backlight_init(dev);
INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
#ifdef CONFIG_ACPI
drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
@@ -607,7 +606,6 @@ nouveau_display_destroy(struct drm_device *dev)
#ifdef CONFIG_ACPI
unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
#endif
- nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index ff92b54ce448..eb77e41c2d4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -85,31 +85,6 @@ int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
-#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
-extern int nouveau_backlight_init(struct drm_device *);
-extern void nouveau_backlight_exit(struct drm_device *);
-extern void nouveau_backlight_ctor(void);
-extern void nouveau_backlight_dtor(void);
-#else
-static inline int
-nouveau_backlight_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void
-nouveau_backlight_exit(struct drm_device *dev) {
-}
-
-static inline void
-nouveau_backlight_ctor(void) {
-}
-
-static inline void
-nouveau_backlight_dtor(void) {
-}
-#endif
-
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
const struct drm_mode_fb_cmd2 *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 74d2283f2c28..2b2baf6e0e0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -458,75 +458,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
nouveau_bo_move_init(drm);
}
-static int nouveau_drm_probe(struct pci_dev *pdev,
- const struct pci_device_id *pent)
-{
- struct nvkm_device *device;
- struct apertures_struct *aper;
- bool boot = false;
- int ret;
-
- if (vga_switcheroo_client_probe_defer(pdev))
- return -EPROBE_DEFER;
-
- /* We need to check that the chipset is supported before booting
- * fbdev off the hardware, as there's no way to put it back.
- */
- ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
- if (ret)
- return ret;
-
- nvkm_device_del(&device);
-
- /* Remove conflicting drivers (vesafb, efifb etc). */
- aper = alloc_apertures(3);
- if (!aper)
- return -ENOMEM;
-
- aper->ranges[0].base = pci_resource_start(pdev, 1);
- aper->ranges[0].size = pci_resource_len(pdev, 1);
- aper->count = 1;
-
- if (pci_resource_len(pdev, 2)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
- aper->count++;
- }
-
- if (pci_resource_len(pdev, 3)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
- aper->count++;
- }
-
-#ifdef CONFIG_X86
- boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- if (nouveau_modeset != 2)
- drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
- kfree(aper);
-
- ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
- true, true, ~0ULL, &device);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- if (nouveau_atomic)
- driver_pci.driver_features |= DRIVER_ATOMIC;
-
- ret = drm_get_pci_dev(pdev, pent, &driver_pci);
- if (ret) {
- nvkm_device_del(&device);
- return ret;
- }
-
- return 0;
-}
-
static int
-nouveau_drm_load(struct drm_device *dev, unsigned long flags)
+nouveau_drm_device_init(struct drm_device *dev)
{
struct nouveau_drm *drm;
int ret;
@@ -538,11 +471,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
- return ret;
+ goto fail_alloc;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
- return ret;
+ goto fail_master;
dev->irq_enabled = true;
@@ -605,13 +538,15 @@ fail_bios:
fail_ttm:
nouveau_vga_fini(drm);
nouveau_cli_fini(&drm->client);
+fail_master:
nouveau_cli_fini(&drm->master);
+fail_alloc:
kfree(drm);
return ret;
}
static void
-nouveau_drm_unload(struct drm_device *dev)
+nouveau_drm_device_fini(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -640,18 +575,116 @@ nouveau_drm_unload(struct drm_device *dev)
kfree(drm);
}
+static int nouveau_drm_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pent)
+{
+ struct nvkm_device *device;
+ struct drm_device *drm_dev;
+ struct apertures_struct *aper;
+ bool boot = false;
+ int ret;
+
+ if (vga_switcheroo_client_probe_defer(pdev))
+ return -EPROBE_DEFER;
+
+ /* We need to check that the chipset is supported before booting
+ * fbdev off the hardware, as there's no way to put it back.
+ */
+ ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ if (ret)
+ return ret;
+
+ nvkm_device_del(&device);
+
+ /* Remove conflicting drivers (vesafb, efifb etc). */
+ aper = alloc_apertures(3);
+ if (!aper)
+ return -ENOMEM;
+
+ aper->ranges[0].base = pci_resource_start(pdev, 1);
+ aper->ranges[0].size = pci_resource_len(pdev, 1);
+ aper->count = 1;
+
+ if (pci_resource_len(pdev, 2)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+ aper->count++;
+ }
+
+ if (pci_resource_len(pdev, 3)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+ aper->count++;
+ }
+
+#ifdef CONFIG_X86
+ boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ if (nouveau_modeset != 2)
+ drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+ kfree(aper);
+
+ ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
+ true, true, ~0ULL, &device);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ if (nouveau_atomic)
+ driver_pci.driver_features |= DRIVER_ATOMIC;
+
+ drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
+ if (IS_ERR(drm_dev)) {
+ ret = PTR_ERR(drm_dev);
+ goto fail_nvkm;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto fail_drm;
+
+ drm_dev->pdev = pdev;
+ pci_set_drvdata(pdev, drm_dev);
+
+ ret = nouveau_drm_device_init(drm_dev);
+ if (ret)
+ goto fail_pci;
+
+ ret = drm_dev_register(drm_dev, pent->driver_data);
+ if (ret)
+ goto fail_drm_dev_init;
+
+ return 0;
+
+fail_drm_dev_init:
+ nouveau_drm_device_fini(drm_dev);
+fail_pci:
+ pci_disable_device(pdev);
+fail_drm:
+ drm_dev_put(drm_dev);
+fail_nvkm:
+ nvkm_device_del(&device);
+ return ret;
+}
+
void
nouveau_drm_device_remove(struct drm_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_client *client;
struct nvkm_device *device;
+ drm_dev_unregister(dev);
+
dev->irq_enabled = false;
client = nvxx_client(&drm->client.base);
device = nvkm_device_find(client->device);
- drm_put_dev(dev);
+ nouveau_drm_device_fini(dev);
+ pci_disable_device(pdev);
+ drm_dev_put(dev);
nvkm_device_del(&device);
}
@@ -1018,8 +1051,6 @@ driver_stub = {
DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
DRIVER_KMS_LEGACY_CONTEXT,
- .load = nouveau_drm_load,
- .unload = nouveau_drm_unload,
.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
.lastclose = nouveau_vga_lastclose,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 6e1acaec3400..0b2191fa96f7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -194,8 +194,6 @@ struct nouveau_drm {
/* modesetting */
struct nvbios vbios;
struct nouveau_display *display;
- struct backlight_device *backlight;
- struct list_head bl_connectors;
struct work_struct hpd_work;
struct work_struct fbcon_work;
int fbcon_new_state;
@@ -244,10 +242,12 @@ void nouveau_drm_device_remove(struct drm_device *dev);
struct nouveau_cli *_cli = (c); \
dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a); \
} while(0)
+
#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
+
#define NV_DEBUG(drm,f,a...) do { \
if (unlikely(drm_debug & DRM_UT_DRIVER)) \
NV_PRINTK(info, &(drm)->client, f, ##a); \
@@ -257,6 +257,12 @@ void nouveau_drm_device_remove(struct drm_device *dev);
NV_PRINTK(info, &(drm)->client, f, ##a); \
} while(0)
+#define NV_PRINTK_ONCE(l,c,f,a...) NV_PRINTK(l##_once,c,f, ##a)
+
+#define NV_ERROR_ONCE(drm,f,a...) NV_PRINTK_ONCE(err, &(drm)->client, f, ##a)
+#define NV_WARN_ONCE(drm,f,a...) NV_PRINTK_ONCE(warn, &(drm)->client, f, ##a)
+#define NV_INFO_ONCE(drm,f,a...) NV_PRINTK_ONCE(info, &(drm)->client, f, ##a)
+
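For reference, the new `_once` variants expand through dev_warn_once() and friends, so a condition that would otherwise spam the log fires exactly once per boot. A minimal usage sketch (the message text is illustrative):

	NV_WARN_ONCE(drm, "unsupported SCDC configuration, scrambling disabled\n");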
extern int nouveau_modeset;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 3d485dbf310a..8089ac9a12e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -50,6 +50,7 @@ nvkm-y += nvkm/engine/disp/hdmig84.o
nvkm-y += nvkm/engine/disp/hdmigt215.o
nvkm-y += nvkm/engine/disp/hdmigf119.o
nvkm-y += nvkm/engine/disp/hdmigk104.o
+nvkm-y += nvkm/engine/disp/hdmigm200.o
nvkm-y += nvkm/engine/disp/hdmigv100.o
nvkm-y += nvkm/engine/disp/conn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
new file mode 100644
index 000000000000..9b16a08eb4d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ilia Mirkin
+ */
+#include "hdmi.h"
+
+void
+gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 hoff = head * 0x800;
+ const u32 ctrl = scdc & 0x3;
+
+ nvkm_mask(device, 0x61c5bc + hoff, 0x00000003, ctrl);
+
+ ior->tmds.high_speed = !!(scdc & 0x2);
+}
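A sketch of how a caller might form the two-bit value consumed here, assuming the bits mirror the HDMI 2.0 SCDC TMDS_Config layout (bit 0 = scrambling enable, bit 1 = high TMDS clock ratio); the helper is hypothetical, not part of this patch:

	static u8 scdc_config_bits(bool scramble, unsigned long tmds_khz)
	{
		u8 scdc = 0;

		if (scramble)
			scdc |= 0x1;
		if (tmds_khz > 340000)	/* >340 MHz requires the 1/40 ratio */
			scdc |= 0x2;	/* latched into ior->tmds.high_speed */
		return scdc;
	}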
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 19911211a12a..0f0c86c32ec3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -41,6 +41,11 @@ struct nvkm_ior {
u8 nr;
u8 bw;
} dp;
+
+ /* Armed TMDS state. */
+ struct {
+ bool high_speed;
+ } tmds;
};
struct nvkm_ior_func {
@@ -61,6 +66,7 @@ struct nvkm_ior_func {
void (*ctrl)(struct nvkm_ior *, int head, bool enable,
u8 max_ac_packet, u8 rekey, u8 *avi, u8 avi_size,
u8 *vendor, u8 vendor_size);
+ void (*scdc)(struct nvkm_ior *, int head, u8 scdc);
} hdmi;
struct {
@@ -144,6 +150,8 @@ void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gk104_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
+void gm200_hdmi_scdc(struct nvkm_ior *, int, u8);
+
void gt215_hda_hpd(struct nvkm_ior *, int, bool);
void gt215_hda_eld(struct nvkm_ior *, u8 *, u8);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 3aa5a2879239..5f758948d6e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -176,9 +176,10 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
- "max_ac_packet %d rekey %d\n",
+ "max_ac_packet %d rekey %d scdc %d\n",
args->v0.version, args->v0.state,
- args->v0.max_ac_packet, args->v0.rekey);
+ args->v0.max_ac_packet, args->v0.rekey,
+ args->v0.scdc);
if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
return -EINVAL;
if ((args->v0.avi_infoframe_length
@@ -202,6 +203,11 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
args->v0.max_ac_packet,
args->v0.rekey, avi, avi_size,
vendor, vendor_size);
+
+ if (outp->ior->func->hdmi.scdc)
+ outp->ior->func->hdmi.scdc(
+ outp->ior, hidx, args->v0.scdc);
+
return 0;
}
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index e6e6dfbb1283..456a5a143522 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -120,13 +120,16 @@ void
gf119_sor_clock(struct nvkm_ior *sor)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
- const int div = sor->asy.link == 3;
const u32 soff = nv50_ior_base(sor);
+ u32 div1 = sor->asy.link == 3;
+ u32 div2 = sor->asy.link == 3;
if (sor->asy.proto == TMDS) {
- /* NFI why, but this sets DP_LINK_BW_2_7 when using TMDS. */
- nvkm_mask(device, 0x612300 + soff, 0x007c0000, 0x0a << 18);
+ const u32 speed = sor->tmds.high_speed ? 0x14 : 0x0a;
+ nvkm_mask(device, 0x612300 + soff, 0x007c0000, speed << 18);
+ if (sor->tmds.high_speed)
+ div2 = 1;
}
- nvkm_mask(device, 0x612300 + soff, 0x00000707, (div << 8) | div);
+ nvkm_mask(device, 0x612300 + soff, 0x00000707, (div2 << 8) | div1);
}
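The 0x0a/0x14 values written to bits 22:18 match the DP link-bandwidth codes (link rate in units of 0.27 Gbps), which suggests the SOR clock field is shared between DP and TMDS modes; for reference, from <drm/drm_dp_helper.h>:

	#define DP_LINK_BW_1_62		0x06	/*  6 * 0.27 = 1.62 Gbps */
	#define DP_LINK_BW_2_7		0x0a	/* 10 * 0.27 = 2.70 Gbps */
	#define DP_LINK_BW_5_4		0x14	/* 20 * 0.27 = 5.40 Gbps */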
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index d892bdf04034..384f82652bec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -99,6 +99,7 @@ gm200_sor = {
.clock = gf119_sor_clock,
.hdmi = {
.ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
},
.dp = {
.lanes = { 0, 1, 2, 3 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
index 040db8a338de..8ba881a729ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -88,6 +88,7 @@ gv100_sor = {
.clock = gf119_sor_clock,
.hdmi = {
.ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
},
.dp = {
.lanes = { 0, 1, 2, 3 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index d02e183717dc..5c14d6ac855d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -801,6 +801,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
bl = acr->hsbl_unload_blob;
} else {
nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
+ kfree(bl_desc);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index e61a9592a650..ba82d916719c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -1140,18 +1140,6 @@ static void dispc_ovl_set_color_mode(struct dispc_device *dispc,
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
-static bool format_is_yuv(u32 fourcc)
-{
- switch (fourcc) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_NV12:
- return true;
- default:
- return false;
- }
-}
-
static void dispc_ovl_configure_burst_type(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_dss_rotation_type rotation)
@@ -1910,11 +1898,14 @@ static void dispc_ovl_set_scaling_uv(struct dispc_device *dispc,
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
bool chroma_upscale = plane != OMAP_DSS_WB;
+ const struct drm_format_info *info;
+
+ info = drm_format_info(fourcc);
if (!dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE))
return;
- if (!format_is_yuv(fourcc)) {
+ if (!info->is_yuv) {
/* reset chroma resampling for RGB formats */
if (plane != OMAP_DSS_WB)
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
@@ -2624,7 +2615,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
unsigned int offset0, offset1;
s32 row_inc;
s32 pix_inc;
- u16 frame_width, frame_height;
+ u16 frame_width;
unsigned int field_offset = 0;
u16 in_height = height;
u16 in_width = width;
@@ -2632,6 +2623,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
unsigned long pclk = dispc_plane_pclk_rate(dispc, plane);
unsigned long lclk = dispc_plane_lclk_rate(dispc, plane);
+ const struct drm_format_info *info;
+
+ info = drm_format_info(fourcc);
/* when setting up WB, dispc_plane_pclk_rate() returns 0 */
if (plane == OMAP_DSS_WB)
@@ -2640,7 +2634,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER)
return -EINVAL;
- if (format_is_yuv(fourcc) && (in_width & 1)) {
+ if (info->is_yuv && (in_width & 1)) {
DSSERR("input width %d is not even for YUV format\n", in_width);
return -EINVAL;
}
@@ -2680,7 +2674,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
DSSDBG("predecimation %d x %x, new input size %d x %d\n",
x_predecim, y_predecim, in_width, in_height);
- if (format_is_yuv(fourcc) && (in_width & 1)) {
+ if (info->is_yuv && (in_width & 1)) {
DSSDBG("predecimated input width is not even for YUV format\n");
DSSDBG("adjusting input width %d -> %d\n",
in_width, in_width & ~1);
@@ -2688,7 +2682,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
in_width &= ~1;
}
- if (format_is_yuv(fourcc))
+ if (info->is_yuv)
cconv = 1;
if (ilace && !fieldmode) {
@@ -2714,13 +2708,10 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
row_inc = 0;
pix_inc = 0;
- if (plane == OMAP_DSS_WB) {
+ if (plane == OMAP_DSS_WB)
frame_width = out_width;
- frame_height = out_height;
- } else {
+ else
frame_width = in_width;
- frame_height = height;
- }
calc_offset(screen_width, frame_width,
fourcc, fieldmode, field_offset,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 19fc4dfc429e..1aaf260aa9b8 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -947,7 +947,7 @@ dss_debugfs_create_file(struct dss_device *dss, const char *name,
&dss_debug_fops);
if (IS_ERR(d)) {
kfree(entry);
- return ERR_PTR(PTR_ERR(d));
+ return ERR_CAST(d);
}
entry->dentry = d;
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 98f5ca29444a..b81302c4bf9e 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -164,10 +164,11 @@ static void omap_connector_destroy(struct drm_connector *connector)
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- kfree(omap_connector);
omapdss_device_put(omap_connector->output);
omapdss_device_put(omap_connector->display);
+
+ kfree(omap_connector);
}
#define MAX_EDID 512
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index c2785cc98dc9..60bb3f9297bc 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -159,6 +159,7 @@ struct dmm_platform_data {
struct dmm {
struct device *dev;
+ dma_addr_t phys_base;
void __iomem *base;
int irq;
@@ -189,6 +190,12 @@ struct dmm {
struct list_head alloc_head;
const struct dmm_platform_data *plat_data;
+
+ bool dmm_workaround;
+ spinlock_t wa_lock;
+ u32 *wa_dma_data;
+ dma_addr_t wa_dma_handle;
+ struct dma_chan *wa_dma_chan;
};
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f92fe205550b..252f5ebb1acc 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -18,6 +18,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -79,14 +80,138 @@ static const u32 reg[][4] = {
DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};
+static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
+{
+ struct dma_device *dma_dev = dmm->wa_dma_chan->device;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_status status;
+ dma_cookie_t cookie;
+
+ tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+ if (!tx) {
+ dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(dmm->dev, "Failed to do DMA tx_submit\n");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(dmm->wa_dma_chan);
+ status = dma_sync_wait(dmm->wa_dma_chan, cookie);
+ if (status != DMA_COMPLETE)
+ dev_err(dmm->dev, "i878 wa DMA copy failure\n");
+
+ dmaengine_terminate_all(dmm->wa_dma_chan);
+ return 0;
+}
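Note that dma_sync_wait() polls the cookie until the transfer completes (or an internal timeout expires), so dmm_dma_copy() is fully synchronous; the dmaengine_terminate_all() that follows returns the channel to a known-idle state either way.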
+
+static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
+{
+ dma_addr_t src, dst;
+ int r;
+
+ src = dmm->phys_base + reg;
+ dst = dmm->wa_dma_handle;
+
+ r = dmm_dma_copy(dmm, src, dst);
+ if (r) {
+ dev_err(dmm->dev, "sDMA read transfer timeout\n");
+ return readl(dmm->base + reg);
+ }
+
+	/*
+	 * Per the i878 workaround, DMA is used to access the DMM registers.
+	 * Make sure the readl is not reordered by the compiler or the CPU
+	 * to before the DMA has finished writing the value to memory.
+	 */
+ rmb();
+ return readl(dmm->wa_dma_data);
+}
+
+static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
+{
+ dma_addr_t src, dst;
+ int r;
+
+ writel(val, dmm->wa_dma_data);
+	/*
+	 * Per the i878 workaround, DMA is used to access the DMM registers.
+	 * Make sure the writel is not reordered by the compiler or the CPU,
+	 * so the data is in place before the DMA performs the actual
+	 * register write.
+	 */
+ wmb();
+
+ src = dmm->wa_dma_handle;
+ dst = dmm->phys_base + reg;
+
+ r = dmm_dma_copy(dmm, src, dst);
+ if (r) {
+ dev_err(dmm->dev, "sDMA write transfer timeout\n");
+ writel(val, dmm->base + reg);
+ }
+}
+
static u32 dmm_read(struct dmm *dmm, u32 reg)
{
- return readl(dmm->base + reg);
+ if (dmm->dmm_workaround) {
+ u32 v;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dmm->wa_lock, flags);
+ v = dmm_read_wa(dmm, reg);
+ spin_unlock_irqrestore(&dmm->wa_lock, flags);
+
+ return v;
+ } else {
+ return readl(dmm->base + reg);
+ }
}
static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
- writel(val, dmm->base + reg);
+ if (dmm->dmm_workaround) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dmm->wa_lock, flags);
+ dmm_write_wa(dmm, val, reg);
+ spin_unlock_irqrestore(&dmm->wa_lock, flags);
+ } else {
+ writel(val, dmm->base + reg);
+ }
+}
+
+static int dmm_workaround_init(struct dmm *dmm)
+{
+ dma_cap_mask_t mask;
+
+ spin_lock_init(&dmm->wa_lock);
+
+ dmm->wa_dma_data = dma_alloc_coherent(dmm->dev, sizeof(u32),
+ &dmm->wa_dma_handle, GFP_KERNEL);
+ if (!dmm->wa_dma_data)
+ return -ENOMEM;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dmm->wa_dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!dmm->wa_dma_chan) {
+		dma_free_coherent(dmm->dev, sizeof(u32), dmm->wa_dma_data,
+				  dmm->wa_dma_handle);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void dmm_workaround_uninit(struct dmm *dmm)
+{
+ dma_release_channel(dmm->wa_dma_chan);
+
+	dma_free_coherent(dmm->dev, sizeof(u32), dmm->wa_dma_data,
+			  dmm->wa_dma_handle);
}
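All register accesses funnel through the dmm_read()/dmm_write() wrappers above, so the proxy is transparent to the rest of the driver; if the DMA path fails, both helpers fall back to direct MMIO, making the workaround best-effort rather than fatal.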
/* simple allocator to grab next 16 byte aligned memory from txn */
@@ -285,6 +410,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
}
txn->last_pat->next_pa = 0;
+ /* ensure that the written descriptors are visible to DMM */
+ wmb();
+
+ /*
+ * NOTE: the wmb() above should be enough, but there seems to be a bug
+ * in OMAP's memory barrier implementation, which in some rare cases may
+ * cause the writes not to be observable after wmb().
+ */
+
+ /* read back to ensure the data is in RAM */
+ readl(&txn->last_pat->next_pa);
/* write to PAT_DESCR to clear out any pending transaction */
dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
@@ -603,6 +739,10 @@ static int omap_dmm_remove(struct platform_device *dev)
unsigned long flags;
if (omap_dmm) {
+ /* Disable all enabled interrupts */
+ dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_CLR);
+ free_irq(omap_dmm->irq, omap_dmm);
+
/* free all area regions */
spin_lock_irqsave(&list_lock, flags);
list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
@@ -625,8 +765,8 @@ static int omap_dmm_remove(struct platform_device *dev)
if (omap_dmm->dummy_page)
__free_page(omap_dmm->dummy_page);
- if (omap_dmm->irq > 0)
- free_irq(omap_dmm->irq, omap_dmm);
+ if (omap_dmm->dmm_workaround)
+ dmm_workaround_uninit(omap_dmm);
iounmap(omap_dmm->base);
kfree(omap_dmm);
@@ -673,6 +813,7 @@ static int omap_dmm_probe(struct platform_device *dev)
goto fail;
}
+ omap_dmm->phys_base = mem->start;
omap_dmm->base = ioremap(mem->start, SZ_2K);
if (!omap_dmm->base) {
@@ -688,6 +829,22 @@ static int omap_dmm_probe(struct platform_device *dev)
omap_dmm->dev = &dev->dev;
+ if (of_machine_is_compatible("ti,dra7")) {
+		/*
+		 * DRA7 Errata i878 says that the MPU should not be used to
+		 * access RAM and the DMM at the same time. As it's not
+		 * possible to prevent the MPU from accessing RAM, we need
+		 * to access the DMM via a proxy.
+		 */
+ if (!dmm_workaround_init(omap_dmm)) {
+ omap_dmm->dmm_workaround = true;
+ dev_info(&dev->dev,
+ "workaround for errata i878 in use\n");
+ } else {
+ dev_warn(&dev->dev,
+				 "failed to initialize workaround for i878\n");
+ }
+ }
+
hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
@@ -714,24 +871,6 @@ static int omap_dmm_probe(struct platform_device *dev)
dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
- ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
- "omap_dmm_irq_handler", omap_dmm);
-
- if (ret) {
- dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
- omap_dmm->irq, ret);
- omap_dmm->irq = -1;
- goto fail;
- }
-
- /* Enable all interrupts for each refill engine except
- * ERR_LUT_MISS<n> (which is just advisory, and we don't care
- * about because we want to be able to refill live scanout
- * buffers for accelerated pan/scroll) and FILL_DSC<n> which
- * we just generally don't care about.
- */
- dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
-
omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
if (!omap_dmm->dummy_page) {
dev_err(&dev->dev, "could not allocate dummy page\n");
@@ -823,6 +962,24 @@ static int omap_dmm_probe(struct platform_device *dev)
.p1.y = omap_dmm->container_height - 1,
};
+ ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+ "omap_dmm_irq_handler", omap_dmm);
+
+ if (ret) {
+ dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
+ omap_dmm->irq, ret);
+ omap_dmm->irq = -1;
+ goto fail;
+ }
+
+ /* Enable all interrupts for each refill engine except
+ * ERR_LUT_MISS<n> (which is just advisory, and we don't care
+ * about because we want to be able to refill live scanout
+ * buffers for accelerated pan/scroll) and FILL_DSC<n> which
+ * we just generally don't care about.
+ */
+ dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
+
/* initialize all LUTs to dummy page entries */
for (i = 0; i < omap_dmm->num_lut; i++) {
area.tcm = omap_dmm->tcm[i];
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 5f98506ac2c5..5e67d58cbc28 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -439,7 +439,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
args->size = omap_gem_mmap_size(obj);
args->offset = omap_gem_mmap_offset(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -614,7 +614,7 @@ err_gem_deinit:
omap_disconnect_pipelines(ddev);
err_crtc_uninit:
omap_crtc_pre_uninit(priv);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return ret;
}
@@ -643,7 +643,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
omap_disconnect_pipelines(ddev);
omap_crtc_pre_uninit(priv);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
}
static int pdev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 9f1e3d8f8488..4d264fd554d8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -319,7 +319,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
error:
while (--i >= 0)
- drm_gem_object_unreference_unlocked(bos[i]);
+ drm_gem_object_put_unlocked(bos[i]);
return fb;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b445309b0143..aee99194499f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -150,7 +150,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
- drm_gem_object_unreference_unlocked(fbdev->bo);
+ drm_gem_object_put_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 4ba5d035c590..8dcaf9f4aa75 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -638,7 +638,7 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*offset = omap_gem_mmap_offset(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
fail:
return ret;
@@ -1312,7 +1312,7 @@ int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index ec04a69ade46..0f8b597ccd10 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -168,7 +168,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
	 * Importing dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return obj;
}
}
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
deleted file mode 100644
index 460e63dbf825..000000000000
--- a/drivers/gpu/drm/omapdrm/tcm-sita.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * SImple Tiler Allocator (SiTA) private structures.
- *
- * Copyright (C) 2009-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Ravi Ramachandra <r.ramachandra@ti.com>
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * * Neither the name of Texas Instruments Incorporated nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TCM_SITA_H
-#define _TCM_SITA_H
-
-#include "tcm.h"
-
-/* length between two coordinates */
-#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
-
-enum criteria {
- CR_MAX_NEIGHS = 0x01,
- CR_FIRST_FOUND = 0x10,
- CR_BIAS_HORIZONTAL = 0x20,
- CR_BIAS_VERTICAL = 0x40,
- CR_DIAGONAL_BALANCE = 0x80
-};
-
-/* nearness to the beginning of the search field from 0 to 1000 */
-struct nearness_factor {
- s32 x;
- s32 y;
-};
-
-/*
- * Statistics on immediately neighboring slots. Edge is the number of
- * border segments that are also border segments of the scan field. Busy
- * refers to the number of neighbors that are occupied.
- */
-struct neighbor_stats {
- u16 edge;
- u16 busy;
-};
-
-/* structure to keep the score of a potential allocation */
-struct score {
- struct nearness_factor f;
- struct neighbor_stats n;
- struct tcm_area a;
- u16 neighs; /* number of busy neighbors */
-};
-
-struct sita_pvt {
- spinlock_t lock; /* spinlock to protect access */
- struct tcm_pt div_pt; /* divider point splitting container */
- struct tcm_area ***map; /* pointers to the parent area for each slot */
-};
-
-/* assign coordinates to area */
-static inline
-void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
-{
- a->p0.x = x0;
- a->p0.y = y0;
- a->p1.x = x1;
- a->p1.y = y1;
-}
-
-#endif
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 54324330b91f..f471537c852f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2416,7 +2416,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
size = radeon_get_ib_value(p, idx+1+(i*8)+1);
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n");
ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
}
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index ba704633b072..52a7246fed9e 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/* utility to create the register check tables
* this includes inlined list.h safe for userspace.
*
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index ad16a925f8d5..57e2b09784be 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#define R100_TRACK_MAX_TEXTURE 3
#define R200_TRACK_MAX_TEXTURE 6
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index f920be236cc9..84b3ad2172a3 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include <drm/drmP.h>
#include <drm/drm_dp_mst_helper.h>
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 611cf934b211..4278272e3191 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index bc26efd1793e..0d84b8aafab3 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _RADEON_TRACE_H_
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
index 66b3d5084662..65e92302f974 100644
--- a/drivers/gpu/drm/radeon/radeon_trace_points.c
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/* Copyright Red Hat Inc 2010.
* Author : Dave Airlie <airlied@redhat.com>
*/
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 4e5e95c0cab5..3e22a54a99c2 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -467,8 +467,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
struct dma_fence *fence;
struct drm_sched_rq *rq;
- if (!spsc_queue_count(&entity->job_queue) == 0 ||
- entity->num_rq_list <= 1)
+ if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
return;
fence = READ_ONCE(entity->last_scheduled);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 9ca741f3a0bc..44fe587aaef9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -182,6 +182,20 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);
+/**
+ * drm_sched_start_timeout - start timeout for reset worker
+ *
+ * @sched: scheduler instance to start the worker for
+ *
+ * Start the timeout for the given scheduler.
+ */
+static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+{
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !list_empty(&sched->ring_mirror_list))
+ schedule_delayed_work(&sched->work_tdr, sched->timeout);
+}
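With this change the timeout worker is owned by the scheduler rather than by each job: drm_sched_init() now runs INIT_DELAYED_WORK() on a single per-scheduler work_tdr, drm_sched_job_begin(), drm_sched_job_finish() and drm_sched_job_recovery() re-arm it through this helper, and drivers such as v3d poke job->base.sched->work_tdr instead of a per-job timer.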
+
/* job_finish is called after the hw fence has signaled
 */
static void drm_sched_job_finish(struct work_struct *work)
@@ -197,19 +211,13 @@ static void drm_sched_job_finish(struct work_struct *work)
* manages to find this job as the next job in the list, the fence
* signaled check below will prevent the timeout to be restarted.
*/
- cancel_delayed_work_sync(&s_job->work_tdr);
+ cancel_delayed_work_sync(&sched->work_tdr);
spin_lock(&sched->job_list_lock);
- /* queue TDR for next job */
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
- struct drm_sched_job *next = list_next_entry(s_job, node);
-
- if (!dma_fence_is_signaled(&next->s_fence->finished))
- schedule_delayed_work(&next->work_tdr, sched->timeout);
- }
/* remove job from ring_mirror_list */
list_del(&s_job->node);
+ /* queue TDR for next job */
+ drm_sched_start_timeout(sched);
spin_unlock(&sched->job_list_lock);
dma_fence_put(&s_job->s_fence->finished);
@@ -233,19 +241,49 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node) == s_job)
- schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+ drm_sched_start_timeout(sched);
spin_unlock(&sched->job_list_lock);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
- struct drm_sched_job *job = container_of(work, struct drm_sched_job,
- work_tdr.work);
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_job *job;
+ int r;
- job->sched->ops->timedout_job(job);
+ sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *fence = job->s_fence;
+
+ if (!dma_fence_remove_callback(fence->parent, &fence->cb))
+ goto already_signaled;
+ }
+
+ job = list_first_entry_or_null(&sched->ring_mirror_list,
+ struct drm_sched_job, node);
+ spin_unlock(&sched->job_list_lock);
+
+ if (job)
+ sched->ops->timedout_job(job);
+
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry(job, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *fence = job->s_fence;
+
+ if (!fence->parent || !list_empty(&fence->cb.node))
+ continue;
+
+ r = dma_fence_add_callback(fence->parent, &fence->cb,
+ drm_sched_process_job);
+ if (r)
+ drm_sched_process_job(fence->parent, &fence->cb);
+
+already_signaled:
+ ;
+ }
+ spin_unlock(&sched->job_list_lock);
}
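The handler walks the mirror list newest-first, detaching the scheduler's completion callback from every in-flight fence; if all detaches succeed, the oldest job is handed to the driver's timedout_job() hook, after which the callbacks are re-registered and any fence that signaled in the meantime is completed directly via drm_sched_process_job(). If a fence turns out to have signaled already, the timeout is treated as stale: handling is skipped and the already-detached callbacks are re-armed.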
/**
@@ -312,11 +350,6 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
int r;
spin_lock(&sched->job_list_lock);
- s_job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
- if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
- schedule_delayed_work(&s_job->work_tdr, sched->timeout);
-
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *fence;
@@ -349,6 +382,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
}
spin_lock(&sched->job_list_lock);
}
+ drm_sched_start_timeout(sched);
spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
@@ -384,7 +418,6 @@ int drm_sched_job_init(struct drm_sched_job *job,
INIT_WORK(&job->finish_work, drm_sched_job_finish);
INIT_LIST_HEAD(&job->node);
- INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
return 0;
}
@@ -575,6 +608,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
INIT_LIST_HEAD(&sched->ring_mirror_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
+ INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
atomic_set(&sched->num_jobs, 0);
atomic64_set(&sched->job_id_count, 0);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 965088afcfad..f80e82e16475 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1988,6 +1988,28 @@ static int tegra_dc_init(struct host1x_client *client)
struct drm_plane *cursor = NULL;
int err;
+ /*
+ * XXX do not register DCs with no window groups because we cannot
+ * assign a primary plane to them, which in turn will cause KMS to
+ * crash.
+ */
+ if (dc->soc->wgrps) {
+ bool has_wgrps = false;
+ unsigned int i;
+
+ for (i = 0; i < dc->soc->num_wgrps; i++) {
+ const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+ if (wgrp->dc == dc->pipe && wgrp->num_windows > 0) {
+ has_wgrps = true;
+ break;
+ }
+ }
+
+ if (!has_wgrps)
+ return 0;
+ }
+
dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
@@ -2234,8 +2256,59 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
.num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps),
};
+static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = {
+ {
+ .index = 0,
+ .dc = 0,
+ .windows = (const unsigned int[]) { 0 },
+ .num_windows = 1,
+ }, {
+ .index = 1,
+ .dc = 1,
+ .windows = (const unsigned int[]) { 1 },
+ .num_windows = 1,
+ }, {
+ .index = 2,
+ .dc = 1,
+ .windows = (const unsigned int[]) { 2 },
+ .num_windows = 1,
+ }, {
+ .index = 3,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 3 },
+ .num_windows = 1,
+ }, {
+ .index = 4,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 4 },
+ .num_windows = 1,
+ }, {
+ .index = 5,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 5 },
+ .num_windows = 1,
+ },
+};
+
+static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
+ .supports_background_color = true,
+ .supports_interlacing = true,
+ .supports_cursor = true,
+ .supports_block_linear = true,
+ .has_legacy_blending = false,
+ .pitch_align = 64,
+ .has_powergate = false,
+ .coupled_pm = false,
+ .has_nvdisplay = true,
+ .wgrps = tegra194_dc_wgrps,
+ .num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps),
+};
+
static const struct of_device_id tegra_dc_of_match[] = {
{
+ .compatible = "nvidia,tegra194-dc",
+ .data = &tegra194_dc_soc_info,
+ }, {
.compatible = "nvidia,tegra186-dc",
.data = &tegra186_dc_soc_info,
}, {
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index e96f582ca692..1256dfb6b2f5 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -300,7 +300,7 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define SOR1_TIMING_CYA (1 << 27)
#define CURSOR_ENABLE (1 << 16)
-#define SOR_ENABLE(x) (1 << (25 + (x)))
+#define SOR_ENABLE(x) (1 << (25 + (((x) > 1) ? ((x) + 1) : (x))))
#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
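The remapped macro skips bit 27 for SOR indices above 1. A self-contained check of the resulting bit positions (illustrative, compiles as plain userspace C):

	#include <assert.h>

	#define SOR_ENABLE(x) (1 << (25 + (((x) > 1) ? ((x) + 1) : (x))))

	int main(void)
	{
		assert(SOR_ENABLE(0) == 1 << 25);
		assert(SOR_ENABLE(1) == 1 << 26);
		assert(SOR_ENABLE(2) == 1 << 28);	/* bit 27 is skipped */
		assert(SOR_ENABLE(3) == 1 << 29);
		return 0;
	}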
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index d84e81ff36ad..ee4180d8db14 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -521,7 +521,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
* is no possibility to perform the I2C mode configuration in the
* HDMI path.
*/
- err = tegra_dpaux_pad_config(dpaux, DPAUX_HYBRID_PADCTL_MODE_I2C);
+ err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
if (err < 0)
return err;
@@ -639,6 +639,7 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = {
};
static const struct of_device_id tegra_dpaux_of_match[] = {
+ { .compatible = "nvidia,tegra194-dpaux", },
{ .compatible = "nvidia,tegra186-dpaux", },
{ .compatible = "nvidia,tegra210-dpaux", },
{ .compatible = "nvidia,tegra124-dpaux", },
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8cdb610561ba..65ea4988b332 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -15,6 +15,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
#include "drm.h"
#include "gem.h"
@@ -1068,6 +1072,14 @@ struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
}
if (!shared || (shared && (group != tegra->group))) {
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (client->dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping =
+ to_dma_iommu_mapping(client->dev);
+ arm_iommu_detach_device(client->dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
@@ -1216,31 +1228,15 @@ static int host1x_drm_remove(struct host1x_device *dev)
static int host1x_drm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct tegra_drm *tegra = drm->dev_private;
-
- drm_kms_helper_poll_disable(drm);
- tegra_drm_fb_suspend(drm);
- tegra->state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(tegra->state)) {
- tegra_drm_fb_resume(drm);
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(tegra->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm);
}
static int host1x_drm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct tegra_drm *tegra = drm->dev_private;
-
- drm_atomic_helper_resume(drm, tegra->state);
- tegra_drm_fb_resume(drm);
- drm_kms_helper_poll_enable(drm);
- return 0;
+ return drm_mode_config_helper_resume(drm);
}
#endif
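drm_mode_config_helper_suspend() stashes the atomic state in drm_device::mode_config.suspend_state and also takes care of fbdev suspend and connector polling, which is why the tegra-private state pointer and the tegra_drm_fb_suspend()/tegra_drm_fb_resume() helpers can be dropped below. The tilcdc conversion further down follows the same pattern.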
@@ -1275,6 +1271,9 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra186-sor", },
{ .compatible = "nvidia,tegra186-sor1", },
{ .compatible = "nvidia,tegra186-vic", },
+ { .compatible = "nvidia,tegra194-display", },
+ { .compatible = "nvidia,tegra194-dc", },
+ { .compatible = "nvidia,tegra194-sor", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 92d248784396..1012335bb489 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -60,8 +60,6 @@ struct tegra_drm {
unsigned int pitch_align;
struct tegra_display_hub *hub;
-
- struct drm_atomic_state *state;
};
struct tegra_drm_client;
@@ -186,8 +184,6 @@ int tegra_drm_fb_prepare(struct drm_device *drm);
void tegra_drm_fb_free(struct drm_device *drm);
int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
-void tegra_drm_fb_suspend(struct drm_device *drm);
-void tegra_drm_fb_resume(struct drm_device *drm);
extern struct platform_driver tegra_display_hub_driver;
extern struct platform_driver tegra_dc_driver;
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 4c22cdded3c2..b947e82bbeb1 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -356,7 +356,7 @@ static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
/* Undo the special mapping we made in fbdev probe. */
if (bo && bo->pages) {
vunmap(bo->vaddr);
- bo->vaddr = 0;
+ bo->vaddr = NULL;
}
drm_framebuffer_remove(fbdev->fb);
@@ -412,25 +412,3 @@ void tegra_drm_fb_exit(struct drm_device *drm)
tegra_fbdev_exit(tegra->fbdev);
#endif
}
-
-void tegra_drm_fb_suspend(struct drm_device *drm)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct tegra_drm *tegra = drm->dev_private;
-
- console_lock();
- drm_fb_helper_set_suspend(&tegra->fbdev->base, 1);
- console_unlock();
-#endif
-}
-
-void tegra_drm_fb_resume(struct drm_device *drm)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct tegra_drm *tegra = drm->dev_private;
-
- console_lock();
- drm_fb_helper_set_suspend(&tegra->fbdev->base, 0);
- console_unlock();
-#endif
-}
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 8f4fcbb515fb..6112d9042979 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -758,10 +758,12 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
return err;
}
- hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
- if (IS_ERR(hub->clk_dsc)) {
- err = PTR_ERR(hub->clk_dsc);
- return err;
+ if (hub->soc->supports_dsc) {
+ hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
+ if (IS_ERR(hub->clk_dsc)) {
+ err = PTR_ERR(hub->clk_dsc);
+ return err;
+ }
}
hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
@@ -890,10 +892,19 @@ static const struct dev_pm_ops tegra_display_hub_pm_ops = {
static const struct tegra_display_hub_soc tegra186_display_hub = {
.num_wgrps = 6,
+ .supports_dsc = true,
+};
+
+static const struct tegra_display_hub_soc tegra194_display_hub = {
+ .num_wgrps = 6,
+ .supports_dsc = false,
};
static const struct of_device_id tegra_display_hub_of_match[] = {
{
+ .compatible = "nvidia,tegra194-display",
+ .data = &tegra194_display_hub
+ }, {
.compatible = "nvidia,tegra186-display",
.data = &tegra186_display_hub
}, {
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 85b8bf41a395..6696a85fc1f2 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -38,6 +38,7 @@ to_tegra_shared_plane(struct drm_plane *plane)
struct tegra_display_hub_soc {
unsigned int num_wgrps;
+ bool supports_dsc;
};
struct tegra_display_hub {
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index d7fe9f15def1..b129da2e5afd 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -282,6 +282,85 @@ static const struct tegra_sor_hdmi_settings tegra186_sor_hdmi_defaults[] = {
}
};
+static const struct tegra_sor_hdmi_settings tegra194_sor_hdmi_defaults[] = {
+ {
+ .frequency = 54000000,
+ .vcocap = 0,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 0xf,
+ .tx_pu_value = 0,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x54,
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 75000000,
+ .vcocap = 1,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 0xf,
+ .tx_pu_value = 0,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x44,
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 15,
+ .tx_pu_value = 0x66 /* 0 */,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x00, /* 0x34 */
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x37 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 15,
+ .tx_pu_value = 64,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x34,
+ .drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 12,
+ .tx_pu_value = 96,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x34,
+ .drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }
+};
+
struct tegra_sor_regs {
unsigned int head_state0;
unsigned int head_state1;
@@ -2894,7 +2973,38 @@ static const struct tegra_sor_soc tegra186_sor1 = {
.xbar_cfg = tegra124_sor_xbar_cfg,
};
+static const struct tegra_sor_regs tegra194_sor_regs = {
+ .head_state0 = 0x151,
+ .head_state1 = 0x155,
+ .head_state2 = 0x159,
+ .head_state3 = 0x15d,
+ .head_state4 = 0x161,
+ .head_state5 = 0x165,
+ .pll0 = 0x169,
+ .pll1 = 0x16a,
+ .pll2 = 0x16b,
+ .pll3 = 0x16c,
+ .dp_padctl0 = 0x16e,
+ .dp_padctl2 = 0x16f,
+};
+
+static const struct tegra_sor_soc tegra194_sor = {
+ .supports_edp = true,
+ .supports_lvds = false,
+ .supports_hdmi = true,
+ .supports_dp = true,
+
+ .regs = &tegra194_sor_regs,
+ .has_nvdisplay = true,
+
+ .num_settings = ARRAY_SIZE(tegra194_sor_hdmi_defaults),
+ .settings = tegra194_sor_hdmi_defaults,
+
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+};
+
static const struct of_device_id tegra_sor_of_match[] = {
+ { .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
{ .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 },
{ .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 0fb300d41a09..33e533268488 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -554,29 +554,23 @@ static struct drm_driver tilcdc_driver = {
static int tilcdc_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct tilcdc_drm_private *priv = ddev->dev_private;
+ int ret = 0;
- priv->saved_state = drm_atomic_helper_suspend(ddev);
+ ret = drm_mode_config_helper_suspend(ddev);
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
- return 0;
+ return ret;
}
static int tilcdc_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct tilcdc_drm_private *priv = ddev->dev_private;
- int ret = 0;
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
-
- if (priv->saved_state)
- ret = drm_atomic_helper_resume(ddev, priv->saved_state);
-
- return ret;
+ return drm_mode_config_helper_resume(ddev);
}
#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index ead512216669..62cea5ff5558 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -70,9 +70,6 @@ struct tilcdc_drm_private {
const uint32_t *pixelformats;
uint32_t num_pixelformats;
- /* The context for pm susped/resume cycle is stored here */
- struct drm_atomic_state *saved_state;
-
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index a60e560804e0..01fc670ce7a2 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,8 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
- ttm_bo_manager.o ttm_page_alloc_dma.o
+ ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
+ ttm_page_alloc_dma.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index b2a33bf1ef10..26b889f86670 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -247,20 +247,6 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
-static void ttm_list_move_bulk_tail(struct list_head *list,
- struct list_head *first,
- struct list_head *last)
-{
- first->prev->next = last->next;
- last->next->prev = first->prev;
-
- list->prev->next = first;
- first->prev = list->prev;
-
- last->next = list;
- list->prev = last;
-}
-
void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
{
unsigned i;
@@ -276,8 +262,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
reservation_object_assert_held(pos->last->resv);
man = &pos->first->bdev->man[TTM_PL_TT];
- ttm_list_move_bulk_tail(&man->lru[i], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[i], &pos->first->lru,
+ &pos->last->lru);
}
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
@@ -291,8 +277,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
reservation_object_assert_held(pos->last->resv);
man = &pos->first->bdev->man[TTM_PL_VRAM];
- ttm_list_move_bulk_tail(&man->lru[i], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[i], &pos->first->lru,
+ &pos->last->lru);
}
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
@@ -306,8 +292,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
reservation_object_assert_held(pos->last->resv);
lru = &pos->first->bdev->glob->swap_lru[i];
- ttm_list_move_bulk_tail(lru, &pos->first->swap,
- &pos->last->swap);
+ list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6fe91c1b692d..a1d977fbade5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -409,8 +409,7 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
if (likely(node)) {
bo = container_of(node, struct ttm_buffer_object, vma_node);
- if (!kref_get_unless_zero(&bo->kref))
- bo = NULL;
+ bo = ttm_bo_get_unless_zero(bo);
}
drm_vma_offset_unlock_lookup(&bdev->vma_manager);
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index a5501581d96b..9243dea6e6ad 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -168,7 +168,7 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
job->timedout_ctca = ctca;
job->timedout_ctra = ctra;
- schedule_delayed_work(&job->base.work_tdr,
+ schedule_delayed_work(&job->base.sched->work_tdr,
job->base.sched->timeout);
return;
}
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 09b2aa08363e..8841bd30e1e5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
- vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o
+ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
+ vmwgfx_validation.o \
+ ttm_object.o ttm_lock.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c
index 20694b8a01ca..16b2083cb9d4 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.c
@@ -29,13 +29,13 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
-#include <linux/module.h>
+#include "ttm_lock.h"
+#include "ttm_object.h"
#define TTM_WRITE_LOCK_PENDING (1 << 0)
#define TTM_VT_LOCK_PENDING (1 << 1)
@@ -52,7 +52,6 @@ void ttm_lock_init(struct ttm_lock *lock)
lock->kill_takers = false;
lock->signal = SIGKILL;
}
-EXPORT_SYMBOL(ttm_lock_init);
void ttm_read_unlock(struct ttm_lock *lock)
{
@@ -61,7 +60,6 @@ void ttm_read_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
-EXPORT_SYMBOL(ttm_read_unlock);
static bool __ttm_read_lock(struct ttm_lock *lock)
{
@@ -92,7 +90,6 @@ int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
wait_event(lock->queue, __ttm_read_lock(lock));
return ret;
}
-EXPORT_SYMBOL(ttm_read_lock);
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
@@ -144,7 +141,6 @@ void ttm_write_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
-EXPORT_SYMBOL(ttm_write_unlock);
static bool __ttm_write_lock(struct ttm_lock *lock)
{
@@ -185,7 +181,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
return ret;
}
-EXPORT_SYMBOL(ttm_write_lock);
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
@@ -262,14 +257,12 @@ int ttm_vt_lock(struct ttm_lock *lock,
return ret;
}
-EXPORT_SYMBOL(ttm_vt_lock);
int ttm_vt_unlock(struct ttm_lock *lock)
{
return ttm_ref_object_base_unref(lock->vt_holder,
- lock->base.hash.key, TTM_REF_USAGE);
+ lock->base.handle, TTM_REF_USAGE);
}
-EXPORT_SYMBOL(ttm_vt_unlock);
void ttm_suspend_unlock(struct ttm_lock *lock)
{
@@ -278,7 +271,6 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
-EXPORT_SYMBOL(ttm_suspend_unlock);
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
@@ -300,4 +292,3 @@ void ttm_suspend_lock(struct ttm_lock *lock)
{
wait_event(lock->queue, __ttm_suspend_lock(lock));
}
-EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h
new file mode 100644
index 000000000000..0c3af9836863
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.h
@@ -0,0 +1,248 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while holding the vt lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <linux/wait.h>
+#include <linux/atomic.h>
+
+#include "ttm_object.h"
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+ struct ttm_base_object base;
+ wait_queue_head_t queue;
+ spinlock_t lock;
+ int32_t rw;
+ uint32_t flags;
+ bool kill_takers;
+ int signal;
+ struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+ struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+ int signal)
+{
+ lock->kill_takers = val;
+ if (val)
+ lock->signal = signal;
+}
+
+#endif
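
To ground the mode descriptions in the header above, here is a minimal usage sketch. struct vmw_private and its reservation_sem member appear elsewhere in this patch, while vmw_validate_buffers() is an invented placeholder:

/* Hedged sketch: bracket buffer validation with the read lock. */
static int example_validate(struct vmw_private *dev_priv)
{
	int ret;

	/* Sleeps while the lock is held in write, vt or suspend mode. */
	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTARTSYS if interrupted */

	ret = vmw_validate_buffers(dev_priv);	/* placeholder helper */

	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

On master teardown the driver could additionally call ttm_lock_set_kill(&dev_priv->reservation_sem, true, SIGTERM) so that later takers of the lock are killed, as described in the ttm_lock_set_kill() comment above.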
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 74f1b1eb1f8e..36990b80e790 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -59,13 +59,12 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/atomic.h>
+#include "ttm_object.h"
struct ttm_object_file {
struct ttm_object_device *tdev;
@@ -95,6 +94,7 @@ struct ttm_object_device {
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
size_t dma_buf_size;
+ struct idr idr;
};
/**
@@ -172,14 +172,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
kref_init(&base->refcount);
+ idr_preload(GFP_KERNEL);
spin_lock(&tdev->object_lock);
- ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
+ ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
spin_unlock(&tdev->object_lock);
- if (unlikely(ret != 0))
- goto out_err0;
+ idr_preload_end();
+ if (ret < 0)
+ return ret;
+ base->handle = ret;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -189,12 +190,10 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0;
out_err1:
spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock);
-out_err0:
return ret;
}
-EXPORT_SYMBOL(ttm_base_object_init);
static void ttm_release_base(struct kref *kref)
{
@@ -203,7 +202,7 @@ static void ttm_release_base(struct kref *kref)
struct ttm_object_device *tdev = base->tfile->tdev;
spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock);
/*
@@ -225,7 +224,41 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
kref_put(&base->refcount, ttm_release_base);
}
-EXPORT_SYMBOL(ttm_base_object_unref);
+
+/**
+ * ttm_base_object_noref_lookup - look up a base object without reference
+ * @tfile: The struct ttm_object_file the object is registered with.
+ * @key: The object handle.
+ *
+ * This function looks up a ttm base object and returns a pointer to it
+ * without refcounting the pointer. The returned pointer is only valid
+ * until ttm_base_object_noref_release() is called, and the object
+ * pointed to by the returned pointer may be doomed. Any persistent usage
+ * of the object requires a refcount to be taken using kref_get_unless_zero().
+ * Iff this function returns successfully it needs to be paired with
+ * ttm_base_object_noref_release() and no sleeping or scheduling functions
+ * may be called in between these function calls.
+ *
+ * Return: A pointer to the object if successful or NULL otherwise.
+ */
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
+{
+ struct drm_hash_item *hash;
+ struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ int ret;
+
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(ht, key, &hash);
+ if (ret) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ __release(RCU);
+ return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+}
+EXPORT_SYMBOL(ttm_base_object_noref_lookup);
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
@@ -247,29 +280,21 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
return base;
}
-EXPORT_SYMBOL(ttm_base_object_lookup);
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
- struct ttm_base_object *base = NULL;
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tdev->object_hash;
- int ret;
+ struct ttm_base_object *base;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ base = idr_find(&tdev->idr, key);
- if (likely(ret == 0)) {
- base = drm_hash_entry(hash, struct ttm_base_object, hash);
- if (!kref_get_unless_zero(&base->refcount))
- base = NULL;
- }
+ if (base && !kref_get_unless_zero(&base->refcount))
+ base = NULL;
rcu_read_unlock();
return base;
}
-EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
/**
* ttm_ref_object_exists - Check whether a caller has a valid ref object
@@ -289,7 +314,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_ref_object *ref;
rcu_read_lock();
- if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+ if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
goto out_false;
/*
@@ -315,7 +340,6 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
rcu_read_unlock();
return false;
}
-EXPORT_SYMBOL(ttm_ref_object_exists);
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
@@ -340,7 +364,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
while (ret == -EINVAL) {
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
+ ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@@ -364,7 +388,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
return -ENOMEM;
}
- ref->hash.key = base->hash.key;
+ ref->hash.key = base->handle;
ref->obj = base;
ref->tfile = tfile;
ref->ref_type = ref_type;
@@ -391,9 +415,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
return ret;
}
-EXPORT_SYMBOL(ttm_ref_object_add);
-static void ttm_ref_object_release(struct kref *kref)
+static void __releases(tfile->lock) __acquires(tfile->lock)
+ttm_ref_object_release(struct kref *kref)
{
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
@@ -435,7 +459,6 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
spin_unlock(&tfile->lock);
return 0;
}
-EXPORT_SYMBOL(ttm_ref_object_base_unref);
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
@@ -464,7 +487,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
ttm_object_file_unref(&tfile);
}
-EXPORT_SYMBOL(ttm_object_file_release);
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
unsigned int hash_order)
@@ -499,7 +521,6 @@ out_err:
return NULL;
}
-EXPORT_SYMBOL(ttm_object_file_init);
struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
@@ -519,6 +540,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (ret != 0)
goto out_no_object_hash;
+ idr_init(&tdev->idr);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
@@ -530,7 +552,6 @@ out_no_object_hash:
kfree(tdev);
return NULL;
}
-EXPORT_SYMBOL(ttm_object_device_init);
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
@@ -538,11 +559,12 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL;
+ WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+ idr_destroy(&tdev->idr);
drm_ht_remove(&tdev->object_hash);
kfree(tdev);
}
-EXPORT_SYMBOL(ttm_object_device_release);
/**
* get_dma_buf_unless_doomed - get a dma_buf reference if possible.
@@ -641,14 +663,13 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
- *handle = base->hash.key;
+ *handle = base->handle;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
return ret;
}
-EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
/**
* ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
@@ -739,7 +760,6 @@ out_unref:
ttm_base_object_unref(&base);
return ret;
}
-EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
/**
* ttm_prime_object_init - Initialize a ttm_prime_object
@@ -772,4 +792,3 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
ttm_prime_refcount_release,
ref_obj_release);
}
-EXPORT_SYMBOL(ttm_prime_object_init);
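
The file above converts the per-device object hash to an idr. Condensed from the hunks above (not copied verbatim), the resulting allocation and lookup idiom is:

/* Allocation: idr_preload() may sleep, so it runs outside the spinlock. */
idr_preload(GFP_KERNEL);
spin_lock(&tdev->object_lock);
ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);	/* new id or -errno */
spin_unlock(&tdev->object_lock);
idr_preload_end();
if (ret < 0)
	return ret;
base->handle = ret;

/* Lookup: idr_find() is RCU-safe; the kref guards against concurrent release. */
rcu_read_lock();
base = idr_find(&tdev->idr, key);
if (base && !kref_get_unless_zero(&base->refcount))
	base = NULL;
rcu_read_unlock();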
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
new file mode 100644
index 000000000000..50d26c7ff42d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -0,0 +1,375 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <linux/list.h>
+#include <drm/drm_hashtab.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+ TTM_REF_USAGE,
+ TTM_REF_SYNCCPU_READ,
+ TTM_REF_SYNCCPU_WRITE,
+ TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_typex types.
+ */
+
+enum ttm_object_type {
+ ttm_fence_type,
+ ttm_buffer_type,
+ ttm_lock_type,
+ ttm_prime_type,
+ ttm_driver_type0 = 256,
+ ttm_driver_type1,
+ ttm_driver_type2,
+ ttm_driver_type3,
+ ttm_driver_type4,
+ ttm_driver_type5
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * This function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+ struct rcu_head rhead;
+ struct ttm_object_file *tfile;
+ struct kref refcount;
+ void (*refcount_release) (struct ttm_base_object **base);
+ void (*ref_obj_release) (struct ttm_base_object *base,
+ enum ttm_ref_type ref_type);
+ u32 handle;
+ enum ttm_object_type object_type;
+ u32 shareable;
+};
+
+
+/**
+ * struct ttm_prime_object - Modified base object that is prime-aware
+ *
+ * @base: struct ttm_base_object that we derive from
+ * @mutex: Mutex protecting the @dma_buf member.
+ * @size: Size of the dma_buf associated with this object
+ * @real_type: Type of the underlying object. Needed since we're setting
+ * the value of @base::object_type to ttm_prime_type
+ * @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
+ * object.
+ * @refcount_release: The underlying object's release method. Needed since
+ * we set @base::refcount_release to our own release method.
+ */
+
+struct ttm_prime_object {
+ struct ttm_base_object base;
+ struct mutex mutex;
+ size_t size;
+ enum ttm_object_type real_type;
+ struct dma_buf *dma_buf;
+ void (*refcount_release) (struct ttm_base_object **);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object
+ **),
+ void (*ref_obj_release) (struct ttm_base_object
+ *,
+ enum ttm_ref_type
+ ref_type));
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+ *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
+ *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be refcounting to prevent object destruction,
+ * When user-space takes a lock, it can add a ref object to that lock to
+ * make sure the lock is released if the application dies. A ref object
+ * will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed);
+
+extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+ struct ttm_base_object *base);
+
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key,
+ enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+ *tdev,
+ unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated by a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @mem_glob: struct ttm_mem_global for memory accounting.
+ * @hash_order: Order of hash table used to hash the base objects.
+ * @ops: DMA buf ops for prime objects of this device.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated by a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#define ttm_base_object_kfree(__object, __base)\
+ kfree_rcu(__object, __base.rhead)
+
+extern int ttm_prime_object_init(struct ttm_object_file *tfile,
+ size_t size,
+ struct ttm_prime_object *prime,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release)
+ (struct ttm_base_object **),
+ void (*ref_obj_release)
+ (struct ttm_base_object *,
+ enum ttm_ref_type ref_type));
+
+static inline enum ttm_object_type
+ttm_base_object_type(struct ttm_base_object *base)
+{
+ return (base->object_type == ttm_prime_type) ?
+ container_of(base, struct ttm_prime_object, base)->real_type :
+ base->object_type;
+}
+extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle);
+extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
+#define ttm_prime_object_kfree(__obj, __prime) \
+ kfree_rcu(__obj, __prime.base.rhead)
+
+/*
+ * Extra memory required by the base object's idr storage, which is allocated
+ * separately from the base object itself. We estimate an average of 128 bytes
+ * per idr entry.
+ */
+#define TTM_OBJ_EXTRA_SIZE 128
+
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_noref_release - release a base object pointer looked up
+ * without reference
+ *
+ * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
+ */
+static inline void ttm_base_object_noref_release(void)
+{
+ __acquire(RCU);
+ rcu_read_unlock();
+}
+#endif
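
The noref contract documented above is easy to get wrong, so here is a hedged sketch of a conforming caller (tfile and handle are assumed to come from the surrounding ioctl code):

struct ttm_base_object *base;

base = ttm_base_object_noref_lookup(tfile, handle);
if (!base)
	return -ESRCH;

/* RCU window: no sleeping or scheduling calls until the release below. */
if (!kref_get_unless_zero(&base->refcount))
	base = NULL;	/* object is doomed; usable only inside the window */
ttm_base_object_noref_release();
/* base, if still non-NULL, now holds a real reference. */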
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 2dda03345761..7ce1c2f87d9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -30,7 +30,7 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
-#include "drm/ttm/ttm_object.h"
+#include "ttm_object.h"
/**
@@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_buffer_object));
user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+ ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+ TTM_OBJ_EXTRA_SIZE;
}
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
*p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount);
}
- *handle = user_bo->prime.base.hash.key;
+ *handle = user_bo->prime.base.handle;
out_no_base_object:
return ret;
@@ -920,6 +921,47 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
return 0;
}
+/**
+ * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle.
+ *
+ * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * struct vmw_buffer_object it derives from without refcounting the pointer.
+ * The returned pointer is only valid until vmw_user_bo_noref_release() is
+ * called, and the object pointed to by the returned pointer may be doomed.
+ * Any persistent usage of the object requires a refcount to be taken using
+ * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
+ * needs to be paired with vmw_user_bo_noref_release() and no sleeping
+ * or scheduling functions may be called in between these function calls.
+ *
+ * Return: A struct vmw_buffer_object pointer if successful or negative
+ * error pointer on failure.
+ */
+struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_noref_lookup(tfile, handle);
+ if (!base) {
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-ESRCH);
+ }
+
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+ ttm_base_object_noref_release();
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+ prime.base);
+ return &vmw_user_bo->vbo;
+}
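
A hedged sketch of a caller of the helper above, using the ERR_PTR convention and the vmw_user_bo_noref_release() wrapper that this patch adds to vmwgfx_drv.h; process_bo() is an invented placeholder:

struct vmw_buffer_object *vbo;

vbo = vmw_user_bo_noref_lookup(tfile, handle);
if (IS_ERR(vbo))
	return PTR_ERR(vbo);

/* No sleeping here; ttm_bo_reference_unless_doomed() for persistent use. */
process_bo(vbo);	/* placeholder: must not sleep */
vmw_user_bo_noref_release();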
/**
* vmw_user_bo_reference - Open a handle to a vmw user buffer object.
@@ -940,7 +982,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
- *handle = user_bo->prime.base.hash.key;
+ *handle = user_bo->prime.base.handle;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL, false);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index e7e4655d3f36..48d1380a952e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -660,7 +660,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
struct vmw_cmdbuf_header *cur = man->cur;
- WARN_ON(!mutex_is_locked(&man->cur_mutex));
+ lockdep_assert_held_once(&man->cur_mutex);
if (!cur)
return;
@@ -1045,7 +1045,7 @@ static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
{
struct vmw_cmdbuf_header *cur = man->cur;
- WARN_ON(!mutex_is_locked(&man->cur_mutex));
+ lockdep_assert_held_once(&man->cur_mutex);
WARN_ON(size > cur->reserved);
man->cur_pos += size;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 3b75af9bf85f..4ac55fc2bf97 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -89,8 +89,7 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
if (unlikely(ret != 0))
return ERR_PTR(ret);
- return vmw_resource_reference
- (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+ return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 7c3cb8efd11a..14bd760a62fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -217,9 +217,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
}
}
-
-
- vmw_resource_activate(res, vmw_hw_context_destroy);
+ res->hw_destroy = vmw_hw_context_destroy;
return 0;
out_cotables:
@@ -274,7 +272,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
- vmw_resource_activate(res, vmw_hw_context_destroy);
+ res->hw_destroy = vmw_hw_context_destroy;
return 0;
out_early:
@@ -757,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
return -EINVAL;
}
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of contexts anyway.
- */
-
if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
- ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
+ ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
@@ -809,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
goto out_err;
}
- arg->cid = ctx->base.hash.key;
+ arg->cid = ctx->base.handle;
out_err:
vmw_resource_unreference(&res);
out_unlock:
@@ -867,9 +861,8 @@ struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
if (cotable_type >= SVGA_COTABLE_DX10_MAX)
return ERR_PTR(-EINVAL);
- return vmw_resource_reference
- (container_of(ctx, struct vmw_user_context, res)->
- cotables[cotable_type]);
+ return container_of(ctx, struct vmw_user_context, res)->
+ cotables[cotable_type];
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 1d45714e1d5a..44f3f6f107d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -615,7 +615,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
vcotbl->type = type;
vcotbl->ctx = ctx;
- vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+ vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
return &vcotbl->res;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bb6dbbe18835..61a84b958d67 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -30,9 +30,9 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
+#include "ttm_object.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
@@ -667,8 +667,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->binding_mutex);
mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
- rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 1abe21758b0d..59f614225bcd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -28,6 +28,7 @@
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_
+#include "vmwgfx_validation.h"
#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
@@ -35,11 +36,11 @@
#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
-#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
+#include "ttm_object.h"
+#include "ttm_lock.h"
#include <linux/sync_file.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
@@ -112,21 +113,49 @@ struct vmw_validate_buffer {
};
struct vmw_res_func;
+
+
+/**
+ * struct vmw_resource - base class for hardware resources
+ *
+ * @kref: For refcounting.
+ * @dev_priv: Pointer to the device private for this resource. Immutable.
+ * @id: Device id. Protected by @dev_priv::resource_lock.
+ * @backup_size: Backup buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the backup buffer. Protected
+ * by resource reserved.
+ * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * Protected by resource reserved.
+ * @backup: The backup buffer if any. Protected by resource reserved.
+ * @backup_offset: Offset into the backup buffer if any. Protected by resource
+ * reserved. Note that only a few resource types can have a @backup_offset
+ * different from zero.
+ * @pin_count: The pin count for this resource. A pinned resource has a
+ * pin-count greater than zero. It is not on the resource LRU lists and its
+ * backup buffer is pinned. Hence it can't be evicted.
+ * @func: Method vtable for this resource. Immutable.
+ * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
+ * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
+ * @binding_head: List head for the context binding list. Protected by
+ * the @dev_priv::binding_mutex
+ * @res_free: The resource destructor.
+ * @hw_destroy: Callback to destroy the resource on the device, as part of
+ * resource destruction.
+ */
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
- bool avail;
unsigned long backup_size;
- bool res_dirty; /* Protected by backup buffer reserved */
- bool backup_dirty; /* Protected by backup buffer reserved */
+ bool res_dirty;
+ bool backup_dirty;
struct vmw_buffer_object *backup;
unsigned long backup_offset;
- unsigned long pin_count; /* Protected by resource reserved */
+ unsigned long pin_count;
const struct vmw_res_func *func;
- struct list_head lru_head; /* Protected by the resource lock */
- struct list_head mob_head; /* Protected by @backup reserved */
- struct list_head binding_head; /* Protected by binding_mutex */
+ struct list_head lru_head;
+ struct list_head mob_head;
+ struct list_head binding_head;
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -204,29 +233,24 @@ struct vmw_fifo_state {
bool dx;
};
-struct vmw_relocation {
- SVGAMobId *mob_loc;
- SVGAGuestPtr *location;
- uint32_t index;
-};
-
/**
* struct vmw_res_cache_entry - resource information cache entry
- *
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ * @valid_handle: Whether the @handle member is valid.
* @valid: Whether the entry is valid, which also implies that the execbuf
* code holds a reference to the resource, and it's placed on the
* validation list.
- * @handle: User-space handle of a resource.
- * @res: Non-ref-counted pointer to the resource.
*
* Used to avoid frequent repeated user-space handle lookups of the
* same resource.
*/
struct vmw_res_cache_entry {
- bool valid;
uint32_t handle;
struct vmw_resource *res;
- struct vmw_resource_val_node *node;
+ void *private;
+ unsigned short valid_handle;
+ unsigned short valid;
};
/**
@@ -291,35 +315,63 @@ enum vmw_display_unit_type {
vmw_du_screen_target
};
+struct vmw_validation_context;
+struct vmw_ctx_validation_info;
+/**
+ * struct vmw_sw_context - Command submission context
+ * @res_ht: Pointer hash table used to find validation duplicates
+ * @kernel: Whether the command buffer originates from kernel code rather
+ * than from user-space
+ * @fp: If @kernel is false, points to the file of the client. Otherwise
+ * NULL
+ * @cmd_bounce: Command bounce buffer used for command validation before
+ * copying to fifo space
+ * @cmd_bounce_size: Current command bounce buffer size
+ * @cur_query_bo: Current buffer object used as query result buffer
+ * @bo_relocations: List of buffer object relocations
+ * @res_relocations: List of resource relocations
+ * @buf_start: Pointer to start of memory where command validation takes
+ * place
+ * @res_cache: Cache of recently looked up resources
+ * @last_query_ctx: Last context that submitted a query
+ * @needs_post_query_barrier: Whether a query barrier is needed after
+ * command submission
+ * @staged_bindings: Cached per-context binding tracker
+ * @staged_bindings_inuse: Whether the cached per-context binding tracker
+ * is in use
+ * @staged_cmd_res: List of staged command buffer managed resources in this
+ * command buffer
+ * @ctx_list: List of context resources referenced in this command buffer
+ * @dx_ctx_node: Validation metadata of the current DX context
+ * @dx_query_mob: The MOB used for DX queries
+ * @dx_query_ctx: The DX context used for the last DX query
+ * @man: Pointer to the command buffer managed resource manager
+ * @ctx: The validation context
+ */
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
- bool kernel; /**< is the called made from the kernel */
+ bool kernel;
struct vmw_fpriv *fp;
- struct list_head validate_nodes;
- struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
- uint32_t cur_reloc;
- struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
- uint32_t cur_val_buf;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
- struct list_head resource_list;
- struct list_head ctx_resource_list; /* For contexts and cotables */
struct vmw_buffer_object *cur_query_bo;
+ struct list_head bo_relocations;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
- struct vmw_resource *error_resource;
struct vmw_ctx_binding_state *staged_bindings;
bool staged_bindings_inuse;
struct list_head staged_cmd_res;
- struct vmw_resource_val_node *dx_ctx_node;
+ struct list_head ctx_list;
+ struct vmw_ctx_validation_info *dx_ctx_node;
struct vmw_buffer_object *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
+ struct vmw_validation_context *ctx;
};
struct vmw_legacy_display;
@@ -444,7 +496,7 @@ struct vmw_private {
* Context and surface management.
*/
- rwlock_t resource_lock;
+ spinlock_t resource_lock;
struct idr res_idr[vmw_res_max];
/*
* Block lastclose from racing with firstopen.
@@ -628,7 +680,7 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
-extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -643,6 +695,12 @@ extern int vmw_user_resource_lookup_handle(
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
+extern struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv *
+ converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -662,6 +720,15 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
/**
+ * vmw_user_resource_noref_release - release a user resource pointer looked up
+ * without reference
+ */
+static inline void vmw_user_resource_noref_release(void)
+{
+ ttm_base_object_noref_release();
+}
+
+/**
* Buffer object helper functions - vmwgfx_bo.c
*/
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
@@ -717,6 +784,18 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+extern struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
+
+/**
+ * vmw_user_bo_noref_release - release a buffer object pointer looked up
+ * without reference
+ */
+static inline void vmw_user_bo_noref_release(void)
+{
+ ttm_base_object_noref_release();
+}
+
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -864,10 +943,6 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
uint32_t fence_handle,
int32_t out_fence_fd,
struct sync_file *sync_file);
-extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f0ab6b2313bb..5a6b70ba137a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,6 +35,23 @@
#define VMW_RES_HT_ORDER 12
+/*
+ * struct vmw_relocation - Buffer object relocation
+ *
+ * @head: List head for the command submission context's relocation list
+ * @vbo: Non ref-counted pointer to buffer object
+ * @mob_loc: Pointer to location for mob id to be modified
+ * @location: Pointer to location for guest pointer to be modified
+ */
+struct vmw_relocation {
+ struct list_head head;
+ struct vmw_buffer_object *vbo;
+ union {
+ SVGAMobId *mob_loc;
+ SVGAGuestPtr *location;
+ };
+};
+
/**
* enum vmw_resource_relocation_type - Relocation type for resources
*
@@ -69,35 +86,18 @@ struct vmw_resource_relocation {
enum vmw_resource_relocation_type rel_type:3;
};
-/**
- * struct vmw_resource_val_node - Validation info for resources
- *
- * @head: List head for the software context's resource list.
- * @hash: Hash entry for quick resouce to val_node lookup.
- * @res: Ref-counted pointer to the resource.
- * @switch_backup: Boolean whether to switch backup buffer on unreserve.
- * @new_backup: Refcounted pointer to the new backup buffer.
- * @staged_bindings: If @res is a context, tracks bindings set up during
- * the command batch. Otherwise NULL.
- * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
- * @first_usage: Set to true the first time the resource is referenced in
- * the command stream.
- * @switching_backup: The command stream provides a new backup buffer for a
- * resource.
- * @no_buffer_needed: This means @switching_backup is true on first buffer
- * reference. So resource reservation does not need to allocate a backup
- * buffer for the resource.
+/*
+ * struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ * @head: List head of context list
+ * @ctx: The context resource
+ * @cur: The context's persistent binding state
+ * @staged: The binding state changes of this command buffer
*/
-struct vmw_resource_val_node {
+struct vmw_ctx_validation_info {
struct list_head head;
- struct drm_hash_item hash;
- struct vmw_resource *res;
- struct vmw_buffer_object *new_backup;
- struct vmw_ctx_binding_state *staged_bindings;
- unsigned long new_backup_offset;
- u32 first_usage : 1;
- u32 switching_backup : 1;
- u32 no_buffer_needed : 1;
+ struct vmw_resource *ctx;
+ struct vmw_ctx_binding_state *cur;
+ struct vmw_ctx_binding_state *staged;
};
/**
@@ -127,10 +127,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p);
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_buffer_object *vbo,
- bool validate_as_mob,
- uint32_t *p_val_node);
/**
* vmw_ptr_diff - Compute the offset from a to b in bytes
*
@@ -145,48 +141,38 @@ static size_t vmw_ptr_diff(void *a, void *b)
}
/**
- * vmw_resources_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @sw_context: pointer to the software context
- * @backoff: Whether command submission failed.
+ * vmw_execbuf_bindings_commit - Commit modified binding state
+ * @sw_context: The command submission context
+ * @backoff: Whether this is part of the error path and binding state
+ * changes should be ignored
*/
-static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
- bool backoff)
+static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
+ bool backoff)
{
- struct vmw_resource_val_node *val;
- struct list_head *list = &sw_context->resource_list;
+ struct vmw_ctx_validation_info *entry;
- if (sw_context->dx_query_mob && !backoff)
- vmw_context_bind_dx_query(sw_context->dx_query_ctx,
- sw_context->dx_query_mob);
+ list_for_each_entry(entry, &sw_context->ctx_list, head) {
+ if (!backoff)
+ vmw_binding_state_commit(entry->cur, entry->staged);
+ if (entry->staged != sw_context->staged_bindings)
+ vmw_binding_state_free(entry->staged);
+ else
+ sw_context->staged_bindings_inuse = false;
+ }
- list_for_each_entry(val, list, head) {
- struct vmw_resource *res = val->res;
- bool switch_backup =
- (backoff) ? false : val->switching_backup;
-
- /*
- * Transfer staged context bindings to the
- * persistent context binding tracker.
- */
- if (unlikely(val->staged_bindings)) {
- if (!backoff) {
- vmw_binding_state_commit
- (vmw_context_binding_state(val->res),
- val->staged_bindings);
- }
+ /* List entries are freed with the validation context */
+ INIT_LIST_HEAD(&sw_context->ctx_list);
+}
- if (val->staged_bindings != sw_context->staged_bindings)
- vmw_binding_state_free(val->staged_bindings);
- else
- sw_context->staged_bindings_inuse = false;
- val->staged_bindings = NULL;
- }
- vmw_resource_unreserve(res, switch_backup, val->new_backup,
- val->new_backup_offset);
- vmw_bo_unreference(&val->new_backup);
- }
+/**
+ * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ * @sw_context: The command submission context
+ */
+static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
+{
+ if (sw_context->dx_query_mob)
+ vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+ sw_context->dx_query_mob);
}
/**
@@ -194,16 +180,17 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
* added to the validate list.
*
* @dev_priv: Pointer to the device private:
- * @sw_context: The validation context:
- * @node: The validation node holding this context.
+ * @sw_context: The command submission context
+ * @node: The validation node holding the context resource metadata
*/
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource_val_node *node)
+ struct vmw_resource *res,
+ struct vmw_ctx_validation_info *node)
{
int ret;
- ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_err;
@@ -220,91 +207,138 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
}
if (sw_context->staged_bindings_inuse) {
- node->staged_bindings = vmw_binding_state_alloc(dev_priv);
- if (IS_ERR(node->staged_bindings)) {
+ node->staged = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(node->staged)) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
- ret = PTR_ERR(node->staged_bindings);
- node->staged_bindings = NULL;
+ ret = PTR_ERR(node->staged);
+ node->staged = NULL;
goto out_err;
}
} else {
- node->staged_bindings = sw_context->staged_bindings;
+ node->staged = sw_context->staged_bindings;
sw_context->staged_bindings_inuse = true;
}
+ node->ctx = res;
+ node->cur = vmw_context_binding_state(res);
+ list_add_tail(&node->head, &sw_context->ctx_list);
+
return 0;
out_err:
return ret;
}
/**
- * vmw_resource_val_add - Add a resource to the software context's
- * resource list if it's not already on it.
+ * vmw_execbuf_res_size - calculate extra size for the resource validation
+ * node
+ * @dev_priv: Pointer to the device private struct.
+ * @res_type: The resource type.
*
- * @sw_context: Pointer to the software context.
+ * Guest-backed contexts and DX contexts require extra size to store
+ * execbuf private information in the validation node. Typically the
+ * binding manager associated data structures.
+ *
+ * Returns: The extra size requirement based on resource type.
+ */
+static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
+ enum vmw_res_type res_type)
+{
+ return (res_type == vmw_res_dx_context ||
+ (res_type == vmw_res_context && dev_priv->has_mob)) ?
+ sizeof(struct vmw_ctx_validation_info) : 0;
+}
+
+/**
+ * vmw_execbuf_rcache_update - Update a resource-node cache entry
+ *
+ * @rcache: Pointer to the entry to update.
* @res: Pointer to the resource.
- * @p_node On successful return points to a valid pointer to a
- * struct vmw_resource_val_node, if non-NULL on entry.
+ * @private: Pointer to the execbuf-private space in the resource
+ * validation node.
+ */
+static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
+ struct vmw_resource *res,
+ void *private)
+{
+ rcache->res = res;
+ rcache->private = private;
+ rcache->valid = 1;
+ rcache->valid_handle = 0;
+}
+
+/**
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an
+ * unreferenced rcu-protected pointer to the validation list.
+ * @sw_context: Pointer to the software context.
+ * @res: Unreferenced rcu-protected pointer to the resource.
+ *
+ * Returns: 0 on success. Negative error code on failure. Typical error
+ * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
+ * doomed.
*/
-static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- struct vmw_resource_val_node **p_node)
+static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_resource_val_node *node;
- struct drm_hash_item *hash;
int ret;
-
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
- &hash) == 0)) {
- node = container_of(hash, struct vmw_resource_val_node, hash);
- node->first_usage = false;
- if (unlikely(p_node != NULL))
- *p_node = node;
+ enum vmw_res_type res_type = vmw_res_type(res);
+ struct vmw_res_cache_entry *rcache;
+ struct vmw_ctx_validation_info *ctx_info;
+ bool first_usage;
+ unsigned int priv_size;
+
+ rcache = &sw_context->res_cache[res_type];
+ if (likely(rcache->valid && rcache->res == res)) {
+ vmw_user_resource_noref_release();
return 0;
}
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node)) {
- DRM_ERROR("Failed to allocate a resource validation "
- "entry.\n");
- return -ENOMEM;
- }
-
- node->hash.key = (unsigned long) res;
- ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to initialize a resource validation "
- "entry.\n");
- kfree(node);
+ priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+ ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+ (void **)&ctx_info, &first_usage);
+ vmw_user_resource_noref_release();
+ if (ret)
return ret;
+
+ if (priv_size && first_usage) {
+ ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+ ctx_info);
+ if (ret)
+ return ret;
}
- node->res = vmw_resource_reference(res);
- node->first_usage = true;
- if (unlikely(p_node != NULL))
- *p_node = node;
- if (!dev_priv->has_mob) {
- list_add_tail(&node->head, &sw_context->resource_list);
+ vmw_execbuf_rcache_update(rcache, res, ctx_info);
+ return 0;
+}
+
+/**
+ * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
+ * validation list if it's not already on it
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ *
+ * Returns: Zero on success. Negative error code on failure.
+ */
+static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res)
+{
+ struct vmw_res_cache_entry *rcache;
+ enum vmw_res_type res_type = vmw_res_type(res);
+ void *ptr;
+ int ret;
+
+ rcache = &sw_context->res_cache[res_type];
+ if (likely(rcache->valid && rcache->res == res))
return 0;
- }
- switch (vmw_res_type(res)) {
- case vmw_res_context:
- case vmw_res_dx_context:
- list_add(&node->head, &sw_context->ctx_resource_list);
- ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
- break;
- case vmw_res_cotable:
- list_add_tail(&node->head, &sw_context->ctx_resource_list);
- break;
- default:
- list_add_tail(&node->head, &sw_context->resource_list);
- break;
- }
+ ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
+ if (ret)
+ return ret;
- return ret;
+ vmw_execbuf_rcache_update(rcache, res, ptr);
+
+ return 0;
}
/**
@@ -325,11 +359,11 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
* First add the resource the view is pointing to, otherwise
* it may be swapped out when the view is validated.
*/
- ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
if (ret)
return ret;
- return vmw_resource_val_add(sw_context, view, NULL);
+ return vmw_execbuf_res_noctx_val_add(sw_context, view);
}
/**
@@ -342,28 +376,33 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
*
* The view is represented by a view id and the DX context it's created on,
* or scheduled for creation on. If there is no DX context set, the function
- * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
+ * will return an -EINVAL error pointer.
+ *
+ * Returns: Unreferenced pointer to the resource on success, negative error
+ * pointer on failure.
*/
-static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
- enum vmw_view_type view_type, u32 id)
+static struct vmw_resource *
+vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+ enum vmw_view_type view_type, u32 id)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *view;
int ret;
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
view = vmw_view_lookup(sw_context->man, view_type, id);
if (IS_ERR(view))
- return PTR_ERR(view);
+ return view;
ret = vmw_view_res_val_add(sw_context, view);
- vmw_resource_unreference(&view);
+ if (ret)
+ return ERR_PTR(ret);
- return ret;
+ return view;
}
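
Since vmw_view_id_val_add() now returns the view itself, callers can either
use the pointer or, when only the validation side effect matters, reduce the
error pointer to an error code with PTR_RET(), as the clear-view and genmips
checkers below do. A minimal sketch of both call shapes:

        /* Use the returned, unreferenced view directly... */
        view = vmw_view_id_val_add(sw_context, view_type, view_ids[i]);
        if (IS_ERR(view))
                return PTR_ERR(view);

        /* ...or collapse the check when only the side effect is wanted. */
        return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
                                           cmd->body.renderTargetViewId));
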
/**
@@ -394,8 +433,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (IS_ERR(res))
continue;
- ret = vmw_resource_val_add(sw_context, res, NULL);
- vmw_resource_unreference(&res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (unlikely(ret != 0))
return ret;
}
@@ -407,17 +445,11 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
binding_list = vmw_context_binding_list(ctx);
list_for_each_entry(entry, binding_list, ctx_list) {
- /* entry->res is not refcounted */
- res = vmw_resource_reference_unless_doomed(entry->res);
- if (unlikely(res == NULL))
- continue;
-
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
- ret = vmw_resource_val_add(sw_context, entry->res,
- NULL);
- vmw_resource_unreference(&res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context,
+ entry->res);
if (unlikely(ret != 0))
break;
}
@@ -427,9 +459,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
- ret = vmw_bo_to_validate_list(sw_context,
- dx_query_mob,
- true, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ dx_query_mob, true, false);
}
mutex_unlock(&dev_priv->binding_mutex);
@@ -445,7 +476,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* id that needs fixup is located. Granularity is one byte.
* @rel_type: Relocation type.
*/
-static int vmw_resource_relocation_add(struct list_head *list,
+static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
const struct vmw_resource *res,
unsigned long offset,
enum vmw_resource_relocation_type
@@ -453,7 +484,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
{
struct vmw_resource_relocation *rel;
- rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+ rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
if (unlikely(!rel)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
@@ -462,7 +493,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res;
rel->offset = offset;
rel->rel_type = rel_type;
- list_add_tail(&rel->head, list);
+ list_add_tail(&rel->head, &sw_context->res_relocations);
return 0;
}
@@ -470,16 +501,13 @@ static int vmw_resource_relocation_add(struct list_head *list,
/**
* vmw_resource_relocations_free - Free all relocations on a list
*
- * @list: Pointer to the head of the relocation list.
+ * @list: Pointer to the head of the relocation list
*/
static void vmw_resource_relocations_free(struct list_head *list)
{
- struct vmw_resource_relocation *rel, *n;
+ /* Memory is validation context memory, so no need to free it */
- list_for_each_entry_safe(rel, n, list, head) {
- list_del(&rel->head);
- kfree(rel);
- }
+ INIT_LIST_HEAD(list);
}
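
Both relocation flavors are now carved out of the validation context's pooled
memory via vmw_validation_mem_alloc(), so freeing a list is just forgetting
its head. A sketch of the resulting teardown order in the error path of
vmw_execbuf_process() further down, assuming (as the comment above implies)
that vmw_validation_unref_lists() is what finally returns the pooled memory:

        /* Drop the list heads; entries are pool memory, never kfree()d. */
        vmw_resource_relocations_free(&sw_context->res_relocations);
        vmw_free_relocations(sw_context);

        /* The pooled memory itself goes away with the validation context. */
        vmw_validation_unref_lists(&val_ctx);
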
/**
@@ -532,68 +560,6 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
}
/**
- * vmw_bo_to_validate_list - add a bo to a validate list
- *
- * @sw_context: The software context used for this command submission batch.
- * @bo: The buffer object to add.
- * @validate_as_mob: Validate this buffer as a MOB.
- * @p_val_node: If non-NULL Will be updated with the validate node number
- * on return.
- *
- * Returns -EINVAL if the limit of number of buffer objects per command
- * submission is reached.
- */
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_buffer_object *vbo,
- bool validate_as_mob,
- uint32_t *p_val_node)
-{
- uint32_t val_node;
- struct vmw_validate_buffer *vval_buf;
- struct ttm_validate_buffer *val_buf;
- struct drm_hash_item *hash;
- int ret;
-
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
- &hash) == 0)) {
- vval_buf = container_of(hash, struct vmw_validate_buffer,
- hash);
- if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
- DRM_ERROR("Inconsistent buffer usage.\n");
- return -EINVAL;
- }
- val_buf = &vval_buf->base;
- val_node = vval_buf - sw_context->val_bufs;
- } else {
- val_node = sw_context->cur_val_buf;
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission "
- "exceeded.\n");
- return -EINVAL;
- }
- vval_buf = &sw_context->val_bufs[val_node];
- vval_buf->hash.key = (unsigned long) vbo;
- ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to initialize a buffer validation "
- "entry.\n");
- return ret;
- }
- ++sw_context->cur_val_buf;
- val_buf = &vval_buf->base;
- val_buf->bo = ttm_bo_reference(&vbo->base);
- val_buf->shared = false;
- list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- vval_buf->validate_as_mob = validate_as_mob;
- }
-
- if (p_val_node)
- *p_val_node = val_node;
-
- return 0;
-}
-
-/**
* vmw_resources_reserve - Reserve all resources on the sw_context's
* resource list.
*
@@ -605,27 +571,11 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
- struct vmw_resource_val_node *val;
- int ret = 0;
-
- list_for_each_entry(val, &sw_context->resource_list, head) {
- struct vmw_resource *res = val->res;
-
- ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
- if (unlikely(ret != 0))
- return ret;
-
- if (res->backup) {
- struct vmw_buffer_object *vbo = res->backup;
-
- ret = vmw_bo_to_validate_list
- (sw_context, vbo,
- vmw_resource_needs_backup(res), NULL);
+ int ret;
- if (unlikely(ret != 0))
- return ret;
- }
- }
+ ret = vmw_validation_res_reserve(sw_context->ctx, true);
+ if (ret)
+ return ret;
if (sw_context->dx_query_mob) {
struct vmw_buffer_object *expected_dx_query_mob;
@@ -642,87 +592,6 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
/**
- * vmw_resources_validate - Validate all resources on the sw_context's
- * resource list.
- *
- * @sw_context: Pointer to the software context.
- *
- * Before this function is called, all resource backup buffers must have
- * been validated.
- */
-static int vmw_resources_validate(struct vmw_sw_context *sw_context)
-{
- struct vmw_resource_val_node *val;
- int ret;
-
- list_for_each_entry(val, &sw_context->resource_list, head) {
- struct vmw_resource *res = val->res;
- struct vmw_buffer_object *backup = res->backup;
-
- ret = vmw_resource_validate(res);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to validate resource.\n");
- return ret;
- }
-
- /* Check if the resource switched backup buffer */
- if (backup && res->backup && (backup != res->backup)) {
- struct vmw_buffer_object *vbo = res->backup;
-
- ret = vmw_bo_to_validate_list
- (sw_context, vbo,
- vmw_resource_needs_backup(res), NULL);
- if (ret) {
- ttm_bo_unreserve(&vbo->base);
- return ret;
- }
- }
- }
- return 0;
-}
-
-/**
- * vmw_cmd_res_reloc_add - Add a resource to a software context's
- * relocation- and validation lists.
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @sw_context: Pointer to the software context.
- * @id_loc: Pointer to where the id that needs translation is located.
- * @res: Valid pointer to a struct vmw_resource.
- * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
- * used for this resource is returned here.
- */
-static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- uint32_t *id_loc,
- struct vmw_resource *res,
- struct vmw_resource_val_node **p_val)
-{
- int ret;
- struct vmw_resource_val_node *node;
-
- *p_val = NULL;
- ret = vmw_resource_relocation_add(&sw_context->res_relocations,
- res,
- vmw_ptr_diff(sw_context->buf_start,
- id_loc),
- vmw_res_rel_normal);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_resource_val_add(sw_context, res, &node);
- if (unlikely(ret != 0))
- return ret;
-
- if (p_val)
- *p_val = node;
-
- return 0;
-}
-
-
-/**
* vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
@@ -741,17 +610,16 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
+ struct vmw_resource **p_res)
{
- struct vmw_res_cache_entry *rcache =
- &sw_context->res_cache[res_type];
+ struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
struct vmw_resource *res;
- struct vmw_resource_val_node *node;
int ret;
+ if (p_res)
+ *p_res = NULL;
+
if (*id_loc == SVGA3D_INVALID_ID) {
- if (p_val)
- *p_val = NULL;
if (res_type == vmw_res_context) {
DRM_ERROR("Illegal context invalid id.\n");
return -EINVAL;
@@ -759,56 +627,41 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return 0;
}
- /*
- * Fastpath in case of repeated commands referencing the same
- * resource
- */
+ if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
+ res = rcache->res;
+ } else {
+ unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
- if (likely(rcache->valid && *id_loc == rcache->handle)) {
- const struct vmw_resource *res = rcache->res;
+ ret = vmw_validation_preload_res(sw_context->ctx, size);
+ if (ret)
+ return ret;
- rcache->node->first_usage = false;
- if (p_val)
- *p_val = rcache->node;
+ res = vmw_user_resource_noref_lookup_handle
+ (dev_priv, sw_context->fp->tfile, *id_loc, converter);
+ if (unlikely(IS_ERR(res))) {
+ DRM_ERROR("Could not find or use resource 0x%08x.\n",
+ (unsigned int) *id_loc);
+ return PTR_ERR(res);
+ }
- return vmw_resource_relocation_add
- (&sw_context->res_relocations, res,
- vmw_ptr_diff(sw_context->buf_start, id_loc),
- vmw_res_rel_normal);
- }
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+ if (unlikely(ret != 0))
+ return ret;
- ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->fp->tfile,
- *id_loc,
- converter,
- &res);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id_loc);
- dump_stack();
- return ret;
+ if (rcache->valid && rcache->res == res) {
+ rcache->valid_handle = true;
+ rcache->handle = *id_loc;
+ }
}
- rcache->valid = true;
- rcache->res = res;
- rcache->handle = *id_loc;
-
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
- res, &node);
- if (unlikely(ret != 0))
- goto out_no_reloc;
+ ret = vmw_resource_relocation_add(sw_context, res,
+ vmw_ptr_diff(sw_context->buf_start,
+ id_loc),
+ vmw_res_rel_normal);
+ if (p_res)
+ *p_res = res;
- rcache->node = node;
- if (p_val)
- *p_val = node;
- vmw_resource_unreference(&res);
return 0;
-
-out_no_reloc:
- BUG_ON(sw_context->error_resource != NULL);
- sw_context->error_resource = res;
-
- return ret;
}
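
The rewritten vmw_cmd_res_check() gives the per-type cache two validity
levels: rcache->valid pairs the cached resource pointer with its private
info, while rcache->valid_handle additionally records the user-space handle
so that back-to-back commands naming the same id skip the lookup entirely.
A compressed sketch of that decision; lookup_and_add() is a hypothetical
stand-in for the preload/noref-lookup/val-add sequence above:

        if (rcache->valid_handle && *id_loc == rcache->handle) {
                res = rcache->res;   /* full fast path: no lookup, no add */
        } else {
                res = lookup_and_add(sw_context, *id_loc); /* hypothetical */
                if (rcache->valid && rcache->res == res) {
                        /* the add refreshed the cache; remember the handle */
                        rcache->valid_handle = true;
                        rcache->handle = *id_loc;
                }
        }
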
/**
@@ -861,22 +714,18 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
*/
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
- struct vmw_resource_val_node *val;
+ struct vmw_ctx_validation_info *val;
int ret;
- list_for_each_entry(val, &sw_context->resource_list, head) {
- if (unlikely(!val->staged_bindings))
- break;
-
- ret = vmw_binding_rebind_all
- (vmw_context_binding_state(val->res));
+ list_for_each_entry(val, &sw_context->ctx_list, head) {
+ ret = vmw_binding_rebind_all(val->cur);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
- ret = vmw_rebind_all_dx_query(val->res);
+ ret = vmw_rebind_all_dx_query(val->ctx);
if (ret != 0)
return ret;
}
@@ -903,45 +752,33 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
uint32 view_ids[], u32 num_views,
u32 first_slot)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
- struct vmw_cmdbuf_res_manager *man;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
u32 i;
- int ret;
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
- man = sw_context->man;
for (i = 0; i < num_views; ++i) {
struct vmw_ctx_bindinfo_view binding;
struct vmw_resource *view = NULL;
if (view_ids[i] != SVGA3D_INVALID_ID) {
- view = vmw_view_lookup(man, view_type, view_ids[i]);
+ view = vmw_view_id_val_add(sw_context, view_type,
+ view_ids[i]);
if (IS_ERR(view)) {
DRM_ERROR("View not found.\n");
return PTR_ERR(view);
}
-
- ret = vmw_view_res_val_add(sw_context, view);
- if (ret) {
- DRM_ERROR("Could not add view to "
- "validation list.\n");
- vmw_resource_unreference(&view);
- return ret;
- }
}
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.res = view;
binding.bi.bt = binding_type;
binding.shader_slot = shader_slot;
binding.slot = first_slot + i;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
shader_slot, binding.slot);
- if (view)
- vmw_resource_unreference(&view);
}
return 0;
@@ -971,6 +808,34 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
user_context_converter, &cmd->cid, NULL);
}
+/**
+ * vmw_execbuf_info_from_res - Get the private validation metadata for a
+ * recently validated resource
+ * @sw_context: Pointer to the command submission context
+ * @res: The resource
+ *
+ * The resource pointed to by @res needs to be present in the command
+ * submission context's resource cache, and hence must be the last resource
+ * of that type to have been processed by the validation code.
+ *
+ * Return: a pointer to the private metadata of the resource, or NULL
+ * if it wasn't found
+ */
+static struct vmw_ctx_validation_info *
+vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res)
+{
+ struct vmw_res_cache_entry *rcache =
+ &sw_context->res_cache[vmw_res_type(res)];
+
+ if (rcache->valid && rcache->res == res)
+ return rcache->private;
+
+ WARN_ON_ONCE(true);
+ return NULL;
+}
+
+
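
The typical pairing, used by the binding checkers that follow: check the id
first, then fetch the private info while the context is still the most
recently processed resource of its type. Sketch; cmd and binding fields are
per-command:

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid, &ctx);
        if (ret)
                return ret;

        node = vmw_execbuf_info_from_res(sw_context, ctx);
        if (!node)
                return -EINVAL;

        vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
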
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -979,8 +844,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
@@ -993,25 +858,29 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.target.sid, &res_node);
- if (unlikely(ret != 0))
+ user_surface_converter, &cmd->body.target.sid,
+ &res);
+ if (unlikely(ret))
return ret;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_view binding;
+ struct vmw_ctx_validation_info *node;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ node = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!node)
+ return -EINVAL;
+
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_rt;
binding.slot = cmd->body.type;
- vmw_binding_add(ctx_node->staged_bindings,
- &binding.bi, 0, binding.slot);
+ vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
}
return 0;
@@ -1030,8 +899,8 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.src.sid, NULL);
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (ret)
return ret;
@@ -1171,17 +1040,17 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
- ret = vmw_bo_to_validate_list(sw_context,
- sw_context->cur_query_bo,
- dev_priv->has_mob, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ sw_context->cur_query_bo,
+ dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- dev_priv->has_mob, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ dev_priv->dummy_query_bo,
+ dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
@@ -1269,7 +1138,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry
- * a reference-counted pointer to the DMA buffer identified by the
+ * a non-reference-counted pointer to the buffer object identified by the
* user-space handle in @id.
*
* This function saves information needed to translate a user-space buffer
@@ -1284,40 +1153,34 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_buffer_object *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
- if (unlikely(ret != 0)) {
+ vmw_validation_preload_bo(sw_context->ctx);
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+ if (IS_ERR(vmw_bo)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
- ret = -EINVAL;
- goto out_no_reloc;
+ return PTR_ERR(vmw_bo);
}
- if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
- DRM_ERROR("Max number relocations per submission"
- " exceeded\n");
- ret = -EINVAL;
- goto out_no_reloc;
- }
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+ vmw_user_bo_noref_release();
+ if (unlikely(ret != 0))
+ return ret;
- reloc = &sw_context->relocs[sw_context->cur_reloc++];
- reloc->mob_loc = id;
- reloc->location = NULL;
+ reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+ if (!reloc)
+ return -ENOMEM;
- ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
- if (unlikely(ret != 0))
- goto out_no_reloc;
+ reloc->mob_loc = id;
+ reloc->vbo = vmw_bo;
*vmw_bo_p = vmw_bo;
- return 0;
+ list_add_tail(&reloc->head, &sw_context->bo_relocations);
-out_no_reloc:
- vmw_bo_unreference(&vmw_bo);
- *vmw_bo_p = NULL;
- return ret;
+ return 0;
}
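
Note the ownership change at every call site: the buffer object handed back
through @vmw_bo_p is kept alive by the validation context rather than by a
caller-held reference, which is why the query and DMA hunks below simply
delete their vmw_bo_unreference() calls. Caller-side sketch, with
cmd->body.mobid as in the bind-shader hunk:

        struct vmw_buffer_object *vmw_bo;
        int ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
                                    &vmw_bo);
        if (ret)
                return ret;

        /* vmw_bo stays valid for the rest of the submission; no unref. */
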
/**
@@ -1328,7 +1191,7 @@ out_no_reloc:
* @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry
- * a reference-counted pointer to the DMA buffer identified by the
+ * a non-reference-counted pointer to the DMA buffer identified by the
* user-space handle in @id.
*
* This function saves information needed to translate a user-space buffer
@@ -1344,39 +1207,33 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
SVGAGuestPtr *ptr,
struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_buffer_object *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
- if (unlikely(ret != 0)) {
+ vmw_validation_preload_bo(sw_context->ctx);
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+ if (IS_ERR(vmw_bo)) {
DRM_ERROR("Could not find or use GMR region.\n");
- ret = -EINVAL;
- goto out_no_reloc;
- }
-
- if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
- DRM_ERROR("Max number relocations per submission"
- " exceeded\n");
- ret = -EINVAL;
- goto out_no_reloc;
+ return PTR_ERR(vmw_bo);
}
- reloc = &sw_context->relocs[sw_context->cur_reloc++];
- reloc->location = ptr;
-
- ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+ vmw_user_bo_noref_release();
if (unlikely(ret != 0))
- goto out_no_reloc;
+ return ret;
+
+ reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+ if (!reloc)
+ return -ENOMEM;
+ reloc->location = ptr;
+ reloc->vbo = vmw_bo;
*vmw_bo_p = vmw_bo;
- return 0;
+ list_add_tail(&reloc->head, &sw_context->bo_relocations);
-out_no_reloc:
- vmw_bo_unreference(&vmw_bo);
- *vmw_bo_p = NULL;
- return ret;
+ return 0;
}
@@ -1400,7 +1257,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
} *cmd;
int ret;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *cotable_res;
@@ -1415,9 +1272,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
- cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+ cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
- vmw_resource_unreference(&cotable_res);
return ret;
}
@@ -1462,11 +1318,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
return ret;
sw_context->dx_query_mob = vmw_bo;
- sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
-
- vmw_bo_unreference(&vmw_bo);
-
- return ret;
+ sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
+ return 0;
}
@@ -1567,7 +1420,6 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1621,7 +1473,6 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1654,7 +1505,6 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1706,7 +1556,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1757,7 +1606,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
DRM_ERROR("could not find surface for DMA.\n");
- goto out_no_surface;
+ return ret;
}
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
@@ -1765,9 +1614,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
header);
-out_no_surface:
- vmw_bo_unreference(&vmw_bo);
- return ret;
+ return 0;
}
static int vmw_cmd_draw(struct vmw_private *dev_priv,
@@ -1837,8 +1684,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
int ret;
cmd = container_of(header, struct vmw_tex_state_cmd,
@@ -1846,7 +1693,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->state.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
@@ -1862,19 +1709,24 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cur_state->value, &res_node);
+ &cur_state->value, &res);
if (unlikely(ret != 0))
return ret;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_tex binding;
+ struct vmw_ctx_validation_info *node;
+
+ node = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!node)
+ return -EINVAL;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_tex;
binding.texture_stage = cur_state->stage;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
- 0, binding.texture_stage);
+ vmw_binding_add(node->staged, &binding.bi, 0,
+ binding.texture_stage);
}
}
@@ -1893,14 +1745,9 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
SVGAFifoCmdDefineGMRFB body;
} *cmd = buf;
- ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->body.ptr,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_bo_unreference(&vmw_bo);
-
+ return vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->body.ptr,
+ &vmw_bo);
- return ret;
}
@@ -1922,25 +1769,24 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
*/
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource_val_node *val_node,
+ struct vmw_resource *res,
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_buffer_object *dma_buf;
+ struct vmw_buffer_object *vbo;
+ void *info;
int ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ info = vmw_execbuf_info_from_res(sw_context, res);
+ if (!info)
+ return -EINVAL;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
if (ret)
return ret;
- val_node->switching_backup = true;
- if (val_node->first_usage)
- val_node->no_buffer_needed = true;
-
- vmw_bo_unreference(&val_node->new_backup);
- val_node->new_backup = dma_buf;
- val_node->new_backup_offset = backup_offset;
-
+ vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
+ backup_offset);
return 0;
}
@@ -1970,15 +1816,15 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_resource_val_node *val_node;
+ struct vmw_resource *res;
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
- converter, res_id, &val_node);
+ converter, res_id, &res);
if (ret)
return ret;
- return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
buf_id, backup_offset);
}
@@ -2170,14 +2016,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
} *cmd;
int ret;
size_t size;
- struct vmw_resource_val_node *val;
+ struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &val);
+ &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2186,14 +2032,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
size = cmd->header.size - sizeof(cmd->body);
ret = vmw_compat_shader_add(dev_priv,
- vmw_context_res_man(val->res),
+ vmw_context_res_man(ctx),
cmd->body.shid, cmd + 1,
cmd->body.type, size,
&sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(&sw_context->res_relocations,
+ return vmw_resource_relocation_add(sw_context,
NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
@@ -2217,28 +2063,28 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
- struct vmw_resource_val_node *val;
+ struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &val);
+ &ctx);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_shader_remove(vmw_context_res_man(val->res),
+ ret = vmw_shader_remove(vmw_context_res_man(ctx),
cmd->body.shid,
cmd->body.type,
&sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(&sw_context->res_relocations,
+ return vmw_resource_relocation_add(sw_context,
NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
@@ -2261,9 +2107,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
- struct vmw_resource_val_node *ctx_node, *res_node = NULL;
struct vmw_ctx_bindinfo_shader binding;
- struct vmw_resource *res = NULL;
+ struct vmw_resource *ctx, *res = NULL;
+ struct vmw_ctx_validation_info *ctx_info;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -2277,7 +2123,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2285,34 +2131,35 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) {
- res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid,
cmd->body.type);
if (!IS_ERR(res)) {
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
- &cmd->body.shid, res,
- &res_node);
- vmw_resource_unreference(&res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (unlikely(ret != 0))
return ret;
}
}
- if (!res_node) {
+ if (IS_ERR_OR_NULL(res)) {
ret = vmw_cmd_res_check(dev_priv, sw_context,
vmw_res_shader,
user_shader_converter,
- &cmd->body.shid, &res_node);
+ &cmd->body.shid, &res);
if (unlikely(ret != 0))
return ret;
}
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!ctx_info)
+ return -EINVAL;
+
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_info->staged, &binding.bi,
binding.shader_slot, 0);
return 0;
}
@@ -2393,8 +2240,8 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- struct vmw_resource_val_node *res_node = NULL;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res = NULL;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_cb binding;
int ret;
@@ -2406,12 +2253,12 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.sid, &res_node);
+ &cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
binding.offset = cmd->body.offsetInBytes;
@@ -2426,7 +2273,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
return -EINVAL;
}
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, binding.slot);
return 0;
@@ -2482,7 +2329,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdDXSetShader body;
} *cmd;
struct vmw_resource *res = NULL;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_shader binding;
int ret = 0;
@@ -2506,23 +2353,20 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
return PTR_ERR(res);
}
- ret = vmw_resource_val_add(sw_context, res, NULL);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (ret)
- goto out_unref;
+ return ret;
}
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, 0);
-out_unref:
- if (res)
- vmw_resource_unreference(&res);
- return ret;
+ return 0;
}
/**
@@ -2537,9 +2381,9 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_vb binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetVertexBuffers body;
@@ -2564,18 +2408,18 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->buf[i].sid, &res_node);
+ &cmd->buf[i].sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.bt = vmw_ctx_binding_vb;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.res = res;
binding.offset = cmd->buf[i].offset;
binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot);
}
@@ -2594,9 +2438,9 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_ib binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetIndexBuffer body;
@@ -2611,17 +2455,17 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.sid, &res_node);
+ &cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_ib;
binding.offset = cmd->body.offset;
binding.format = cmd->body.format;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
return 0;
}
@@ -2679,8 +2523,8 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
SVGA3dCmdDXClearRenderTargetView body;
} *cmd = container_of(header, typeof(*cmd), header);
- return vmw_view_id_val_add(sw_context, vmw_view_rt,
- cmd->body.renderTargetViewId);
+ return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
+ cmd->body.renderTargetViewId));
}
/**
@@ -2700,16 +2544,16 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
SVGA3dCmdDXClearDepthStencilView body;
} *cmd = container_of(header, typeof(*cmd), header);
- return vmw_view_id_val_add(sw_context, vmw_view_ds,
- cmd->body.depthStencilViewId);
+ return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
+ cmd->body.depthStencilViewId));
}
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
- struct vmw_resource_val_node *srf_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *srf;
struct vmw_resource *res;
enum vmw_view_type view_type;
int ret;
@@ -2734,19 +2578,18 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->sid, &srf_node);
+ &cmd->sid, &srf);
if (unlikely(ret != 0))
return ret;
- res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+ res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
ret = vmw_cotable_notify(res, cmd->defined_id);
- vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
return vmw_view_add(sw_context->man,
- ctx_node->res,
- srf_node->res,
+ ctx_node->ctx,
+ srf,
view_type,
cmd->defined_id,
header,
@@ -2766,9 +2609,9 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_so binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSOTargets body;
@@ -2793,18 +2636,18 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->targets[i].sid, &res_node);
+ &cmd->targets[i].sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_so;
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot);
}
@@ -2815,7 +2658,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
/*
* This is based on the fact that all affected define commands have
@@ -2834,10 +2677,9 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
}
so_type = vmw_so_cmd_to_type(header->id);
- res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+ res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id);
- vmw_resource_unreference(&res);
return ret;
}
@@ -2882,7 +2724,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
@@ -2907,7 +2749,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
@@ -2934,7 +2776,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
* relocation to conditionally make this command a NOP to avoid
* device errors.
*/
- return vmw_resource_relocation_add(&sw_context->res_relocations,
+ return vmw_resource_relocation_add(sw_context,
view,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
@@ -2953,7 +2795,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
@@ -2966,13 +2808,12 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
return -EINVAL;
}
- res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId);
- vmw_resource_unreference(&res);
if (ret)
return ret;
- return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+ return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
cmd->body.shaderId, cmd->body.type,
&sw_context->staged_cmd_res);
}
@@ -2989,7 +2830,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDestroyShader body;
@@ -3021,8 +2862,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
@@ -3033,38 +2873,33 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter,
- &cmd->body.cid, &ctx_node);
+ &cmd->body.cid, &ctx);
if (ret)
return ret;
} else {
- ctx_node = sw_context->dx_ctx_node;
- if (!ctx_node) {
+ if (!sw_context->dx_ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
+ ctx = sw_context->dx_ctx_node->ctx;
}
- res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, 0);
if (IS_ERR(res)) {
DRM_ERROR("Could not find shader to bind.\n");
return PTR_ERR(res);
}
- ret = vmw_resource_val_add(sw_context, res, &res_node);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (ret) {
DRM_ERROR("Error creating resource validation node.\n");
- goto out_unref;
+ return ret;
}
-
- ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
- &cmd->body.mobid,
- cmd->body.offsetInBytes);
-out_unref:
- vmw_resource_unreference(&res);
-
- return ret;
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+ &cmd->body.mobid,
+ cmd->body.offsetInBytes);
}
/**
@@ -3083,8 +2918,8 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
SVGA3dCmdDXGenMips body;
} *cmd = container_of(header, typeof(*cmd), header);
- return vmw_view_id_val_add(sw_context, vmw_view_sr,
- cmd->body.shaderResourceViewId);
+ return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId));
}
/**
@@ -3638,20 +3473,18 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
- sw_context->cur_reloc = 0;
+ /* Memory is validation context memory, so no need to free it */
+
+ INIT_LIST_HEAD(&sw_context->bo_relocations);
}
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
- uint32_t i;
struct vmw_relocation *reloc;
- struct ttm_validate_buffer *validate;
struct ttm_buffer_object *bo;
- for (i = 0; i < sw_context->cur_reloc; ++i) {
- reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index].base;
- bo = validate->bo;
+ list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
+ bo = &reloc->vbo->base;
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
@@ -3670,110 +3503,6 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
vmw_free_relocations(sw_context);
}
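
Compared with the old fixed relocs[] array bounded by VMWGFX_MAX_RELOCATIONS,
relocations are now an unbounded pool-backed list that carries the buffer
object pointer directly, so applying them needs no index into a
validation-buffer array. Sketch of the loop's shape; the non-VRAM arms are
elided from the hunk above and only summarized in a comment here:

        list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
                bo = &reloc->vbo->base;
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        reloc->location->offset += bo->offset;
                        break;
                default:
                        /* other placements patch reloc->location->gmrId or
                         * *reloc->mob_loc instead (arms not shown above) */
                        break;
                }
        }
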
-/**
- * vmw_resource_list_unrefererence - Free up a resource list and unreference
- * all resources referenced by it.
- *
- * @list: The resource list.
- */
-static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
- struct list_head *list)
-{
- struct vmw_resource_val_node *val, *val_next;
-
- /*
- * Drop references to resources held during command submission.
- */
-
- list_for_each_entry_safe(val, val_next, list, head) {
- list_del_init(&val->head);
- vmw_resource_unreference(&val->res);
-
- if (val->staged_bindings) {
- if (val->staged_bindings != sw_context->staged_bindings)
- vmw_binding_state_free(val->staged_bindings);
- else
- sw_context->staged_bindings_inuse = false;
- val->staged_bindings = NULL;
- }
-
- kfree(val);
- }
-}
-
-static void vmw_clear_validations(struct vmw_sw_context *sw_context)
-{
- struct vmw_validate_buffer *entry, *next;
- struct vmw_resource_val_node *val;
-
- /*
- * Drop references to DMA buffers held during command submission.
- */
- list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- base.head) {
- list_del(&entry->base.head);
- ttm_bo_unref(&entry->base.bo);
- (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
- sw_context->cur_val_buf--;
- }
- BUG_ON(sw_context->cur_val_buf != 0);
-
- list_for_each_entry(val, &sw_context->resource_list, head)
- (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
-}
-
-int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob)
-{
- struct vmw_buffer_object *vbo =
- container_of(bo, struct vmw_buffer_object, base);
- struct ttm_operation_ctx ctx = { interruptible, false };
- int ret;
-
- if (vbo->pin_count > 0)
- return 0;
-
- if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
-
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
- if (likely(ret == 0 || ret == -ERESTARTSYS))
- return ret;
-
- /**
- * If that failed, try VRAM again, this time evicting
- * previous contents.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
- return ret;
-}
-
-static int vmw_validate_buffers(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context)
-{
- struct vmw_validate_buffer *entry;
- int ret;
-
- list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
- true,
- entry->validate_as_mob);
- if (unlikely(ret != 0))
- return ret;
- }
- return 0;
-}
-
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
uint32_t size)
{
@@ -3946,7 +3675,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
if (sw_context->dx_ctx_node)
cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
- sw_context->dx_ctx_node->res->id);
+ sw_context->dx_ctx_node->ctx->id);
else
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (!cmd) {
@@ -3980,7 +3709,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
u32 command_size,
struct vmw_sw_context *sw_context)
{
- u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+ u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID);
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
id, false, header);
@@ -4057,31 +3786,35 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t handle)
{
- struct vmw_resource_val_node *ctx_node;
struct vmw_resource *res;
int ret;
+ unsigned int size;
if (handle == SVGA3D_INVALID_ID)
return 0;
- ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
- handle, user_context_converter,
- &res);
- if (unlikely(ret != 0)) {
+ size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
+ ret = vmw_validation_preload_res(sw_context->ctx, size);
+ if (ret)
+ return ret;
+
+ res = vmw_user_resource_noref_lookup_handle
+ (dev_priv, sw_context->fp->tfile, handle,
+ user_context_converter);
+ if (unlikely(IS_ERR(res))) {
DRM_ERROR("Could not find or user DX context 0x%08x.\n",
(unsigned) handle);
- return ret;
+ return PTR_ERR(res);
}
- ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res);
if (unlikely(ret != 0))
- goto out_err;
+ return ret;
- sw_context->dx_ctx_node = ctx_node;
+ sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
sw_context->man = vmw_context_res_man(res);
-out_err:
- vmw_resource_unreference(&res);
- return ret;
+
+ return 0;
}
int vmw_execbuf_process(struct drm_file *file_priv,
@@ -4097,15 +3830,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
- struct vmw_resource *error_resource;
- struct list_head resource_list;
struct vmw_cmdbuf_header *header;
- struct ww_acquire_ctx ticket;
uint32_t handle;
int ret;
int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL;
-
+ DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -4157,10 +3887,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
sw_context->fp = vmw_fpriv(file_priv);
- sw_context->cur_reloc = 0;
- sw_context->cur_val_buf = 0;
- INIT_LIST_HEAD(&sw_context->resource_list);
- INIT_LIST_HEAD(&sw_context->ctx_resource_list);
+ INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
@@ -4168,8 +3895,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->dx_query_mob = NULL;
sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
- INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
+ INIT_LIST_HEAD(&sw_context->bo_relocations);
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
@@ -4180,24 +3907,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
- INIT_LIST_HEAD(&resource_list);
+ sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
- if (unlikely(ret != 0)) {
- list_splice_init(&sw_context->ctx_resource_list,
- &sw_context->resource_list);
+ if (unlikely(ret != 0))
goto out_err_nores;
- }
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
- /*
- * Merge the resource lists before checking the return status
- * from vmd_cmd_check_all so that all the open hashtabs will
- * be handled properly even if vmw_cmd_check_all fails.
- */
- list_splice_init(&sw_context->ctx_resource_list,
- &sw_context->resource_list);
-
if (unlikely(ret != 0))
goto out_err_nores;
@@ -4205,18 +3921,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err_nores;
- ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
- true, NULL);
+ ret = vmw_validation_bo_reserve(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err_nores;
- ret = vmw_validate_buffers(dev_priv, sw_context);
+ ret = vmw_validation_bo_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
- ret = vmw_resources_validate(sw_context);
+ ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
+ vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
@@ -4255,17 +3971,16 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- vmw_resources_unreserve(sw_context, false);
+ vmw_execbuf_bindings_commit(sw_context, false);
+ vmw_bind_dx_query_mob(sw_context);
+ vmw_validation_res_unreserve(&val_ctx, false);
- ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
- (void *) fence);
+ vmw_validation_bo_fence(sw_context->ctx, fence);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
- vmw_clear_validations(sw_context);
-
/*
* If anything fails here, give up trying to export the fence
* and do a sync since the user mode will not be able to sync
@@ -4300,7 +4015,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence);
}
- list_splice_init(&sw_context->resource_list, &resource_list);
vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
@@ -4308,36 +4022,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(sw_context, &resource_list);
+ vmw_validation_unref_lists(&val_ctx);
return 0;
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
- ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+ vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
- vmw_resources_unreserve(sw_context, true);
+ vmw_execbuf_bindings_commit(sw_context, true);
+ vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
- vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
- list_splice_init(&sw_context->resource_list, &resource_list);
- error_resource = sw_context->error_resource;
- sw_context->error_resource = NULL;
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
+ vmw_validation_drop_ht(&val_ctx);
+ WARN_ON(!list_empty(&sw_context->ctx_list));
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(sw_context, &resource_list);
- if (unlikely(error_resource != NULL))
- vmw_resource_unreference(&error_resource);
+ vmw_validation_unref_lists(&val_ctx);
out_free_header:
if (header)
vmw_cmdbuf_header_free(header);
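
Pulling the pieces together, vmw_execbuf_process() now follows a single
validation-context lifecycle. A condensed, happy-path sketch of the ordering
introduced above; every return value is checked in the real function and the
error unwinding runs through the out_err/out_err_nores labels:

        DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
        int ret;

        sw_context->ctx = &val_ctx;

        /* Command checking fills val_ctx with resources and buffers. */
        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                command_size);

        ret = vmw_resources_reserve(sw_context);
        ret = vmw_validation_bo_reserve(&val_ctx, true);
        ret = vmw_validation_bo_validate(&val_ctx, true);
        ret = vmw_validation_res_validate(&val_ctx, true);
        vmw_validation_drop_ht(&val_ctx);  /* hash table no longer needed */

        /* ... fifo/cmdbuf submission and fence creation ... */

        vmw_execbuf_bindings_commit(sw_context, false);
        vmw_validation_res_unreserve(&val_ctx, false);
        vmw_validation_bo_fence(&val_ctx, fence);
        vmw_validation_unref_lists(&val_ctx);  /* releases pooled memory */
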
@@ -4398,38 +4109,31 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence)
{
int ret = 0;
- struct list_head validate_list;
- struct ttm_validate_buffer pinned_val, query_val;
struct vmw_fence_obj *lfence = NULL;
- struct ww_acquire_ctx ticket;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- INIT_LIST_HEAD(&validate_list);
-
- pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
- pinned_val.shared = false;
- list_add_tail(&pinned_val.head, &validate_list);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
+ false);
+ if (ret)
+ goto out_no_reserve;
- query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
- query_val.shared = false;
- list_add_tail(&query_val.head, &validate_list);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
+ false);
+ if (ret)
+ goto out_no_reserve;
- ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
- false, NULL);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
+ ret = vmw_validation_bo_reserve(&val_ctx, false);
+ if (ret)
goto out_no_reserve;
- }
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
+ if (ret)
goto out_no_emit;
- }
dev_priv->query_cid_valid = false;
}
@@ -4443,22 +4147,22 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
NULL);
fence = lfence;
}
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
+ vmw_validation_bo_fence(&val_ctx, fence);
if (lfence != NULL)
vmw_fence_obj_unreference(&lfence);
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
+ vmw_validation_unref_lists(&val_ctx);
vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
return;
out_no_emit:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
+ vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
+ vmw_validation_unref_lists(&val_ctx);
+ vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 3d546d409334..f87261545f2c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -306,7 +306,8 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+ fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
+ TTM_OBJ_EXTRA_SIZE;
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
@@ -650,7 +651,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
}
*p_fence = &ufence->fence;
- *p_handle = ufence->base.hash.key;
+ *p_handle = ufence->base.handle;
return 0;
out_err:
@@ -1137,7 +1138,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
"object.\n");
goto out_no_ref_obj;
}
- handle = base->hash.key;
+ handle = base->handle;
}
ttm_base_object_unref(&base);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index c769df9e257b..e6b11f6ae2e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2575,88 +2575,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
- * command submission.
- *
- * @dev_priv. Pointer to a device private structure.
- * @buf: The buffer object
- * @interruptible: Whether to perform waits as interruptible.
- * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
- * The buffer will be validated as a GMR. Already pinned buffers will not be
- * validated.
- *
- * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
- * interrupted by a signal.
+ * vmw_kms_helper_validation_finish - Helper for post KMS command submission
+ * cleanup and fencing
+ * @dev_priv: Pointer to the device-private struct
+ * @file_priv: Pointer identifying the client when user-space fencing is used
+ * @ctx: Pointer to the validation context
+ * @out_fence: If non-NULL, returned refcounted fence-pointer
+ * @user_fence_rep: If non-NULL, pointer to user-space address area
+ * in which to copy user-space fence info
*/
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- bool interruptible,
- bool validate_as_mob,
- bool for_cpu_blit)
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = interruptible,
- .no_wait_gpu = false};
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
-
- ttm_bo_reserve(bo, false, false, NULL);
- if (for_cpu_blit)
- ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
- else
- ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
- validate_as_mob);
- if (ret)
- ttm_bo_unreserve(bo);
-
- return ret;
-}
-
-/**
- * vmw_kms_helper_buffer_revert - Undo the actions of
- * vmw_kms_helper_buffer_prepare.
- *
- * @res: Pointer to the buffer object.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_buffer_prepare.
- */
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
-{
- if (buf)
- ttm_bo_unreserve(&buf->base);
-}
-
-/**
- * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
- * kms command submission.
- *
- * @dev_priv: Pointer to a device private structure.
- * @file_priv: Pointer to a struct drm_file representing the caller's
- * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
- * if non-NULL, @user_fence_rep must be non-NULL.
- * @buf: The buffer object.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- * @user_fence_rep: Optional pointer to a user-space provided struct
- * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
- * function copies fence data to user-space in a fail-safe manner.
- */
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_buffer_object *buf,
- struct vmw_fence_obj **out_fence,
- struct drm_vmw_fence_rep __user *
- user_fence_rep)
-{
- struct vmw_fence_obj *fence;
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_validation_context *ctx,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep)
+{
+ struct vmw_fence_obj *fence = NULL;
uint32_t handle;
int ret;
- ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
- file_priv ? &handle : NULL);
- if (buf)
- vmw_bo_fence_single(&buf->base, fence);
+ if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
+ out_fence)
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+ file_priv ? &handle : NULL);
+ vmw_validation_done(ctx, fence);
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
@@ -2665,106 +2608,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
*out_fence = fence;
else
vmw_fence_obj_unreference(&fence);
-
- vmw_kms_helper_buffer_revert(buf);
-}
-
-
-/**
- * vmw_kms_helper_resource_revert - Undo the actions of
- * vmw_kms_helper_resource_prepare.
- *
- * @res: Pointer to the resource. Typically a surface.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_resource_prepare.
- */
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
-{
- struct vmw_resource *res = ctx->res;
-
- vmw_kms_helper_buffer_revert(ctx->buf);
- vmw_bo_unreference(&ctx->buf);
- vmw_resource_unreserve(res, false, NULL, 0);
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-}
-
-/**
- * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
- * command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @interruptible: Whether to perform waits as interruptible.
- *
- * Reserves and validates also the backup buffer if a guest-backed resource.
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
- * interrupted by a signal.
- */
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible,
- struct vmw_validation_ctx *ctx)
-{
- int ret = 0;
-
- ctx->buf = NULL;
- ctx->res = res;
-
- if (interruptible)
- ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
- else
- mutex_lock(&res->dev_priv->cmdbuf_mutex);
-
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
-
- ret = vmw_resource_reserve(res, interruptible, false);
- if (ret)
- goto out_unlock;
-
- if (res->backup) {
- ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
- interruptible,
- res->dev_priv->has_mob,
- false);
- if (ret)
- goto out_unreserve;
-
- ctx->buf = vmw_bo_reference(res->backup);
- }
- ret = vmw_resource_validate(res);
- if (ret)
- goto out_revert;
- return 0;
-
-out_revert:
- vmw_kms_helper_buffer_revert(ctx->buf);
-out_unreserve:
- vmw_resource_unreserve(res, false, NULL, 0);
-out_unlock:
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
- return ret;
-}
-
-/**
- * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
- * kms command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- */
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
- struct vmw_fence_obj **out_fence)
-{
- struct vmw_resource *res = ctx->res;
-
- if (ctx->buf || out_fence)
- vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
- out_fence, NULL);
-
- vmw_bo_unreference(&ctx->buf);
- vmw_resource_unreserve(res, false, NULL, 0);
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
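For orientation, every KMS caller converted later in this patch drives this helper the same way. A minimal sketch assembled from those callers; the command-emission step is an illustrative placeholder:

	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	int ret;

	/* Register the framebuffer's backing buffer with the context. */
	ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
	if (ret)
		return ret;

	/* Reserve and validate everything registered so far. */
	ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
	if (ret)
		goto out_unref;

	/* ... emit device commands here (illustrative) ... */

	/* Fence the buffers, unreserve, and drop the list references. */
	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
					 out_fence, NULL);
	return 0;

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;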
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 31311298ec0b..76ec570c0684 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int increment,
struct vmw_kms_dirty *dirty);
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- bool interruptible,
- bool validate_as_mob,
- bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_buffer_object *buf,
- struct vmw_fence_obj **out_fence,
- struct drm_vmw_fence_rep __user *
- user_fence_rep);
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible,
- struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
- struct vmw_fence_obj **out_fence);
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_validation_context *ctx,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep);
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 0861c821a7fe..e420675e8db3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -31,8 +31,8 @@
*/
#include "vmwgfx_drv.h"
+#include "ttm_object.h"
#include <linux/dma-buf.h>
-#include <drm/ttm/ttm_object.h>
/*
* DMA-BUF attach- and mapping methods. No need to implement
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 92003ea5a219..8a029bade32a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (res->id != -1)
idr_remove(idr, res->id);
res->id = -1;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref)
int id;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
- write_lock(&dev_priv->resource_lock);
- res->avail = false;
+ spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref)
else
kfree(res);
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (id != -1)
idr_remove(idr, id);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
BUG_ON(res->id != -1);
idr_preload(GFP_KERNEL);
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
if (ret >= 0)
res->id = ret;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
}
@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
- res->avail = false;
res->dev_priv = dev_priv;
res->func = func;
INIT_LIST_HEAD(&res->lru_head);
@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
return vmw_resource_alloc_id(res);
}
-/**
- * vmw_resource_activate
- *
- * @res: Pointer to the newly created resource
- * @hw_destroy: Destroy function. NULL if none.
- *
- * Activate a resource after the hardware has been made aware of it.
- * Set tye destroy function to @destroy. Typically this frees the
- * resource and destroys the hardware resources associated with it.
- * Activate basically means that the function vmw_resource_lookup will
- * find it.
- */
-void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
-{
- struct vmw_private *dev_priv = res->dev_priv;
-
- write_lock(&dev_priv->resource_lock);
- res->avail = true;
- res->hw_destroy = hw_destroy;
- write_unlock(&dev_priv->resource_lock);
-}
/**
* vmw_user_resource_lookup_handle - lookup a struct resource from a
@@ -243,15 +219,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
-
- read_lock(&dev_priv->resource_lock);
- if (!res->avail || res->res_free != converter->res_free) {
- read_unlock(&dev_priv->resource_lock);
- goto out_bad_resource;
- }
-
kref_get(&res->kref);
- read_unlock(&dev_priv->resource_lock);
*p_res = res;
ret = 0;
@@ -263,6 +231,41 @@ out_bad_resource:
}
/**
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle without taking a reference, and perform basic
+ * type checks
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ *
+ * Return: The resource pointer on success, ERR_PTR(-ESRCH) if the handle
+ * can't be found, or ERR_PTR(-EINVAL) if it is associated with an incorrect
+ * resource type. A successful lookup leaves the caller inside a noref
+ * critical section that must be ended with ttm_base_object_noref_release().
+ */
+struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv
+ *converter)
+{
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_noref_lookup(tfile, handle);
+ if (!base)
+ return ERR_PTR(-ESRCH);
+
+ if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
+ ttm_base_object_noref_release();
+ return ERR_PTR(-EINVAL);
+ }
+
+ return converter->base_obj_to_res(base);
+}
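A hedged sketch of the intended calling pattern, inferred from the error path above: a successful lookup leaves the caller in a noref critical section, so a counted reference must be taken (here via the validation context) before the section is released. The val_ctx, tfile and converter variables are illustrative assumptions:

	struct vmw_resource *res;
	int ret;

	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
						    converter);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* Take a counted reference before leaving the noref section. */
	ret = vmw_validation_add_resource(&val_ctx, res, 0, NULL, NULL);
	ttm_base_object_noref_release();
	if (ret)
		return ret;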
+
+/**
* Helper function that looks up either a surface or a bo.
*
* The pointers pointed at by out_surf and out_buf need to be NULL.
@@ -422,10 +425,10 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&res->lru_head,
&res->dev_priv->res_lru[res->func->res_type]);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
/**
@@ -504,9 +507,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
struct vmw_private *dev_priv = res->dev_priv;
int ret;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (res->func->needs_backup && res->backup == NULL &&
!no_backup) {
@@ -587,15 +590,18 @@ out_no_unbind:
/**
* vmw_resource_validate - Make a resource up-to-date and visible
* to the device.
- *
- * @res: The resource to make visible to the device.
+ * @res: The resource to make visible to the device.
+ * @intr: Perform waits interruptible if possible.
*
* On successful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated.
* On hardware resource shortage, this function will repeatedly evict
* resources of the same type until the validation succeeds.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on failure.
*/
-int vmw_resource_validate(struct vmw_resource *res)
+int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
int ret;
struct vmw_resource *evict_res;
@@ -616,12 +622,12 @@ int vmw_resource_validate(struct vmw_resource *res)
if (likely(ret != -EBUSY))
break;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list) || !res->func->may_evict) {
DRM_ERROR("Out of device device resources "
"for %s.\n", res->func->type_name);
ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
break;
}
@@ -630,14 +636,14 @@ int vmw_resource_validate(struct vmw_resource *res)
lru_head));
list_del_init(&evict_res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
/* Trylock backup buffers with a NULL ticket. */
- ret = vmw_resource_do_evict(NULL, evict_res, true);
+ ret = vmw_resource_do_evict(NULL, evict_res, intr);
if (unlikely(ret != 0)) {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (ret == -ERESTARTSYS ||
++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res);
@@ -819,7 +825,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
struct ww_acquire_ctx ticket;
do {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list))
goto out_unlock;
@@ -828,14 +834,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
list_first_entry(lru_list, struct vmw_resource,
lru_head));
list_del_init(&evict_res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
/* Wait lock backup buffers with a ticket. */
ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res);
return;
@@ -846,7 +852,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
} while (1);
out_unlock:
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
/**
@@ -914,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
/* Do we really need to pin the MOB as well? */
vmw_bo_pin_reserved(vbo, true);
}
- ret = vmw_resource_validate(res);
+ ret = vmw_resource_validate(res, interruptible);
if (vbo)
ttm_bo_unreserve(&vbo->base);
if (ret)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index a8c1c5ebd71d..7e19eba0b0b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -30,6 +30,11 @@
#include "vmwgfx_drv.h"
+/*
+ * Extra memory required by the resource id's ida storage, which is allocated
+ * separately from the base object itself. We estimate, on average, 128 bytes
+ * per ida.
+ */
#define VMW_IDA_ACC_SIZE 128
enum vmw_cmdbuf_res_state {
@@ -120,8 +125,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
bool delay_id,
void (*res_free) (struct vmw_resource *res),
const struct vmw_res_func *func);
-void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *));
int
vmw_simple_resource_create_ioctl(struct drm_device *dev,
void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4c68ad6f3605..333418dc259f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -945,16 +945,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_kms_sou_surface_dirty sdirty;
- struct vmw_validation_ctx ctx;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+ if (ret)
+ goto out_unref;
+
sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
sdirty.base.clip = vmw_sou_surface_clip;
sdirty.base.dev_priv = dev_priv;
@@ -971,9 +975,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
- vmw_kms_helper_resource_finish(&ctx, out_fence);
+ vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+ NULL);
return ret;
+
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
+ return ret;
}
/**
@@ -1050,13 +1059,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
struct vmw_kms_dirty dirty;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
- false, false);
+ ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+ if (ret)
+ goto out_unref;
+
ret = do_bo_define_gmrfb(dev_priv, framebuffer);
if (unlikely(ret != 0))
goto out_revert;
@@ -1068,12 +1081,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty);
- vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+ vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+ NULL);
return ret;
out_revert:
- vmw_kms_helper_buffer_revert(buf);
+ vmw_validation_revert(&val_ctx);
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
return ret;
}
@@ -1149,13 +1165,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf =
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
- false);
+ ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, NULL, true);
+ if (ret)
+ goto out_unref;
+
ret = do_bo_define_gmrfb(dev_priv, vfb);
if (unlikely(ret != 0))
goto out_revert;
@@ -1167,13 +1187,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
0, 0, num_clips, 1, &dirty);
- vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
- user_fence_rep);
+ vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+ user_fence_rep);
return ret;
out_revert:
- vmw_kms_helper_buffer_revert(buf);
-
+ vmw_validation_revert(&val_ctx);
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
+
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index fe4842ca3b6e..bf32fe446219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -186,7 +186,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
shader->num_input_sig = num_input_sig;
shader->num_output_sig = num_output_sig;
- vmw_resource_activate(res, vmw_hw_shader_destroy);
+ res->hw_destroy = vmw_hw_shader_destroy;
return 0;
}
@@ -562,7 +562,7 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
{
struct vmw_dx_shader *entry, *next;
- WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, cotable_head) {
WARN_ON(vmw_dx_shader_scrub(&entry->res));
@@ -636,7 +636,8 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
res = &shader->res;
shader->ctx = ctx;
- shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+ shader->cotable = vmw_resource_reference
+ (vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER));
shader->id = user_key;
shader->committed = false;
INIT_LIST_HEAD(&shader->cotable_head);
@@ -656,7 +657,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
goto out_resource_init;
res->id = shader->id;
- vmw_resource_activate(res, vmw_hw_shader_destroy);
+ res->hw_destroy = vmw_hw_shader_destroy;
out_resource_init:
vmw_resource_unreference(&res);
@@ -740,13 +741,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
};
int ret;
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size =
- ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+ ttm_round_pot(sizeof(struct vmw_user_shader)) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
@@ -792,7 +790,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
}
if (handle)
- *handle = ushader->base.hash.key;
+ *handle = ushader->base.handle;
out_err:
vmw_resource_unreference(&res);
out:
@@ -814,13 +812,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
};
int ret;
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
if (unlikely(vmw_shader_size == 0))
vmw_shader_size =
- ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+ ttm_round_pot(sizeof(struct vmw_shader)) +
+ VMW_IDA_ACC_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_shader_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 6ebc5affde14..6a6865384e91 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -81,7 +81,7 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
return ret;
}
- vmw_resource_activate(&simple->res, simple->func->hw_destroy);
+ simple->res.hw_destroy = simple->func->hw_destroy;
return 0;
}
@@ -159,7 +159,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size;
- account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE;
+ account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
+ TTM_OBJ_EXTRA_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (ret)
@@ -208,7 +209,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
goto out_err;
}
- func->set_arg_handle(data, usimple->base.hash.key);
+ func->set_arg_handle(data, usimple->base.handle);
out_err:
vmw_resource_unreference(&res);
out_ret:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index e9b6b7baa009..bc8bb690f1ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -208,7 +208,7 @@ static int vmw_view_destroy(struct vmw_resource *res)
union vmw_view_destroy body;
} *cmd;
- WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
if (!view->committed || res->id == -1)
@@ -366,7 +366,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
res = &view->res;
view->ctx = ctx;
view->srf = vmw_resource_reference(srf);
- view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+ view->cotable = vmw_resource_reference
+ (vmw_context_cotable(ctx, vmw_view_cotables[view_type]));
view->view_type = view_type;
view->view_id = user_key;
view->cmd_size = cmd_size;
@@ -386,7 +387,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
goto out_resource_init;
res->id = view->view_id;
- vmw_resource_activate(res, vmw_hw_view_destroy);
+ res->hw_destroy = vmw_hw_view_destroy;
out_resource_init:
vmw_resource_unreference(&res);
@@ -439,7 +440,7 @@ void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
{
struct vmw_view *entry, *next;
- WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, cotable_head)
WARN_ON(vmw_view_destroy(&entry->res));
@@ -459,7 +460,7 @@ void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
{
struct vmw_view *entry, *next;
- WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, srf_head)
WARN_ON(vmw_view_destroy(&entry->res));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index e28bb08114a5..c3e435f444c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
struct vmw_stdu_dirty ddirty;
int ret;
bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
/*
* VMs without 3D support don't have the surface DMA command and
* we'll be using a CPU blit, and the framebuffer should be moved out
* of VRAM.
*/
- ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
- false, cpu_blit);
+ ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+ if (ret)
+ goto out_unref;
+
ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX;
@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
0, 0, num_clips, increment, &ddirty.base);
- vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
- user_fence_rep);
+ vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+ user_fence_rep);
+ return ret;
+
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
return ret;
}
@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_stdu_dirty sdirty;
- struct vmw_validation_ctx ctx;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+ if (ret)
+ goto out_unref;
+
if (vfbs->is_bo_proxy) {
ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
if (ret)
@@ -954,8 +966,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
out_finish:
- vmw_kms_helper_resource_finish(&ctx, out_fence);
+ vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+ NULL);
+
+ return ret;
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 80a01cd4c051..ef09f7edf931 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -614,7 +614,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
*/
INIT_LIST_HEAD(&srf->view_list);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
+ res->hw_destroy = vmw_hw_surface_destroy;
return ret;
}
@@ -731,7 +731,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
@@ -744,7 +744,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
num_sizes == 0)
return -EINVAL;
- size = vmw_user_surface_size + 128 +
+ size = vmw_user_surface_size +
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
@@ -886,7 +886,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- rep->sid = user_srf->prime.base.hash.key;
+ rep->sid = user_srf->prime.base.handle;
vmw_resource_unreference(&res);
ttm_read_unlock(&dev_priv->reservation_sem);
@@ -1024,7 +1024,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
- ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT;
}
@@ -1613,9 +1613,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
- size = vmw_user_surface_size + 128;
+ size = vmw_user_surface_size;
/* Define a surface based on the parameters. */
ret = vmw_surface_gb_priv_define(dev,
@@ -1687,7 +1687,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
goto out_unlock;
}
- rep->handle = user_srf->prime.base.hash.key;
+ rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
@@ -1749,7 +1749,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a GB surface "
"backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->hash.key,
+ (void) ttm_ref_object_base_unref(tfile, base->handle,
TTM_REF_USAGE);
goto out_bad_resource;
}
@@ -1763,7 +1763,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->creq.base.array_size = srf->array_size;
rep->creq.base.buffer_handle = backup_handle;
rep->creq.base.base_size = srf->base_size;
- rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.handle = user_srf->prime.base.handle;
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
new file mode 100644
index 000000000000..184025fa938e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include <linux/slab.h>
+#include "vmwgfx_validation.h"
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_validation_bo_node - Buffer object validation metadata.
+ * @base: Metadata used for TTM reservation and validation.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @as_mob: Validate as mob.
+ * @cpu_blit: Validate for cpu blit access.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_bo_node {
+ struct ttm_validate_buffer base;
+ struct drm_hash_item hash;
+ u32 as_mob : 1;
+ u32 cpu_blit : 1;
+};
+
+/**
+ * struct vmw_validation_res_node - Resource validation metadata.
+ * @head: List head for the resource validation list.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @res: Reference counted resource pointer.
+ * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
+ * to a resource.
+ * @new_backup_offset: Offset into the new backup mob for resources that can
+ * share MOBs.
+ * @no_buffer_needed: Kernel does not need to allocate a MOB during validation;
+ * the command stream provides a MOB bind operation.
+ * @switching_backup: The validation process is switching backup MOB.
+ * @first_usage: True iff the resource has been seen only once in the current
+ * validation batch.
+ * @reserved: Whether the resource is currently reserved by this process.
+ * @private: Optionally additional memory for caller-private data.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_res_node {
+ struct list_head head;
+ struct drm_hash_item hash;
+ struct vmw_resource *res;
+ struct vmw_buffer_object *new_backup;
+ unsigned long new_backup_offset;
+ u32 no_buffer_needed : 1;
+ u32 switching_backup : 1;
+ u32 first_usage : 1;
+ u32 reserved : 1;
+ unsigned long private[0];
+};
+
+/**
+ * vmw_validation_mem_alloc - Allocate kernel memory from the validation
+ * context based allocator
+ * @ctx: The validation context
+ * @size: The number of bytes to allocate.
+ *
+ * The memory allocated may not exceed PAGE_SIZE, and the returned
+ * address is aligned to sizeof(long). All memory allocated this way is
+ * reclaimed after validation when calling any of the exported functions:
+ * vmw_validation_unref_lists()
+ * vmw_validation_revert()
+ * vmw_validation_done()
+ *
+ * Return: Pointer to the allocated memory on success. NULL on failure.
+ */
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+ unsigned int size)
+{
+ void *addr;
+
+ size = vmw_validation_align(size);
+ if (size > PAGE_SIZE)
+ return NULL;
+
+ if (ctx->mem_size_left < size) {
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+ if (!page)
+ return NULL;
+
+ list_add_tail(&page->lru, &ctx->page_list);
+ ctx->page_address = page_address(page);
+ ctx->mem_size_left = PAGE_SIZE;
+ }
+
+ addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
+ ctx->mem_size_left -= size;
+
+ return addr;
+}
+
+/**
+ * vmw_validation_mem_free - Free all memory allocated using
+ * vmw_validation_mem_alloc()
+ * @ctx: The validation context
+ *
+ * All memory previously allocated for this context using
+ * vmw_validation_mem_alloc() is freed.
+ */
+static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
+{
+ struct page *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
+ list_del_init(&entry->lru);
+ __free_page(entry);
+ }
+
+ ctx->mem_size_left = 0;
+}
+
+/**
+ * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @vbo: The buffer object to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_bo_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_bo_node *
+vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
+ struct vmw_buffer_object *vbo)
+{
+ struct vmw_validation_bo_node *bo_node = NULL;
+
+ if (!ctx->merge_dups)
+ return NULL;
+
+ if (ctx->ht) {
+ struct drm_hash_item *hash;
+
+ if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+ bo_node = container_of(hash, typeof(*bo_node), hash);
+ } else {
+ struct vmw_validation_bo_node *entry;
+
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ if (entry->base.bo == &vbo->base) {
+ bo_node = entry;
+ break;
+ }
+ }
+ }
+
+ return bo_node;
+}
+
+/**
+ * vmw_validation_find_res_dup - Find a duplicate resource entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @res: The resource to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_res_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_res_node *
+vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
+ struct vmw_resource *res)
+{
+ struct vmw_validation_res_node *res_node = NULL;
+
+ if (!ctx->merge_dups)
+ return NULL;
+
+ if (ctx->ht) {
+ struct drm_hash_item *hash;
+
+ if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+ res_node = container_of(hash, typeof(*res_node), hash);
+ } else {
+ struct vmw_validation_res_node *entry;
+
+ list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
+ if (entry->res == res) {
+ res_node = entry;
+ goto out;
+ }
+ }
+
+ list_for_each_entry(entry, &ctx->resource_list, head) {
+ if (entry->res == res) {
+ res_node = entry;
+ break;
+ }
+ }
+
+ }
+out:
+ return res_node;
+}
+
+/**
+ * vmw_validation_add_bo - Add a buffer object to the validation context.
+ * @ctx: The validation context.
+ * @vbo: The buffer object.
+ * @as_mob: Validate as mob, otherwise suitable for GMR operations.
+ * @cpu_blit: Validate in a page-mappable location.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+ struct vmw_buffer_object *vbo,
+ bool as_mob,
+ bool cpu_blit)
+{
+ struct vmw_validation_bo_node *bo_node;
+
+ bo_node = vmw_validation_find_bo_dup(ctx, vbo);
+ if (bo_node) {
+ if (bo_node->as_mob != as_mob ||
+ bo_node->cpu_blit != cpu_blit) {
+ DRM_ERROR("Inconsistent buffer usage.\n");
+ return -EINVAL;
+ }
+ } else {
+ struct ttm_validate_buffer *val_buf;
+ int ret;
+
+ bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
+ if (!bo_node)
+ return -ENOMEM;
+
+ if (ctx->ht) {
+ bo_node->hash.key = (unsigned long) vbo;
+ ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+ if (ret) {
+ DRM_ERROR("Failed to initialize a buffer "
+ "validation entry.\n");
+ return ret;
+ }
+ }
+ val_buf = &bo_node->base;
+ val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
+ if (!val_buf->bo)
+ return -ESRCH;
+ val_buf->shared = false;
+ list_add_tail(&val_buf->head, &ctx->bo_list);
+ bo_node->as_mob = as_mob;
+ bo_node->cpu_blit = cpu_blit;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_validation_add_resource - Add a resource to the validation context.
+ * @ctx: The validation context.
+ * @res: The resource.
+ * @priv_size: Size of private, additional metadata.
+ * @p_node: Output pointer to the additional metadata address.
+ * @first_usage: Whether this was the first time this resource was seen.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+ struct vmw_resource *res,
+ size_t priv_size,
+ void **p_node,
+ bool *first_usage)
+{
+ struct vmw_validation_res_node *node;
+ int ret;
+
+ node = vmw_validation_find_res_dup(ctx, res);
+ if (node) {
+ node->first_usage = 0;
+ goto out_fill;
+ }
+
+ node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
+ if (!node) {
+ DRM_ERROR("Failed to allocate a resource validation "
+ "entry.\n");
+ return -ENOMEM;
+ }
+
+ if (ctx->ht) {
+ node->hash.key = (unsigned long) res;
+ ret = drm_ht_insert_item(ctx->ht, &node->hash);
+ if (ret) {
+ DRM_ERROR("Failed to initialize a resource validation "
+ "entry.\n");
+ return ret;
+ }
+ }
+ node->res = vmw_resource_reference_unless_doomed(res);
+ if (!node->res)
+ return -ESRCH;
+
+ node->first_usage = 1;
+ if (!res->dev_priv->has_mob) {
+ list_add_tail(&node->head, &ctx->resource_list);
+ } else {
+ switch (vmw_res_type(res)) {
+ case vmw_res_context:
+ case vmw_res_dx_context:
+ list_add(&node->head, &ctx->resource_ctx_list);
+ break;
+ case vmw_res_cotable:
+ list_add_tail(&node->head, &ctx->resource_ctx_list);
+ break;
+ default:
+ list_add_tail(&node->head, &ctx->resource_list);
+ break;
+ }
+ }
+
+out_fill:
+ if (first_usage)
+ *first_usage = node->first_usage;
+ if (p_node)
+ *p_node = &node->private;
+
+ return 0;
+}
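A short sketch of how @priv_size and @p_node let a caller co-locate its own per-resource bookkeeping with the validation node; struct my_val_data and its field are hypothetical:

	struct my_val_data {		/* hypothetical caller-private data */
		u32 dirty;
	};
	void *val_private;
	bool first_usage;
	int ret;

	ret = vmw_validation_add_resource(ctx, res, sizeof(struct my_val_data),
					  &val_private, &first_usage);
	if (ret)
		return ret;

	if (first_usage) {
		struct my_val_data *data = val_private;

		data->dirty = 0;	/* initialize once per validation batch */
	}

The same val_private pointer can later be handed to vmw_validation_res_switch_backup(), which recovers the node with container_of().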
+
+/**
+ * vmw_validation_res_switch_backup - Register a backup MOB switch during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @vbo: The new backup buffer object MOB. This buffer object needs to have
+ * already been registered with the validation context.
+ * @backup_offset: Offset into the new backup MOB.
+ */
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+ void *val_private,
+ struct vmw_buffer_object *vbo,
+ unsigned long backup_offset)
+{
+ struct vmw_validation_res_node *val;
+
+ val = container_of(val_private, typeof(*val), private);
+
+ val->switching_backup = 1;
+ if (val->first_usage)
+ val->no_buffer_needed = 1;
+
+ val->new_backup = vbo;
+ val->new_backup_offset = backup_offset;
+}
+
+/**
+ * vmw_validation_res_reserve - Reserve all resources registered with this
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Use interruptible waits when possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+ bool intr)
+{
+ struct vmw_validation_res_node *val;
+ int ret = 0;
+
+ list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ struct vmw_resource *res = val->res;
+
+ ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
+ if (ret)
+ goto out_unreserve;
+
+ val->reserved = 1;
+ if (res->backup) {
+ struct vmw_buffer_object *vbo = res->backup;
+
+ ret = vmw_validation_add_bo
+ (ctx, vbo, vmw_resource_needs_backup(res),
+ false);
+ if (ret)
+ goto out_unreserve;
+ }
+ }
+
+ return 0;
+
+out_unreserve:
+ vmw_validation_res_unreserve(ctx, true);
+ return ret;
+}
+
+/**
+ * vmw_validation_res_unreserve - Unreserve all reserved resources
+ * registered with this validation context.
+ * @ctx: The validation context.
+ * @backoff: Whether this is a backoff- or a commit-type operation. This
+ * is used to determine whether to switch backup MOBs or not.
+ */
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+ bool backoff)
+{
+ struct vmw_validation_res_node *val;
+
+ list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ if (val->reserved)
+ vmw_resource_unreserve(val->res,
+ !backoff &&
+ val->switching_backup,
+ val->new_backup,
+ val->new_backup_offset);
+ }
+}
+
+/**
+ * vmw_validation_bo_validate_single - Validate a single buffer object.
+ * @bo: The TTM buffer object base.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @validate_as_mob: Whether to validate in MOB memory.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob)
+{
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = interruptible,
+ .no_wait_gpu = false
+ };
+ int ret;
+
+ if (vbo->pin_count > 0)
+ return 0;
+
+ if (validate_as_mob)
+ return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+
+ /*
+ * Put BO in VRAM if there is space, otherwise as a GMR.
+ * If there is no space in VRAM and GMR ids are all used up,
+ * start evicting GMRs to make room. If the DMA buffer can't be
+ * used as a GMR, this will return -ENOMEM.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ if (ret == 0 || ret == -ERESTARTSYS)
+ return ret;
+
+ /*
+ * If that failed, try VRAM again, this time evicting
+ * previous contents.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+ return ret;
+}
+
+/**
+ * vmw_validation_bo_validate - Validate all buffer objects registered with
+ * the validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
+{
+ struct vmw_validation_bo_node *entry;
+ int ret;
+
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ if (entry->cpu_blit) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = intr,
+ .no_wait_gpu = false
+ };
+
+ ret = ttm_bo_validate(entry->base.bo,
+ &vmw_nonfixed_placement, &ctx);
+ } else {
+ ret = vmw_validation_bo_validate_single
+ (entry->base.bo, intr, entry->as_mob);
+ }
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * vmw_validation_res_validate - Validate all resources registered with the
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
+{
+ struct vmw_validation_res_node *val;
+ int ret;
+
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ struct vmw_resource *res = val->res;
+ struct vmw_buffer_object *backup = res->backup;
+
+ ret = vmw_resource_validate(res, intr);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to validate resource.\n");
+ return ret;
+ }
+
+ /* Check if the resource switched backup buffer */
+ if (backup && res->backup && (backup != res->backup)) {
+ struct vmw_buffer_object *vbo = res->backup;
+
+ ret = vmw_validation_add_bo
+ (ctx, vbo, vmw_resource_needs_backup(res),
+ false);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
+ * and unregister it from this validation context.
+ * @ctx: The validation context.
+ *
+ * The hash table used for duplicate finding is an expensive resource and
+ * may be protected by mutexes that may cause deadlocks during resource
+ * unreferencing if held. After resource- and buffer object registration,
+ * there is no longer any use for this hash table, so it can be freed
+ * either to shorten any mutex locking time, or before resource- and
+ * buffer objects are freed during validation context cleanup.
+ */
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
+{
+ struct vmw_validation_bo_node *entry;
+ struct vmw_validation_res_node *val;
+
+ if (!ctx->ht)
+ return;
+
+ list_for_each_entry(entry, &ctx->bo_list, base.head)
+ (void) drm_ht_remove_item(ctx->ht, &entry->hash);
+
+ list_for_each_entry(val, &ctx->resource_list, head)
+ (void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+ list_for_each_entry(val, &ctx->resource_ctx_list, head)
+ (void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+ ctx->ht = NULL;
+}
+
+/**
+ * vmw_validation_unref_lists - Unregister previously registered buffer
+ * objects and resources.
+ * @ctx: The validation context.
+ *
+ * Note that this function may cause buffer object- and resource destructors
+ * to be invoked.
+ */
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
+{
+ struct vmw_validation_bo_node *entry;
+ struct vmw_validation_res_node *val;
+
+ list_for_each_entry(entry, &ctx->bo_list, base.head)
+ ttm_bo_unref(&entry->base.bo);
+
+ list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+ list_for_each_entry(val, &ctx->resource_list, head)
+ vmw_resource_unreference(&val->res);
+
+ /*
+ * No need to detach each list entry since they are all freed with
+ * vmw_validation_mem_free(). Just make them inaccessible.
+ */
+ INIT_LIST_HEAD(&ctx->bo_list);
+ INIT_LIST_HEAD(&ctx->resource_list);
+
+ vmw_validation_mem_free(ctx);
+}
+
+/**
+ * vmw_validation_prepare - Prepare a validation context for command
+ * submission.
+ * @ctx: The validation context.
+ * @mutex: The mutex used to protect resource reservation.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Note that the single reservation mutex @mutex is an unfortunate
+ * construct. Ideally resource reservation should be moved to per-resource
+ * ww_mutexes.
+ * If this function doesn't return zero to indicate success, all resources
+ * are left unreserved but still referenced.
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on error.
+ */
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+ struct mutex *mutex,
+ bool intr)
+{
+ int ret = 0;
+
+ if (mutex) {
+ if (intr)
+ ret = mutex_lock_interruptible(mutex);
+ else
+ mutex_lock(mutex);
+ if (ret)
+ return -ERESTARTSYS;
+ }
+
+ ctx->res_mutex = mutex;
+ ret = vmw_validation_res_reserve(ctx, intr);
+ if (ret)
+ goto out_no_res_reserve;
+
+ ret = vmw_validation_bo_reserve(ctx, intr);
+ if (ret)
+ goto out_no_bo_reserve;
+
+ ret = vmw_validation_bo_validate(ctx, intr);
+ if (ret)
+ goto out_no_validate;
+
+ ret = vmw_validation_res_validate(ctx, intr);
+ if (ret)
+ goto out_no_validate;
+
+ return 0;
+
+out_no_validate:
+ vmw_validation_bo_backoff(ctx);
+out_no_bo_reserve:
+ vmw_validation_res_unreserve(ctx, true);
+out_no_res_reserve:
+ if (mutex)
+ mutex_unlock(mutex);
+
+ return ret;
+}
+
+/**
+ * vmw_validation_revert - Revert validation actions if command submission
+ * failed.
+ *
+ * @ctx: The validation context.
+ *
+ * The caller still needs to unref resources after a call to this function.
+ */
+void vmw_validation_revert(struct vmw_validation_context *ctx)
+{
+ vmw_validation_bo_backoff(ctx);
+ vmw_validation_res_unreserve(ctx, true);
+ if (ctx->res_mutex)
+ mutex_unlock(ctx->res_mutex);
+ vmw_validation_unref_lists(ctx);
+}
+
+/**
+ * vmw_validation_done - Commit validation actions after command submission
+ * success.
+ * @ctx: The validation context.
+ * @fence: Fence with which to fence all buffer objects taking part in the
+ * command submission.
+ *
+ * The caller does NOT need to unref resources after a call to this function.
+ */
+void vmw_validation_done(struct vmw_validation_context *ctx,
+ struct vmw_fence_obj *fence)
+{
+ vmw_validation_bo_fence(ctx, fence);
+ vmw_validation_res_unreserve(ctx, false);
+ if (ctx->res_mutex)
+ mutex_unlock(ctx->res_mutex);
+ vmw_validation_unref_lists(ctx);
+}
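Taken together with vmw_validation_prepare(), the commit/abort pairing for a command submission looks roughly like this; my_submit_commands() is a hypothetical placeholder:

	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
	if (ret)
		goto out_unref;	/* nodes are unreserved but still referenced */

	ret = my_submit_commands(dev_priv);	/* hypothetical */
	if (ret) {
		vmw_validation_revert(&val_ctx); /* backoff, unlock, unref */
		return ret;
	}

	vmw_validation_done(&val_ctx, fence);	/* fence, unlock, unref */
	return 0;

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;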
+
+/**
+ * vmw_validation_preload_bo - Preload the validation memory allocator for a
+ * call to vmw_validation_add_bo().
+ * @ctx: Pointer to the validation context.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
+ * but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
+{
+ unsigned int size = sizeof(struct vmw_validation_bo_node);
+
+ if (!vmw_validation_mem_alloc(ctx, size))
+ return -ENOMEM;
+
+ ctx->mem_size_left += size;
+ return 0;
+}
+
+/**
+ * vmw_validation_preload_res - Preload the validation memory allocator for a
+ * call to vmw_validation_add_resource().
+ * @ctx: Pointer to the validation context.
+ * @size: Size of the validation node extra data. See below.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_resource() with the same or smaller @size is
+ * guaranteed not to sleep. An error is not fatal but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+ unsigned int size)
+{
+ size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
+ size) +
+ vmw_validation_align(sizeof(struct vmw_validation_bo_node));
+ if (!vmw_validation_mem_alloc(ctx, size))
+ return -ENOMEM;
+
+ ctx->mem_size_left += size;
+ return 0;
+}
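A sketch of the guarantee the two preload helpers provide: after a successful preload, the matching add call will not allocate and therefore will not sleep, so it may run inside an atomic section. The spinlock and the lookup below are illustrative assumptions:

	if (vmw_validation_preload_bo(ctx))
		return -ENOMEM;		/* preloading itself may sleep */

	spin_lock(&my_lookup_lock);	/* hypothetical lock */
	vbo = my_bo_lookup(handle);	/* hypothetical lookup */
	ret = vbo ? vmw_validation_add_bo(ctx, vbo, false, false) : -ESRCH;
	spin_unlock(&my_lookup_lock);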
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
new file mode 100644
index 000000000000..b57e3292c386
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_VALIDATION_H_
+#define _VMWGFX_VALIDATION_H_
+
+#include <drm/drm_hashtab.h>
+#include <linux/list.h>
+#include <linux/ww_mutex.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+
+/**
+ * struct vmw_validation_context - Per command submission validation context
+ * @ht: Hash table used to find resource- or buffer object duplicates
+ * @resource_list: List head for resource validation metadata
+ * @resource_ctx_list: List head for resource validation metadata for
+ * resources that need to be validated before those in @resource_list
+ * @bo_list: List head for buffer objects
+ * @page_list: List of pages used by the memory allocator
+ * @ticket: Ticket used for ww mutex locking
+ * @res_mutex: Pointer to mutex used for resource reserving
+ * @merge_dups: Whether to merge metadata for duplicate resources or
+ * buffer objects
+ * @mem_size_left: Free memory left in the last page in @page_list
+ * @page_address: Kernel virtual address of the last page in @page_list
+ */
+struct vmw_validation_context {
+ struct drm_open_hash *ht;
+ struct list_head resource_list;
+ struct list_head resource_ctx_list;
+ struct list_head bo_list;
+ struct list_head page_list;
+ struct ww_acquire_ctx ticket;
+ struct mutex *res_mutex;
+ unsigned int merge_dups;
+ unsigned int mem_size_left;
+ u8 *page_address;
+};
+
+struct vmw_buffer_object;
+struct vmw_resource;
+struct vmw_fence_obj;
+
+#if 0
+/**
+ * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
+ * @_name: The name of the variable
+ * @_ht: The hash table used to find dups or NULL if none
+ * @_merge_dups: Whether to merge duplicate buffer object- or resource
+ * entries. If set to true, ideally a hash table pointer should be supplied
+ * as well unless the number of resources and buffer objects per validation
+ * is known to be very small
+ */
+#endif
+#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups) \
+ struct vmw_validation_context _name = \
+ { .ht = _ht, \
+ .resource_list = LIST_HEAD_INIT((_name).resource_list), \
+ .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
+ .bo_list = LIST_HEAD_INIT((_name).bo_list), \
+ .page_list = LIST_HEAD_INIT((_name).page_list), \
+ .res_mutex = NULL, \
+ .merge_dups = _merge_dups, \
+ .mem_size_left = 0, \
+ }
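+
+/*
+ * Example: declare an on-stack context that merges duplicates using a
+ * caller-provided hash table (here a hypothetical @ht):
+ *
+ *	DECLARE_VAL_CONTEXT(val_ctx, &ht, true);
+ */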
+
+/**
+ * vmw_validation_has_bos - return whether the validation context has
+ * any buffer objects registered.
+ *
+ * @ctx: The validation context
+ * Returns: Whether any buffer objects are registered
+ */
+static inline bool
+vmw_validation_has_bos(struct vmw_validation_context *ctx)
+{
+ return !list_empty(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_set_ht - Register a hash table for duplicate finding
+ * @ctx: The validation context
+ * @ht: Pointer to a hash table to use for duplicate finding
+ *
+ * This function is intended to be used if the hash table wasn't
+ * available at validation context declaration time.
+ */
+static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
+ struct drm_open_hash *ht)
+{
+ ctx->ht = ht;
+}
+
+/**
+ * vmw_validation_bo_reserve - Reserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ * @intr: Whether to perform waits interruptibly
+ *
+ * Return: Zero on success, -ERESTARTSYS when interrupted, negative error
+ * code on failure
+ */
+static inline int
+vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
+ bool intr)
+{
+ return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
+ NULL);
+}
+
+/**
+ * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve(). It's typically used as part of an error path.
+ */
+static inline void
+vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
+{
+ ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+}
+
+/**
+ * vmw_validation_bo_fence - Unreserve and fence buffer objects registered
+ * with a validation context
+ * @ctx: The validation context
+ * @fence: Fence with which to fence all buffer objects registered with the
+ * context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve(), and fences them with @fence.
+ */
+static inline void
+vmw_validation_bo_fence(struct vmw_validation_context *ctx,
+ struct vmw_fence_obj *fence)
+{
+ ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
+ (void *) fence);
+}
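+
+/*
+ * The three buffer object helpers above pair up as follows (sketch, error
+ * handling elided): every successful vmw_validation_bo_reserve() must be
+ * balanced by exactly one of
+ *
+ *	vmw_validation_bo_backoff(ctx);		(error path)
+ *	vmw_validation_bo_fence(ctx, fence);	(success path)
+ */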
+
+/**
+ * vmw_validation_context_init - Initialize a validation context
+ * @ctx: Pointer to the validation context to initialize
+ *
+ * This function initializes a validation context with @merge_dups set
+ * to false.
+ */
+static inline void
+vmw_validation_context_init(struct vmw_validation_context *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ INIT_LIST_HEAD(&ctx->resource_list);
+ INIT_LIST_HEAD(&ctx->resource_ctx_list);
+ INIT_LIST_HEAD(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_align - Align a validation memory allocation
+ * @val: The size to be aligned
+ *
+ * Returns: @val aligned to the granularity used by the validation memory
+ * allocator.
+ */
+static inline unsigned int vmw_validation_align(unsigned int val)
+{
+ return ALIGN(val, sizeof(long));
+}
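+
+/*
+ * For example, with 8-byte longs vmw_validation_align(13) returns 16 and
+ * vmw_validation_align(16) returns 16; allocations are thus long-aligned.
+ */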
+
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+ struct vmw_buffer_object *vbo,
+ bool as_mob, bool cpu_blit);
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob);
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+ struct vmw_resource *res,
+ size_t priv_size,
+ void **p_node,
+ bool *first_usage);
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+ bool intr);
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+ bool backoff);
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+ void *val_private,
+ struct vmw_buffer_object *vbo,
+ unsigned long backup_offset);
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
+
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+ struct mutex *mutex, bool intr);
+void vmw_validation_revert(struct vmw_validation_context *ctx);
+void vmw_validation_done(struct vmw_validation_context *ctx,
+ struct vmw_fence_obj *fence);
+
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+ unsigned int size);
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+ unsigned int size);
+#endif