-rw-r--r-- MAINTAINERS | 9
-rw-r--r-- drivers/gpu/drm/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 197
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 258
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 792
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h (renamed from drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c) | 73
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1498
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 49
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 122
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 398
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 212
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 136
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 269
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 43
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 64
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 90
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 50
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 117
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 151
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 97
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 258
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c | 156
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 252
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.h | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 48
-rwxr-xr-x drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 56
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 186
-rw-r--r-- drivers/gpu/drm/amd/display/TODO | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 35
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/conversion.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 60
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 70
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 145
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 183
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 99
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_abm.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 90
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 294
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 493
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 67
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 91
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 224
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 568
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 532
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h | 91
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 74
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h | 43
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c) | 353
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h) | 117
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 63
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 159
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 39
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 60
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/os_types.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/include/fixed31_32.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/include/grph_object_id.h | 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h | 7
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h | 14
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h | 45
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h | 14
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 10
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 29
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 33
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 182
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h | 15
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 35
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 48
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 35
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h | 7
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 7
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 5
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 186
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 7
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ttm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 114
-rw-r--r-- drivers/gpu/drm/radeon/cik_reg.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 32
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 7
-rw-r--r-- drivers/gpu/drm/scheduler/Makefile | 26
-rw-r--r-- drivers/gpu/drm/scheduler/gpu_scheduler.c (renamed from drivers/gpu/drm/amd/scheduler/gpu_scheduler.c) | 296
-rw-r--r-- drivers/gpu/drm/scheduler/sched_fence.c (renamed from drivers/gpu/drm/amd/scheduler/sched_fence.c) | 122
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 55
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 10
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 3
-rw-r--r-- include/drm/gpu_scheduler.h | 173
-rw-r--r-- include/drm/gpu_scheduler_trace.h (renamed from drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h) | 14
-rw-r--r-- include/drm/spsc_queue.h (renamed from drivers/gpu/drm/amd/scheduler/spsc_queue.h) | 7
-rw-r--r-- include/drm/ttm/ttm_bo_api.h | 4
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 4
193 files changed, 6633 insertions, 5873 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 49b31ad6f66d..d4b1635ba1f3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4811,6 +4811,15 @@ S: Maintained
F: drivers/gpu/drm/tinydrm/
F: include/drm/tinydrm/
+DRM TTM SUBSYSTEM
+M: Christian Koenig <christian.koenig@amd.com>
+M: Roger He <Hongbo.He@amd.com>
+T: git git://people.freedesktop.org/~agd5f/linux
+S: Maintained
+L: dri-devel@lists.freedesktop.org
+F: include/drm/ttm/
+F: drivers/gpu/drm/ttm/
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d853989848d6..0bc374459440 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -154,6 +154,10 @@ config DRM_VM
bool
depends on DRM && MMU
+config DRM_SCHED
+ tristate
+ depends on DRM
+
source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
@@ -183,6 +187,7 @@ config DRM_AMDGPU
depends on DRM && PCI && MMU
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_SCHED
select DRM_TTM
select POWER_SUPPLY
select HWMON
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e5bf68b9c171..dd5ae67f8e2b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -102,3 +102,4 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_SCHED) += scheduler/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 90202cf4cd1e..d8da12c114b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -52,7 +52,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -62,7 +62,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
amdgpu-y += \
- vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o
+ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
# add GMC block
amdgpu-y += \
@@ -135,10 +135,7 @@ amdgpu-y += \
amdgpu-y += amdgpu_cgs.o
# GPU scheduler
-amdgpu-y += \
- ../scheduler/gpu_scheduler.o \
- ../scheduler/sched_fence.o \
- amdgpu_job.o
+amdgpu-y += amdgpu_job.o
# ACP component
ifneq ($(CONFIG_DRM_AMD_ACP),)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5e2958a79928..bbe06e04dcb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -45,6 +45,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
+#include <drm/gpu_scheduler.h>
#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
@@ -68,10 +69,9 @@
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_dm.h"
-#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
-
+#include "amdgpu_debugfs.h"
/*
* Modules parameters.
@@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -223,17 +224,18 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_LAST
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state);
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state);
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
+int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state);
+int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state);
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags);
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
#define AMDGPU_MAX_IP_NUM 16
@@ -258,15 +260,16 @@ struct amdgpu_ip_block {
const struct amdgpu_ip_block_version *version;
};
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor);
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor);
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version);
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -346,7 +349,8 @@ struct amdgpu_gart_funcs {
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
/* get the pde for a given mc addr */
- u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
+ void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+ u64 *dst, u64 *flags);
uint32_t (*get_invalidate_req)(unsigned int vm_id);
};
@@ -373,9 +377,6 @@ struct amdgpu_dummy_page {
struct page *page;
dma_addr_t addr;
};
-int amdgpu_dummy_page_init(struct amdgpu_device *adev);
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev);
-
/*
* Clocks
@@ -423,7 +424,6 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
* By conception this is a helper for other parts of the driver
@@ -540,6 +540,7 @@ struct amdgpu_mc {
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
+ bool translate_further;
};
/*
@@ -650,12 +651,6 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_INVALID = 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
-
/*
* IRQS.
*/
@@ -689,7 +684,7 @@ struct amdgpu_ib {
uint32_t flags;
};
-extern const struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct drm_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
@@ -699,7 +694,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f);
/*
@@ -732,7 +727,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
struct dma_fence **fences;
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
};
struct amdgpu_ctx {
@@ -746,8 +741,8 @@ struct amdgpu_ctx {
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
- enum amd_sched_priority init_priority;
- enum amd_sched_priority override_priority;
+ enum drm_sched_priority init_priority;
+ enum drm_sched_priority override_priority;
struct mutex lock;
atomic_t guilty;
};
@@ -767,7 +762,7 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1116,7 +1111,7 @@ struct amdgpu_cs_parser {
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
struct amdgpu_job {
- struct amd_sched_job base;
+ struct drm_sched_job base;
struct amdgpu_device *adev;
struct amdgpu_vm *vm;
struct amdgpu_ring *ring;
@@ -1170,10 +1165,10 @@ struct amdgpu_wb {
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
-void amdgpu_get_pcie_info(struct amdgpu_device *adev);
+void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/*
* SDMA
@@ -1238,24 +1233,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
*/
void amdgpu_test_moves(struct amdgpu_device *adev);
-/*
- * Debugfs
- */
-struct amdgpu_debugfs {
- const struct drm_info_list *files;
- unsigned num_files;
-};
-
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles);
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
-
-#if defined(CONFIG_DEBUG_FS)
-int amdgpu_debugfs_init(struct drm_minor *minor);
-#endif
-
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
/*
* amdgpu smumgr functions
@@ -1410,9 +1387,6 @@ struct amdgpu_fw_vram_usage {
void *va;
};
-int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
-void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev);
-
/*
* CGS
*/
@@ -1428,6 +1402,80 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+
+/*
+ * amdgpu nbio functions
+ *
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+};
+
+
+/* Define the HW IP blocks that will be used by the driver; add more if necessary */
+enum amd_hw_ip_block_type {
+ GC_HWIP = 1,
+ HDP_HWIP,
+ SDMA0_HWIP,
+ SDMA1_HWIP,
+ MMHUB_HWIP,
+ ATHUB_HWIP,
+ NBIO_HWIP,
+ MP0_HWIP,
+ UVD_HWIP,
+ VCN_HWIP = UVD_HWIP,
+ VCE_HWIP,
+ DF_HWIP,
+ DCE_HWIP,
+ OSSSYS_HWIP,
+ SMUIO_HWIP,
+ PWR_HWIP,
+ NBIF_HWIP,
+ MAX_HWIP
+};
+
+#define HWIP_MAX_INSTANCE 6
+
struct amd_powerplay {
struct cgs_device *cgs_device;
void *pp_handle;
@@ -1620,6 +1668,11 @@ struct amdgpu_device {
/* amdkfd interface */
struct kfd_dev *kfd;
+ /* soc15 register offset based on ip, instance and segment */
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+ const struct amdgpu_nbio_funcs *nbio_funcs;
+
/* delayed work_func for deferring clockgating during resume */
struct delayed_work late_init_work;
@@ -1785,7 +1838,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
+#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
@@ -1835,23 +1888,25 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
/* Common functions */
-int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job);
-bool amdgpu_need_backup(struct amdgpu_device *adev);
-void amdgpu_pci_config_reset(struct amdgpu_device *adev);
-bool amdgpu_need_post(struct amdgpu_device *adev);
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job* job, bool force);
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+bool amdgpu_device_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc, u64 base);
+void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
@@ -1885,7 +1940,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int amdgpu_suspend(struct amdgpu_device *adev);
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
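
The most significant addition in this header is the per-IP register offset table: instead of hardcoding absolute offsets, SOC15-generation code indexes `adev->reg_offset` by hardware IP block, instance, and segment (the table is filled in by the new vega10_reg_init.c in the diffstat above). A rough sketch of how such a lookup composes — the `##_BASE_IDX` segment convention is modeled on soc15_common.h, so treat the exact macro shape as an assumption:

```c
/*
 * Sketch of an offset lookup over the new reg_offset table, modeled on
 * the SOC15_REG_OFFSET() pattern in soc15_common.h. The reg##_BASE_IDX
 * segment suffix is assumed for illustration, not quoted from this hunk.
 */
#define SOC15_REG_LOOKUP(adev, ip, inst, reg) \
	((adev)->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

/* A caller would then read, e.g., a GC register on instance 0 as: */
u32 status = RREG32(SOC15_REG_LOOKUP(adev, GC, 0, mmGRBM_STATUS));
```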
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index c04f44a90392..a29362f9ef41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -277,7 +277,7 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
const struct amdgpu_ip_block *ip_block =
- amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
if (!ip_block)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c70cda04dbfb..896b16db58aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -93,6 +93,39 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
adev->pdev, kfd2kgd);
}
+/**
+ * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
+ * setup amdkfd
+ *
+ * @adev: amdgpu_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
+ *
+ * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
+ * takes doorbells required for its own rings and reports the setup to amdkfd.
+ * amdgpu reserved doorbells are at the start of the doorbell aperture.
+ */
+static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset)
+{
+ /*
+ * The first num_doorbells are used by amdgpu.
+ * amdkfd takes whatever's left in the aperture.
+ */
+ if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
+ *aperture_base = adev->doorbell.base;
+ *aperture_size = adev->doorbell.size;
+ *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
+ } else {
+ *aperture_base = 0;
+ *aperture_size = 0;
+ *start_offset = 0;
+ }
+}
+
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
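
The split computed by amdgpu_doorbell_get_kfd_info() is easiest to see with concrete numbers. A small worked sketch — the aperture size and doorbell count below are invented for illustration, not taken from this patch:

```c
/* Illustrative values only; not taken from this patch. */
phys_addr_t base = 0xd0000000;      /* doorbell BAR base           */
size_t size = 2 * 1024 * 1024;      /* 2 MiB doorbell aperture     */
u32 num_doorbells = 1024;           /* doorbells claimed by amdgpu */

size_t amdgpu_bytes = num_doorbells * sizeof(u32);   /* 4 KiB */

/*
 * size (2 MiB) > amdgpu_bytes (4 KiB), so amdkfd is told:
 *   aperture_base = 0xd0000000
 *   aperture_size = 2 MiB
 *   start_offset  = 4 KiB   (first doorbell usable by amdkfd)
 * Had amdgpu consumed the whole aperture, all three would be zero.
 */
```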
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 39f4d0df1ada..bf872f694f50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -27,6 +27,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
+#include "amdgpu_atomfirmware.h"
#include "amdgpu_i2c.h"
#include "atom.h"
@@ -1699,7 +1700,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
+static void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
uint32_t bios_2_scratch, bios_6_scratch;
@@ -1721,28 +1722,6 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
-}
-
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
-{
- int i;
-
- /*
- * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
- * execute ASIC_Init posting via driver
- */
- adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
-}
-
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung)
{
@@ -1798,7 +1777,7 @@ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
#endif
}
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
+static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
@@ -1841,3 +1820,234 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios. The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes then to program specific
+ * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
+ * atombios.h, and atom.c
+ */
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the pll register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the pll register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+
+ WREG32(reg, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32(reg);
+ return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the pll register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+
+ WREG32_IO(reg, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32_IO(reg);
+ return r;
+}
+
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+ NULL);
+
+/**
+ * amdgpu_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void amdgpu_atombios_fini(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.atom_context) {
+ kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
+ kfree(adev->mode_info.atom_context);
+ adev->mode_info.atom_context = NULL;
+ kfree(adev->mode_info.atom_card_info);
+ adev->mode_info.atom_card_info = NULL;
+ device_remove_file(adev->dev, &dev_attr_vbios_version);
+}
+
+/**
+ * amdgpu_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int amdgpu_atombios_init(struct amdgpu_device *adev)
+{
+ struct card_info *atom_card_info =
+ kzalloc(sizeof(struct card_info), GFP_KERNEL);
+ int ret;
+
+ if (!atom_card_info)
+ return -ENOMEM;
+
+ adev->mode_info.atom_card_info = atom_card_info;
+ atom_card_info->dev = adev->ddev;
+ atom_card_info->reg_read = cail_reg_read;
+ atom_card_info->reg_write = cail_reg_write;
+ /* needed for iio ops */
+ if (adev->rio_mem) {
+ atom_card_info->ioreg_read = cail_ioreg_read;
+ atom_card_info->ioreg_write = cail_ioreg_write;
+ } else {
+ DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
+ atom_card_info->ioreg_read = cail_reg_read;
+ atom_card_info->ioreg_write = cail_reg_write;
+ }
+ atom_card_info->mc_read = cail_mc_read;
+ atom_card_info->mc_write = cail_mc_write;
+ atom_card_info->pll_read = cail_pll_read;
+ atom_card_info->pll_write = cail_pll_write;
+
+ adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
+ if (!adev->mode_info.atom_context) {
+ amdgpu_atombios_fini(adev);
+ return -ENOMEM;
+ }
+
+ mutex_init(&adev->mode_info.atom_context->mutex);
+ if (adev->is_atom_fw) {
+ amdgpu_atomfirmware_scratch_regs_init(adev);
+ amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ } else {
+ amdgpu_atombios_scratch_regs_init(adev);
+ amdgpu_atombios_allocate_fb_scratch(adev);
+ }
+
+ ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+ if (ret) {
+ DRM_ERROR("Failed to create device file for VBIOS version\n");
+ return ret;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index b0d5d1d7fdba..fd8f18074f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -195,9 +195,6 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
@@ -219,6 +216,7 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id);
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev);
+void amdgpu_atombios_fini(struct amdgpu_device *adev);
+int amdgpu_atombios_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 057e1ecd83ce..a5df80d50d44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -93,7 +93,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
resource_size_t size = 256 * 1024; /* ??? */
if (!(adev->flags & AMD_IS_APU))
- if (amdgpu_need_post(adev))
+ if (amdgpu_device_need_post(adev))
return false;
adev->bios = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 85d2149b9dbe..13607e28c1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -801,6 +801,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else
strcpy(fw_name, "amdgpu/vega10_smc.bin");
break;
+ case CHIP_RAVEN:
+ adev->pm.fw_version = info->version;
+ return 0;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4cea9ab237ac..5e539fc5b05f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -343,7 +343,12 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_operation_ctx ctx = { true, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .allow_reserved_eviction = false,
+ .resv = bo->tbo.resv
+ };
uint32_t domain;
int r;
@@ -1150,7 +1155,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
- struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
unsigned i;
uint64_t seq;
@@ -1173,7 +1178,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@@ -1202,7 +1207,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_ring_priority_get(job->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
- amd_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base, entity);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
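
The amd_sched_* → drm_sched_* renames above track the scheduler's move from drivers/gpu/drm/amd/scheduler/ to the shared drivers/gpu/drm/scheduler/. The submission sequence itself is unchanged; condensed from the hunk above, with error handling elided:

```c
/* Condensed from amdgpu_cs_submit() above; error paths elided. */
struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;

r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r)
	return r;          /* job not yet owned by the scheduler, free it */

drm_sched_entity_push_job(&job->base, entity);  /* scheduler owns it now */
```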
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d71dc164b469..09d35051fdd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -28,10 +28,10 @@
#include "amdgpu_sched.h"
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
/* NORMAL and below are accessible by everyone */
- if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+ if (priority <= DRM_SCHED_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
@@ -44,14 +44,14 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
- enum amd_sched_priority priority,
+ enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
- if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
return -EINVAL;
r = amdgpu_ctx_priority_permit(filp, priority);
@@ -78,19 +78,19 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
- ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
+ ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring)
continue;
- r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
+ r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, amdgpu_sched_jobs, &ctx->guilty);
if (r)
goto failed;
@@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
+ drm_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -126,7 +126,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
ctx->fences = NULL;
for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(&adev->rings[i]->sched,
+ drm_sched_entity_fini(&adev->rings[i]->sched,
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
@@ -137,7 +137,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
- enum amd_sched_priority priority,
+ enum drm_sched_priority priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -266,7 +266,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
- enum amd_sched_priority priority;
+ enum drm_sched_priority priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -278,8 +278,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
- if (priority == AMD_SCHED_PRIORITY_INVALID)
- priority = AMD_SCHED_PRIORITY_NORMAL;
+ if (priority == DRM_SCHED_PRIORITY_INVALID)
+ priority = DRM_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -385,18 +385,18 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
int i;
struct amdgpu_device *adev = ctx->adev;
- struct amd_sched_rq *rq;
- struct amd_sched_entity *entity;
+ struct drm_sched_rq *rq;
+ struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
- enum amd_sched_priority ctx_prio;
+ enum drm_sched_priority ctx_prio;
ctx->override_priority = priority;
- ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+ ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < adev->num_rings; i++) {
@@ -407,7 +407,7 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
continue;
- amd_sched_entity_set_rq(entity, rq);
+ drm_sched_entity_set_rq(entity, rq);
}
}
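
Context creation binds one scheduler entity per ring to the run queue matching the context's priority; a later priority override simply re-parents the entity with drm_sched_entity_set_rq(). A condensed view of that lifecycle, using the calls from the hunks above (priority names per the renamed drm_sched enum):

```c
/* Condensed entity lifecycle, per the hunks above. */
struct drm_sched_rq *rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];

r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
			  rq, amdgpu_sched_jobs, &ctx->guilty);

/* Raising priority later moves the entity to another run queue: */
drm_sched_entity_set_rq(&ctx->rings[i].entity,
			&ring->sched.sched_rq[DRM_SCHED_PRIORITY_HIGH_HW]);

/* Context teardown detaches the entity from its scheduler: */
drm_sched_entity_fini(&ring->sched, &ctx->rings[i].entity);
```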
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
new file mode 100644
index 000000000000..ee76b468774a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -0,0 +1,792 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <drm/drmP.h>
+#include <linux/debugfs.h>
+#include "amdgpu.h"
+
+/*
+ * Debugfs
+ */
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles)
+{
+ unsigned i;
+
+ for (i = 0; i < adev->debugfs_count; i++) {
+ if (adev->debugfs[i].files == files) {
+ /* Already registered */
+ return 0;
+ }
+ }
+
+ i = adev->debugfs_count + 1;
+ if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
+ DRM_ERROR("Reached maximum number of debugfs components.\n");
+ DRM_ERROR("Report so we increase "
+ "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
+ return -EINVAL;
+ }
+ adev->debugfs[adev->debugfs_count].files = files;
+ adev->debugfs[adev->debugfs_count].num_files = nfiles;
+ adev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_create_files(files, nfiles,
+ adev->ddev->primary->debugfs_root,
+ adev->ddev->primary);
+#endif
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (*pos > adev->rmmio_size)
+ goto end;
+
+ value = RREG32(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto end;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ return result;
+}
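
The loff_t here doubles as a command word: bits 0-21 select the register byte offset, bit 23 requests the PM/PG lock, bit 62 enables banked addressing, and bits 24-33 / 34-43 / 44-53 carry the SE / SH / instance bank (0x3FF meaning broadcast). A hedged userspace sketch composing such an offset — the debugfs node path is the conventional one for this interface, but it is an assumption at this point in the listing, since the node is created later in this file:

```c
/* Userspace sketch: read one register with SH/instance broadcast.
 * The debugfs path is assumed, not quoted from this hunk. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t off = (1ULL << 62)              /* banked addressing on */
		     | ((uint64_t)0     << 24)   /* SE 0                 */
		     | ((uint64_t)0x3FF << 34)   /* SH: broadcast        */
		     | ((uint64_t)0x3FF << 44)   /* instance: broadcast  */
		     | 0x2004;                   /* register byte offset */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
	uint32_t val;

	if (fd < 0 || pread(fd, &val, sizeof(val), off) != sizeof(val))
		return 1;
	printf("reg dword 0x%x = 0x%08x\n", 0x2004 >> 2, val);
	close(fd);
	return 0;
}
```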
+
+static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (*pos > adev->rmmio_size)
+ return result;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_PCIE(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_PCIE(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_DIDT(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_DIDT(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_SMC(*pos);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_SMC(*pos, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 3;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ /* rev==1 */
+ config[no_regs++] = adev->rev_id;
+ config[no_regs++] = adev->pg_flags;
+ config[no_regs++] = adev->cg_flags;
+
+ /* rev==2 */
+ config[no_regs++] = adev->family;
+ config[no_regs++] = adev->external_rev_id;
+
+ /* rev==3 */
+ config[no_regs++] = adev->pdev->device;
+ config[no_regs++] = adev->pdev->revision;
+ config[no_regs++] = adev->pdev->subsystem_device;
+ config[no_regs++] = adev->pdev->subsystem_vendor;
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
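[Editorial sketch, not part of this patch.] A hedged example of consuming the versioned amdgpu_gca_config blob from userspace, reusing the open()/pread() pattern from the sketch above: word 0 is the layout version, and the following words appear in exactly the order they are written out here.

	/* fd: amdgpu_gca_config opened O_RDONLY as in the previous sketch */
	uint32_t cfg[40];
	ssize_t n = pread(fd, cfg, sizeof(cfg), 0);

	if (n >= 4 * (ssize_t)sizeof(uint32_t)) {
		printf("layout version:     %u\n", cfg[0]);
		printf("max_shader_engines: %u\n", cfg[1]);
		printf("max_tile_pipes:     %u\n", cfg[2]);
		printf("max_cu_per_sh:      %u\n", cfg[3]);
	}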
+
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int idx, x, outsize, r, valuesize;
+ uint32_t values[16];
+
+ if (size & 3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (amdgpu_dpm == 0)
+ return -EINVAL;
+
+ /* convert offset to sensor number */
+ idx = *pos >> 2;
+
+ valuesize = sizeof(values);
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+ else
+ return -EINVAL;
+
+ if (size > valuesize)
+ return -EINVAL;
+
+ outsize = 0;
+ x = 0;
+ if (!r) {
+ while (size) {
+			r = put_user(values[x++], (uint32_t *)buf);
+ buf += 4;
+ size -= 4;
+ outsize += 4;
+ }
+ }
+
+ return !r ? outsize : r;
+}
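[Editorial sketch, not part of this patch.] As the decode above shows, the sensor id comes from the file position (idx = pos >> 2), so one value can be fetched with a single aligned pread(); which ids are valid is defined by the powerplay sensor enumeration, outside this patch:

	/* fd: amdgpu_sensors opened O_RDONLY; idx is a sensor id (assumption) */
	uint32_t v;

	if (pread(fd, &v, sizeof(v), (off_t)idx * 4) == sizeof(v))
		printf("sensor %d = %u\n", idx, v);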
+
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+	ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
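[Editorial sketch, not part of this patch.] The GENMASK decode above fixes the layout of the wave file position: bits 6:0 dword offset, 14:7 se, 22:15 sh, 30:23 cu, 36:31 wave, 44:37 simd. A small packing helper that simply inverts that decode:

static inline loff_t amdgpu_wave_pos(unsigned off, unsigned se, unsigned sh,
				     unsigned cu, unsigned wave, unsigned simd)
{
	return  (loff_t)(off   & 0x7f)        |	/* bits  6:0  */
		((loff_t)(se   & 0xff) <<  7) |	/* bits 14:7  */
		((loff_t)(sh   & 0xff) << 15) |	/* bits 22:15 */
		((loff_t)(cu   & 0xff) << 23) |	/* bits 30:23 */
		((loff_t)(wave & 0x3f) << 31) |	/* bits 36:31 */
		((loff_t)(simd & 0xff) << 37);	/* bits 44:37 */
}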
+
+static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
+
+ data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ if (bank == 0) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ while (size) {
+ uint32_t value;
+
+		value = data[result >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto err;
+ }
+
+ result += 4;
+ buf += 4;
+ size -= 4;
+ }
+
+err:
+ kfree(data);
+ return result;
+}
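[Editorial sketch, not part of this patch.] amdgpu_gpr widens the same position scheme: bits 11:0 dword offset, 19:12 se, 27:20 sh, 35:28 cu, 43:36 wave, 51:44 simd, 59:52 thread, 61:60 bank (0 selects VGPRs, non-zero SGPRs, matching the branch above). The corresponding packing sketch:

static inline loff_t amdgpu_gpr_pos(unsigned off, unsigned se, unsigned sh,
				    unsigned cu, unsigned wave, unsigned simd,
				    unsigned thread, unsigned bank)
{
	return  (loff_t)(off    & 0xfff)        |
		((loff_t)(se     & 0xff) << 12) |
		((loff_t)(sh     & 0xff) << 20) |
		((loff_t)(cu     & 0xff) << 28) |
		((loff_t)(wave   & 0xff) << 36) |
		((loff_t)(simd   & 0xff) << 44) |
		((loff_t)(thread & 0xff) << 52) |
		((loff_t)(bank   & 0x3)  << 60);
}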
+
+static const struct file_operations amdgpu_debugfs_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_read,
+ .write = amdgpu_debugfs_regs_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_didt_read,
+ .write = amdgpu_debugfs_regs_didt_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_pcie_read,
+ .write = amdgpu_debugfs_regs_pcie_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_smc_read,
+ .write = amdgpu_debugfs_regs_smc_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_sensor_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_gpr_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gpr_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations *debugfs_regs[] = {
+ &amdgpu_debugfs_regs_fops,
+ &amdgpu_debugfs_regs_didt_fops,
+ &amdgpu_debugfs_regs_pcie_fops,
+ &amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
+ &amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
+ &amdgpu_debugfs_gpr_fops,
+};
+
+static const char *debugfs_regs_names[] = {
+ "amdgpu_regs",
+ "amdgpu_regs_didt",
+ "amdgpu_regs_pcie",
+ "amdgpu_regs_smc",
+ "amdgpu_gca_config",
+ "amdgpu_sensors",
+ "amdgpu_wave",
+ "amdgpu_gpr",
+};
+
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ unsigned i, j;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ ent = debugfs_create_file(debugfs_regs_names[i],
+ S_IFREG | S_IRUGO, root,
+ adev, debugfs_regs[i]);
+ if (IS_ERR(ent)) {
+ for (j = 0; j < i; j++) {
+				debugfs_remove(adev->debugfs_regs[j]);
+				adev->debugfs_regs[j] = NULL;
+ }
+ return PTR_ERR(ent);
+ }
+
+ if (!i)
+ i_size_write(ent->d_inode, adev->rmmio_size);
+ adev->debugfs_regs[i] = ent;
+ }
+
+ return 0;
+}
+
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ if (adev->debugfs_regs[i]) {
+ debugfs_remove(adev->debugfs_regs[i]);
+ adev->debugfs_regs[i] = NULL;
+ }
+ }
+}
+
+static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0, i;
+
+ /* hold on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+ kthread_park(ring->sched.thread);
+ }
+
+ seq_printf(m, "run ib test:\n");
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ seq_printf(m, "ib ring tests failed (%d).\n", r);
+ else
+ seq_printf(m, "ib ring tests passed.\n");
+
+ /* go on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+ kthread_unpark(ring->sched.thread);
+ }
+
+ return 0;
+}
+
+static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_write(m, adev->bios, adev->bios_size);
+ return 0;
+}
+
+static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_debugfs_list[] = {
+ {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
+ {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
+ {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram}
+};
+
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
+ ARRAY_SIZE(amdgpu_debugfs_list));
+}
+
+#else
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 147822545252..8260d8073c26 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -1,5 +1,7 @@
/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,57 +21,22 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#include "dm_services.h"
-#include "include/grph_object_id.h"
-
-static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
-{
- bool rc = true;
-
- switch (id.type) {
- case OBJECT_TYPE_UNKNOWN:
- rc = false;
- break;
- case OBJECT_TYPE_GPU:
- case OBJECT_TYPE_ENGINE:
- /* do NOT check for id.id == 0 */
- if (id.enum_id == ENUM_ID_UNKNOWN)
- rc = false;
- break;
- default:
- if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
- rc = false;
- break;
- }
-
- return rc;
-}
-
-bool dal_graphics_object_id_is_equal(
- struct graphics_object_id id1,
- struct graphics_object_id id2)
-{
- if (false == dal_graphics_object_id_is_valid(id1)) {
- dm_output_to_console(
- "%s: Warning: comparing invalid object 'id1'!\n", __func__);
- return false;
- }
-
- if (false == dal_graphics_object_id_is_valid(id2)) {
- dm_output_to_console(
- "%s: Warning: comparing invalid object 'id2'!\n", __func__);
- return false;
- }
-
- if (id1.id == id2.id && id1.enum_id == id2.enum_id
- && id1.type == id2.type)
- return true;
-
- return false;
-}
-
-
+/*
+ * Debugfs
+ */
+struct amdgpu_debugfs {
+ const struct drm_info_list *files;
+ unsigned num_files;
+};
+
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
+int amdgpu_debugfs_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles);
+int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 70c9e5756b02..357cd8bf2e55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -28,7 +28,6 @@
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
-#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -64,11 +63,6 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
-
static const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
@@ -333,7 +327,7 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
BUG();
}
-static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
+static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
@@ -342,13 +336,13 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
(void **)&adev->vram_scratch.ptr);
}
-static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
+static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
- * amdgpu_program_register_sequence - program an array of registers.
+ * amdgpu_device_program_register_sequence - program an array of registers.
*
* @adev: amdgpu_device pointer
* @registers: pointer to the register array
@@ -357,9 +351,9 @@ static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
 * Programs an array of registers with AND and OR masks.
* This is a helper for setting golden registers.
*/
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
- const u32 *registers,
- const u32 array_size)
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ const u32 *registers,
+ const u32 array_size)
{
u32 tmp, reg, and_mask, or_mask;
int i;
@@ -383,7 +377,7 @@ void amdgpu_program_register_sequence(struct amdgpu_device *adev,
}
}
-void amdgpu_pci_config_reset(struct amdgpu_device *adev)
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
@@ -392,14 +386,14 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
* GPU doorbell aperture helpers function.
*/
/**
- * amdgpu_doorbell_init - Init doorbell driver information.
+ * amdgpu_device_doorbell_init - Init doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
-static int amdgpu_doorbell_init(struct amdgpu_device *adev)
+static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
@@ -432,66 +426,35 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Tear down doorbell driver information (CIK)
*/
-static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
+static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
iounmap(adev->doorbell.ptr);
adev->doorbell.ptr = NULL;
}
-/**
- * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup amdkfd
- *
- * @adev: amdgpu_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
- *
- * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
- * takes doorbells required for its own rings and reports the setup to amdkfd.
- * amdgpu reserved doorbells are at the start of the doorbell aperture.
- */
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
-{
- /*
- * The first num_doorbells are used by amdgpu.
- * amdkfd takes whatever's left in the aperture.
- */
- if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = adev->doorbell.base;
- *aperture_size = adev->doorbell.size;
- *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
-}
+
/*
- * amdgpu_wb_*()
+ * amdgpu_device_wb_*()
* Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
*/
/**
- * amdgpu_wb_fini - Disable Writeback and free memory
+ * amdgpu_device_wb_fini - Disable Writeback and free memory
*
* @adev: amdgpu_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
-static void amdgpu_wb_fini(struct amdgpu_device *adev)
+static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
amdgpu_bo_free_kernel(&adev->wb.wb_obj,
@@ -502,7 +465,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_wb_init- Init Writeback driver info and allocate memory
+ * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
*
* @adev: amdgpu_device pointer
*
@@ -510,7 +473,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
* Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
*/
-static int amdgpu_wb_init(struct amdgpu_device *adev)
+static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
int r;
@@ -536,7 +499,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_wb_get - Allocate a wb entry
+ * amdgpu_device_wb_get - Allocate a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
@@ -544,7 +507,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
* Allocate a wb slot for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
@@ -558,21 +521,21 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
}
/**
- * amdgpu_wb_free - Free a wb entry
+ * amdgpu_device_wb_free - Free a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
*
* Free a wb slot allocated for use by the driver (all asics)
*/
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
if (wb < adev->wb.num_wb)
__clear_bit(wb >> 3, adev->wb.used);
}
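[Editorial sketch, not part of this patch.] The wb helpers pair naturally: a consumer takes a slot with amdgpu_device_wb_get(), reads the CPU-visible dword, and releases it with amdgpu_device_wb_free(). A minimal in-kernel sketch — the adev->wb.wb[] dword indexing is an assumption taken from the surrounding driver, not shown in this hunk:

	u32 offs;

	if (amdgpu_device_wb_get(adev, &offs) == 0) {
		u32 seen = adev->wb.wb[offs];	/* CPU view of the GPU-written slot */

		dev_dbg(adev->dev, "wb slot %u = 0x%08x\n", offs, seen);
		amdgpu_device_wb_free(adev, offs);
	}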
/**
- * amdgpu_vram_location - try to find VRAM location
+ * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
* @base: base address at which to put VRAM
@@ -580,7 +543,8 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
 * Function will try to place VRAM at the base address provided
 * as a parameter.
*/
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
+void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc, u64 base)
{
uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
@@ -594,7 +558,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
}
/**
- * amdgpu_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
*
@@ -605,7 +569,8 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
*
* FIXME: when reducing GTT size align new size on power of 2.
*/
-void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc)
{
u64 size_af, size_bf;
@@ -632,101 +597,6 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
-/*
- * Firmware Reservation functions
- */
-/**
- * amdgpu_fw_reserve_vram_fini - free fw reserved vram
- *
- * @adev: amdgpu_device pointer
- *
- * free fw reserved vram if it has been reserved.
- */
-void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
- NULL, &adev->fw_vram_usage.va);
-}
-
-/**
- * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
- *
- * @adev: amdgpu_device pointer
- *
- * create bo vram reservation from fw.
- */
-int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int r = 0;
- int i;
- u64 vram_size = adev->mc.visible_vram_size;
- u64 offset = adev->fw_vram_usage.start_offset;
- u64 size = adev->fw_vram_usage.size;
- struct amdgpu_bo *bo;
-
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
-
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
-
- r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
-
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
-
- /* remove the original mem node and create a new one at the
- * request position
- */
- bo = adev->fw_vram_usage.reserved_bo;
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
-
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
- &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
-
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size), NULL);
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
-
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
- }
- return r;
-
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
-}
-
/**
* amdgpu_device_resize_fb_bar - try to resize FB BAR
*
@@ -771,7 +641,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
cmd & ~PCI_COMMAND_MEMORY);
/* Free the VRAM and doorbell BAR, we most likely need to move both. */
- amdgpu_doorbell_fini(adev);
+ amdgpu_device_doorbell_fini(adev);
if (adev->asic_type >= CHIP_BONAIRE)
pci_release_resource(adev->pdev, 2);
@@ -788,7 +658,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* When the doorbell or fb BAR isn't available we have no chance of
* using the device.
*/
- r = amdgpu_doorbell_init(adev);
+ r = amdgpu_device_doorbell_init(adev);
if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
return -ENODEV;
@@ -801,7 +671,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
* GPU helpers function.
*/
/**
- * amdgpu_need_post - check if the hw need post or not
+ * amdgpu_device_need_post - check if the hw needs post or not
*
* @adev: amdgpu_device pointer
*
@@ -809,7 +679,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
* or post is needed if hw reset is performed.
 * Returns true if post is needed, false if not.
*/
-bool amdgpu_need_post(struct amdgpu_device *adev)
+bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -854,285 +724,9 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
return true;
}
-/**
- * amdgpu_dummy_page_init - init dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate the dummy page used by the driver (all asics).
- * This dummy page is used by the driver as a filler for gart entries
- * when pages are taken out of the GART
- * Returns 0 on sucess, -ENOMEM on failure.
- */
-int amdgpu_dummy_page_init(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page)
- return 0;
- adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
- if (adev->dummy_page.page == NULL)
- return -ENOMEM;
- adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
- dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-/**
- * amdgpu_dummy_page_fini - free dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the dummy page used by the driver (all asics).
- */
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page == NULL)
- return;
- pci_unmap_page(adev->pdev, adev->dummy_page.addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
-}
-
-
-/* ATOM accessor methods */
-/*
- * ATOM is an interpreted byte code stored in tables in the vbios. The
- * driver registers callbacks to access registers and the interpreter
- * in the driver parses the tables and executes then to program specific
- * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
- * atombios.h, and atom.c
- */
-
-/**
- * cail_pll_read - read PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- * Returns the value of the PLL register.
- */
-static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
-
-/**
- * cail_pll_write - write PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- * @val: value to write to the pll register
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- */
-static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
-
-}
-
-/**
- * cail_mc_read - read MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- *
- * Provides an MC register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MC register.
- */
-static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
-
-/**
- * cail_mc_write - write MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- * @val: value to write to the pll register
- *
- * Provides a MC register accessor for the atom interpreter (r4xx+).
- */
-static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
-
-}
-
-/**
- * cail_reg_write - write MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- * @val: value to write to the pll register
- *
- * Provides a MMIO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
-
- WREG32(reg, val);
-}
-
-/**
- * cail_reg_read - read MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- *
- * Provides an MMIO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MMIO register.
- */
-static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32(reg);
- return r;
-}
-
-/**
- * cail_ioreg_write - write IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- * @val: value to write to the pll register
- *
- * Provides a IO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
-
- WREG32_IO(reg, val);
-}
-
-/**
- * cail_ioreg_read - read IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- *
- * Provides an IO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the IO register.
- */
-static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32_IO(reg);
- return r;
-}
-
-static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct atom_context *ctx = adev->mode_info.atom_context;
-
- return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
-}
-
-static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
- NULL);
-
-/**
- * amdgpu_atombios_fini - free the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the driver info and register access callbacks for the ATOM
- * interpreter (r4xx+).
- * Called at driver shutdown.
- */
-static void amdgpu_atombios_fini(struct amdgpu_device *adev)
-{
- if (adev->mode_info.atom_context) {
- kfree(adev->mode_info.atom_context->scratch);
- kfree(adev->mode_info.atom_context->iio);
- }
- kfree(adev->mode_info.atom_context);
- adev->mode_info.atom_context = NULL;
- kfree(adev->mode_info.atom_card_info);
- adev->mode_info.atom_card_info = NULL;
- device_remove_file(adev->dev, &dev_attr_vbios_version);
-}
-
-/**
- * amdgpu_atombios_init - init the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Initializes the driver info and register access callbacks for the
- * ATOM interpreter (r4xx+).
- * Returns 0 on sucess, -ENOMEM on failure.
- * Called at driver startup.
- */
-static int amdgpu_atombios_init(struct amdgpu_device *adev)
-{
- struct card_info *atom_card_info =
- kzalloc(sizeof(struct card_info), GFP_KERNEL);
- int ret;
-
- if (!atom_card_info)
- return -ENOMEM;
-
- adev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = adev->ddev;
- atom_card_info->reg_read = cail_reg_read;
- atom_card_info->reg_write = cail_reg_write;
- /* needed for iio ops */
- if (adev->rio_mem) {
- atom_card_info->ioreg_read = cail_ioreg_read;
- atom_card_info->ioreg_write = cail_ioreg_write;
- } else {
- DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
- atom_card_info->ioreg_read = cail_reg_read;
- atom_card_info->ioreg_write = cail_reg_write;
- }
- atom_card_info->mc_read = cail_mc_read;
- atom_card_info->mc_write = cail_mc_write;
- atom_card_info->pll_read = cail_pll_read;
- atom_card_info->pll_write = cail_pll_write;
-
- adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
- if (!adev->mode_info.atom_context) {
- amdgpu_atombios_fini(adev);
- return -ENOMEM;
- }
-
- mutex_init(&adev->mode_info.atom_context->mutex);
- if (adev->is_atom_fw) {
- amdgpu_atomfirmware_scratch_regs_init(adev);
- amdgpu_atomfirmware_allocate_fb_scratch(adev);
- } else {
- amdgpu_atombios_scratch_regs_init(adev);
- amdgpu_atombios_allocate_fb_scratch(adev);
- }
-
- ret = device_create_file(adev->dev, &dev_attr_vbios_version);
- if (ret) {
- DRM_ERROR("Failed to create device file for VBIOS version\n");
- return ret;
- }
-
- return 0;
-}
-
/* if we get transitioned to only one device, take VGA back */
/**
- * amdgpu_vga_set_decode - enable/disable vga decode
+ * amdgpu_device_vga_set_decode - enable/disable vga decode
*
* @cookie: amdgpu_device pointer
* @state: enable/disable vga decode
@@ -1140,7 +734,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
-static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
+static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
struct amdgpu_device *adev = cookie;
amdgpu_asic_set_vga_state(adev, state);
@@ -1151,7 +745,7 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-static void amdgpu_check_block_size(struct amdgpu_device *adev)
+static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
@@ -1166,7 +760,7 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev)
}
}
-static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
/* no need to check the default value */
if (amdgpu_vm_size == -1)
@@ -1180,14 +774,14 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
}
/**
- * amdgpu_check_arguments - validate module params
+ * amdgpu_device_check_arguments - validate module params
*
* @adev: amdgpu_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
-static void amdgpu_check_arguments(struct amdgpu_device *adev)
+static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
@@ -1220,9 +814,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_fragment_size = -1;
}
- amdgpu_check_vm_size(adev);
+ amdgpu_device_check_vm_size(adev);
- amdgpu_check_block_size(adev);
+ amdgpu_device_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!is_power_of_2(amdgpu_vram_page_split))) {
@@ -1230,6 +824,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
}
+
+ if (amdgpu_lockup_timeout == 0) {
+ dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
+ amdgpu_lockup_timeout = 10000;
+ }
}
/**
@@ -1293,9 +892,9 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
.can_switch = amdgpu_switcheroo_can_switch,
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state)
+int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state)
{
int i, r = 0;
@@ -1315,9 +914,9 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
return r;
}
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state)
+int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state)
{
int i, r = 0;
@@ -1337,7 +936,8 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
return r;
}
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
{
int i;
@@ -1349,8 +949,8 @@ void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
}
}
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i, r;
@@ -1368,8 +968,8 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
}
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i;
@@ -1383,8 +983,9 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
}
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
@@ -1396,7 +997,7 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
}
/**
- * amdgpu_ip_block_version_cmp
+ * amdgpu_device_ip_block_version_cmp
*
* @adev: amdgpu_device pointer
* @type: enum amd_ip_block_type
@@ -1406,11 +1007,11 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
* return 0 if equal or greater
* return 1 if smaller or the ip_block doesn't exist
*/
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor)
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor)
{
- struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
if (ip_block && ((ip_block->version->major > major) ||
((ip_block->version->major == major) &&
@@ -1421,7 +1022,7 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
}
/**
- * amdgpu_ip_block_add
+ * amdgpu_device_ip_block_add
*
* @adev: amdgpu_device pointer
* @ip_block_version: pointer to the IP to add
@@ -1429,8 +1030,8 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
* Adds the IP block driver information to the collection of IPs
* on the asic.
*/
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version)
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
{
if (!ip_block_version)
return -EINVAL;
@@ -1586,7 +1187,7 @@ out:
return err;
}
-static int amdgpu_early_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
int i, r;
@@ -1695,7 +1296,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
int i, r;
@@ -1711,7 +1312,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- r = amdgpu_vram_scratch_init(adev);
+ r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
@@ -1721,9 +1322,9 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
}
- r = amdgpu_wb_init(adev);
+ r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_wb_init failed %d\n", r);
+ DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -1762,18 +1363,18 @@ static int amdgpu_init(struct amdgpu_device *adev)
return 0;
}
-static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
+static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}
-static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
+static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
return !!memcmp(adev->gart.ptr, adev->reset_magic,
AMDGPU_RESET_MAGIC_NUM);
}
-static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
+static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
int i = 0, r;
@@ -1796,7 +1397,7 @@ static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_late_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
int i = 0, r;
@@ -1817,12 +1418,12 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
mod_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- amdgpu_fill_reset_magic(adev);
+ amdgpu_device_fill_reset_magic(adev);
return 0;
}
-static int amdgpu_fini(struct amdgpu_device *adev)
+static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
@@ -1856,8 +1457,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_free_static_csa(adev);
- amdgpu_wb_fini(adev);
- amdgpu_vram_scratch_fini(adev);
+ amdgpu_device_wb_fini(adev);
+ amdgpu_device_vram_scratch_fini(adev);
}
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1910,14 +1511,14 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return 0;
}
-static void amdgpu_late_init_func_handler(struct work_struct *work)
+static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, late_init_work.work);
- amdgpu_late_set_cg_state(adev);
+ amdgpu_device_ip_late_set_cg_state(adev);
}
-int amdgpu_suspend(struct amdgpu_device *adev)
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int i, r;
@@ -1925,10 +1526,10 @@ int amdgpu_suspend(struct amdgpu_device *adev)
amdgpu_virt_request_full_gpu(adev, false);
/* ungate SMC block first */
- r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
+ r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
+ DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
@@ -1958,7 +1559,7 @@ int amdgpu_suspend(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1987,7 +1588,7 @@ static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -2020,7 +1621,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume_phase1(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
int i, r;
@@ -2043,7 +1644,7 @@ static int amdgpu_resume_phase1(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume_phase2(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
int i, r;
@@ -2065,14 +1666,14 @@ static int amdgpu_resume_phase2(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_resume_phase1(adev);
+ r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
- r = amdgpu_resume_phase2(adev);
+ r = amdgpu_device_ip_resume_phase2(adev);
return r;
}
@@ -2211,7 +1812,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
- amdgpu_check_arguments(adev);
+ amdgpu_device_check_arguments(adev);
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->smc_idx_lock);
@@ -2229,7 +1830,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->ring_lru_list);
spin_lock_init(&adev->ring_lru_list_lock);
- INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
+ INIT_DELAYED_WORK(&adev->late_init_work,
+ amdgpu_device_ip_late_init_func_handler);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -2249,7 +1851,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
/* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ amdgpu_device_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2263,14 +1865,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("PCI I/O BAR is not found.\n");
/* early init functions */
- r = amdgpu_early_init(adev);
+ r = amdgpu_device_ip_early_init(adev);
if (r)
return r;
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
- vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
+ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
if (amdgpu_runtime_pm == 1)
runtime = true;
@@ -2299,7 +1901,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (amdgpu_need_post(adev)) {
+ if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
r = -EINVAL;
@@ -2345,7 +1947,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* init the mode config */
drm_mode_config_init(adev->ddev);
- r = amdgpu_init(adev);
+ r = amdgpu_device_ip_init(adev);
if (r) {
/* failed in exclusive mode due to timeout */
if (amdgpu_sriov_vf(adev) &&
@@ -2359,9 +1961,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EAGAIN;
goto failed;
}
- dev_err(adev->dev, "amdgpu_init failed\n");
+ dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
- amdgpu_fini(adev);
+ amdgpu_device_ip_fini(adev);
goto failed;
}
@@ -2397,7 +1999,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
- r = amdgpu_gem_debugfs_init(adev);
+ r = amdgpu_debugfs_gem_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2405,17 +2007,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_test_ib_ring_init(adev);
- if (r)
- DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
-
r = amdgpu_debugfs_firmware_init(adev);
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_vbios_dump_init(adev);
+ r = amdgpu_debugfs_init(adev);
if (r)
- DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
+ DRM_ERROR("Creating debugfs files failed (%d).\n", r);
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
@@ -2433,9 +2031,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- r = amdgpu_late_init(adev);
+ r = amdgpu_device_ip_late_init(adev);
if (r) {
- dev_err(adev->dev, "amdgpu_late_init failed\n");
+ dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
@@ -2466,12 +2064,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->shutdown = true;
if (adev->mode_info.mode_config_initialized)
drm_crtc_force_disable_all(adev->ddev);
- /* evict vram memory */
- amdgpu_bo_evict_vram(adev);
+
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
- r = amdgpu_fini(adev);
+ r = amdgpu_device_ip_fini(adev);
if (adev->firmware.gpu_info_fw) {
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
@@ -2494,7 +2091,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
+ amdgpu_device_doorbell_fini(adev);
amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2575,7 +2172,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@@ -2583,7 +2180,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2632,18 +2228,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
goto unlock;
}
- amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
- if (amdgpu_need_post(adev)) {
+ if (amdgpu_device_need_post(adev)) {
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r)
DRM_ERROR("amdgpu asic init failed\n");
}
- r = amdgpu_resume(adev);
+ r = amdgpu_device_ip_resume(adev);
if (r) {
- DRM_ERROR("amdgpu_resume failed (%d).\n", r);
+ DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
goto unlock;
}
amdgpu_fence_driver_resume(adev);
@@ -2654,7 +2249,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
DRM_ERROR("ib ring test failed (%d).\n", r);
}
- r = amdgpu_late_init(adev);
+ r = amdgpu_device_ip_late_init(adev);
if (r)
goto unlock;
@@ -2734,7 +2329,7 @@ unlock:
return r;
}
-static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
int i;
bool asic_hang = false;
@@ -2756,7 +2351,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
return asic_hang;
}
-static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2774,7 +2369,7 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
int i;
@@ -2795,7 +2390,7 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
return false;
}
-static int amdgpu_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2813,7 +2408,7 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2830,18 +2425,10 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
return 0;
}
-bool amdgpu_need_backup(struct amdgpu_device *adev)
-{
- if (adev->flags & AMD_IS_APU)
- return false;
-
- return amdgpu_lockup_timeout > 0 ? true : false;
-}
-
-static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2874,7 +2461,7 @@ err:
}
/*
- * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough
+ * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
*
* @adev: amdgpu device pointer
* @reset_flags: output param tells caller the reset result
@@ -2882,18 +2469,19 @@ err:
 * Attempt a soft reset or full reset and reinitialize the ASIC.
 * Returns 0 on success, negative error code on failure.
*/
-static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
+static int amdgpu_device_reset(struct amdgpu_device *adev,
+			       uint64_t *reset_flags)
{
bool need_full_reset, vram_lost = 0;
int r;
- need_full_reset = amdgpu_need_full_reset(adev);
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
if (!need_full_reset) {
- amdgpu_pre_soft_reset(adev);
- r = amdgpu_soft_reset(adev);
- amdgpu_post_soft_reset(adev);
- if (r || amdgpu_check_soft_reset(adev)) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
DRM_INFO("soft reset failed, will fallback to full reset!\n");
need_full_reset = true;
}
@@ -2901,22 +2489,20 @@ static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
}
if (need_full_reset) {
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
retry:
- amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_asic_reset(adev);
- amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (!r) {
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_resume_phase1(adev);
+ r = amdgpu_device_ip_resume_phase1(adev);
if (r)
goto out;
- vram_lost = amdgpu_check_vram_lost(adev);
+ vram_lost = amdgpu_device_check_vram_lost(adev);
if (vram_lost) {
DRM_ERROR("VRAM is lost!\n");
atomic_inc(&adev->vram_lost_counter);
@@ -2927,12 +2513,12 @@ retry:
if (r)
goto out;
- r = amdgpu_resume_phase2(adev);
+ r = amdgpu_device_ip_resume_phase2(adev);
if (r)
goto out;
if (vram_lost)
- amdgpu_fill_reset_magic(adev);
+ amdgpu_device_fill_reset_magic(adev);
}
}
@@ -2942,7 +2528,7 @@ out:
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
need_full_reset = true;
goto retry;
}
@@ -2960,7 +2546,7 @@ out:
}
/*
- * amdgpu_reset_sriov - reset ASIC for SR-IOV vf
+ * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
* @adev: amdgpu device pointer
* @reset_flags: output param tells caller the reset result
@@ -2968,7 +2554,9 @@ out:
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, negative error code on failure.
*/
-static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
+static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ uint64_t *reset_flags,
+ bool from_hypervisor)
{
int r;
@@ -2980,7 +2568,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
return r;
/* Resume IP prior to SMC */
- r = amdgpu_sriov_reinit_early(adev);
+ r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
goto error;
@@ -2988,7 +2576,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
/* now we are okay to resume SMC/CP/SDMA */
- r = amdgpu_sriov_reinit_late(adev);
+ r = amdgpu_device_ip_reinit_late_sriov(adev);
if (r)
goto error;
@@ -3015,25 +2603,33 @@ error:
}
/**
- * amdgpu_gpu_recover - reset the asic and recover scheduler
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
* @adev: amdgpu device pointer
* @job: which job trigger hang
+ * @force: forces reset regardless of amdgpu_gpu_recovery
*
* Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
*/
-int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job, bool force)
{
struct drm_atomic_state *state = NULL;
uint64_t reset_flags = 0;
int i, r, resched;
- if (!amdgpu_check_soft_reset(adev)) {
+ if (!amdgpu_device_ip_check_soft_reset(adev)) {
DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
return 0;
}
+ if (!force && (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
+ DRM_INFO("GPU recovery disabled.\n");
+ return 0;
+ }
+
dev_info(adev->dev, "GPU reset begin!\n");
mutex_lock(&adev->lock_reset);
@@ -3058,16 +2654,16 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
continue;
kthread_park(ring->sched.thread);
- amd_sched_hw_job_reset(&ring->sched, &job->base);
+ drm_sched_hw_job_reset(&ring->sched, &job->base);
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
if (amdgpu_sriov_vf(adev))
- r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true);
+ r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
else
- r = amdgpu_reset(adev, &reset_flags);
+ r = amdgpu_device_reset(adev, &reset_flags);
if (!r) {
if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
@@ -3080,7 +2676,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
next = NULL;
- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+ amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = dma_fence_wait(fence, false);
if (r) {
@@ -3111,7 +2707,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
if (job && job->ring->idx != i)
continue;
- amd_sched_job_recovery(&ring->sched);
+ drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
}
} else {
@@ -3153,7 +2749,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
return r;
}
-void amdgpu_get_pcie_info(struct amdgpu_device *adev)
+void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
int ret;
@@ -3245,773 +2841,3 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
}
}
-/*
- * Debugfs
- */
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles)
-{
- unsigned i;
-
- for (i = 0; i < adev->debugfs_count; i++) {
- if (adev->debugfs[i].files == files) {
- /* Already registered */
- return 0;
- }
- }
-
- i = adev->debugfs_count + 1;
- if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
- DRM_ERROR("Reached maximum number of debugfs components.\n");
- DRM_ERROR("Report so we increase "
- "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
- }
- adev->debugfs[adev->debugfs_count].files = files;
- adev->debugfs[adev->debugfs_count].num_files = nfiles;
- adev->debugfs_count = i;
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_create_files(files, nfiles,
- adev->ddev->primary->debugfs_root,
- adev->ddev->primary);
-#endif
- return 0;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- goto end;
-
- value = RREG32(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto end;
- }
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
-end:
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
-}
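For reference, the offset encoding decoded above (bit 62 selects banked access; bits 24-33, 34-43 and 44-53 carry the SE, SH and instance bank, with 0x3FF meaning broadcast; bit 23 takes the PM lock; the low 22 bits are the register byte offset) can be exercised from userspace roughly as follows. A hedged sketch, assuming the device is minor 0 under /sys/kernel/debug/dri:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t off = 0x1234;		/* register byte offset, 4-byte aligned */

		off |= 0ULL << 24;		/* se_bank 0 (0x3FF = broadcast) */
		off |= 0x3ffULL << 34;		/* sh_bank: broadcast */
		off |= 0x3ffULL << 44;		/* instance_bank: broadcast */
		off |= 1ULL << 62;		/* enable banked access */

		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
		uint32_t val;

		if (fd < 0 || pread(fd, &val, sizeof(val), off) != sizeof(val))
			return 1;
		printf("reg 0x1234 = 0x%08x\n", val);
		close(fd);
		return 0;
	}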
-
-static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- return result;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_PCIE(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_PCIE(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_DIDT(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_DIDT(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_SMC(*pos);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_SMC(*pos, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- uint32_t *config, no_regs = 0;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- /* version, increment each time something is added */
- config[no_regs++] = 3;
- config[no_regs++] = adev->gfx.config.max_shader_engines;
- config[no_regs++] = adev->gfx.config.max_tile_pipes;
- config[no_regs++] = adev->gfx.config.max_cu_per_sh;
- config[no_regs++] = adev->gfx.config.max_sh_per_se;
- config[no_regs++] = adev->gfx.config.max_backends_per_se;
- config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
- config[no_regs++] = adev->gfx.config.max_gprs;
- config[no_regs++] = adev->gfx.config.max_gs_threads;
- config[no_regs++] = adev->gfx.config.max_hw_contexts;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
- config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.num_tile_pipes;
- config[no_regs++] = adev->gfx.config.backend_enable_mask;
- config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
- config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
- config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
- config[no_regs++] = adev->gfx.config.num_gpus;
- config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
- config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
- config[no_regs++] = adev->gfx.config.gb_addr_config;
- config[no_regs++] = adev->gfx.config.num_rbs;
-
- /* rev==1 */
- config[no_regs++] = adev->rev_id;
- config[no_regs++] = adev->pg_flags;
- config[no_regs++] = adev->cg_flags;
-
- /* rev==2 */
- config[no_regs++] = adev->family;
- config[no_regs++] = adev->external_rev_id;
-
- /* rev==3 */
- config[no_regs++] = adev->pdev->device;
- config[no_regs++] = adev->pdev->revision;
- config[no_regs++] = adev->pdev->subsystem_device;
- config[no_regs++] = adev->pdev->subsystem_vendor;
-
- while (size && (*pos < no_regs * 4)) {
- uint32_t value;
-
- value = config[*pos >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- kfree(config);
- return r;
- }
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- kfree(config);
- return result;
-}
-
-static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- int idx, x, outsize, r, valuesize;
- uint32_t values[16];
-
- if (size & 3 || *pos & 0x3)
- return -EINVAL;
-
- if (amdgpu_dpm == 0)
- return -EINVAL;
-
- /* convert offset to sensor number */
- idx = *pos >> 2;
-
- valuesize = sizeof(values);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- else
- return -EINVAL;
-
- if (size > valuesize)
- return -EINVAL;
-
- outsize = 0;
- x = 0;
- if (!r) {
- while (size) {
- r = put_user(values[x++], (int32_t *)buf);
- buf += 4;
- size -= 4;
- outsize += 4;
- }
- }
-
- return !r ? outsize : r;
-}
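In other words, the amdgpu_sensors file maps the read offset straight to a sensor index: userspace fetches sensor N as a 32-bit value with pread(fd, &val, 4, (off_t)N * 4), where N is whatever index the ASIC's amdgpu_dpm_read_sensor() implementation understands.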
-
-static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r, x;
- ssize_t result=0;
- uint32_t offset, se, sh, cu, wave, simd, data[32];
-
- if (size & 3 || *pos & 3)
- return -EINVAL;
-
- /* decode offset */
- offset = (*pos & GENMASK_ULL(6, 0));
- se = (*pos & GENMASK_ULL(14, 7)) >> 7;
- sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
- cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
- wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
- simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
-
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
-
- x = 0;
- if (adev->gfx.funcs->read_wave_data)
- adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- if (!x)
- return -EINVAL;
-
- while (size && (offset < x * 4)) {
- uint32_t value;
-
- value = data[offset >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- offset += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r;
- ssize_t result = 0;
- uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
-
- if (size & 3 || *pos & 3)
- return -EINVAL;
-
- /* decode offset */
- offset = *pos & GENMASK_ULL(11, 0);
- se = (*pos & GENMASK_ULL(19, 12)) >> 12;
- sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
- cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
- wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
- simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
- thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
- bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
-
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
-
- if (bank == 0) {
- if (adev->gfx.funcs->read_wave_vgprs)
- adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
- } else {
- if (adev->gfx.funcs->read_wave_sgprs)
- adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
- }
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- while (size) {
- uint32_t value;
-
- value = data[offset++];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto err;
- }
-
- result += 4;
- buf += 4;
- size -= 4;
- }
-
-err:
- kfree(data);
- return result;
-}
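The bit layout decoded above packs eight fields into the file offset; a small hypothetical helper (not in the driver) makes the packing explicit:

	#include <stdint.h>

	/* Build an amdgpu_gpr read offset: the low 12 bits index into the
	 * wave's registers, bank 0 selects VGPRs, bank 1 selects SGPRs. */
	static inline uint64_t amdgpu_gpr_pos(uint32_t offset, uint32_t se,
					      uint32_t sh, uint32_t cu,
					      uint32_t wave, uint32_t simd,
					      uint32_t thread, uint32_t bank)
	{
		return ((uint64_t)offset & 0xfff) |
		       ((uint64_t)se     << 12) |
		       ((uint64_t)sh     << 20) |
		       ((uint64_t)cu     << 28) |
		       ((uint64_t)wave   << 36) |
		       ((uint64_t)simd   << 44) |
		       ((uint64_t)thread << 52) |
		       ((uint64_t)bank   << 60);
	}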
-
-static const struct file_operations amdgpu_debugfs_regs_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_read,
- .write = amdgpu_debugfs_regs_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_didt_read,
- .write = amdgpu_debugfs_regs_didt_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_pcie_read,
- .write = amdgpu_debugfs_regs_pcie_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_smc_read,
- .write = amdgpu_debugfs_regs_smc_write,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_gca_config_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gca_config_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_sensors_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_sensor_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_wave_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_wave_read,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_gpr_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gpr_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations *debugfs_regs[] = {
- &amdgpu_debugfs_regs_fops,
- &amdgpu_debugfs_regs_didt_fops,
- &amdgpu_debugfs_regs_pcie_fops,
- &amdgpu_debugfs_regs_smc_fops,
- &amdgpu_debugfs_gca_config_fops,
- &amdgpu_debugfs_sensors_fops,
- &amdgpu_debugfs_wave_fops,
- &amdgpu_debugfs_gpr_fops,
-};
-
-static const char *debugfs_regs_names[] = {
- "amdgpu_regs",
- "amdgpu_regs_didt",
- "amdgpu_regs_pcie",
- "amdgpu_regs_smc",
- "amdgpu_gca_config",
- "amdgpu_sensors",
- "amdgpu_wave",
- "amdgpu_gpr",
-};
-
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- struct drm_minor *minor = adev->ddev->primary;
- struct dentry *ent, *root = minor->debugfs_root;
- unsigned i, j;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | S_IRUGO, root,
- adev, debugfs_regs[i]);
- if (IS_ERR(ent)) {
- for (j = 0; j < i; j++) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- return PTR_ERR(ent);
- }
-
- if (!i)
- i_size_write(ent->d_inode, adev->rmmio_size);
- adev->debugfs_regs[i] = ent;
- }
-
- return 0;
-}
-
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- }
-}
-
-static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int r = 0, i;
-
- /* hold on the scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_park(ring->sched.thread);
- }
-
- seq_printf(m, "run ib test:\n");
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- seq_printf(m, "ib ring tests failed (%d).\n", r);
- else
- seq_printf(m, "ib ring tests passed.\n");
-
- /* go on the scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_unpark(ring->sched.thread);
- }
-
- return 0;
-}
-
-static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
- {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
-};
-
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
-{
- return amdgpu_debugfs_add_files(adev,
- amdgpu_debugfs_test_ib_ring_list, 1);
-}
-
-int amdgpu_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
-
-static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- seq_write(m, adev->bios, adev->bios_size);
- return 0;
-}
-
-static const struct drm_info_list amdgpu_vbios_dump_list[] = {
- {"amdgpu_vbios",
- amdgpu_debugfs_get_vbios_dump,
- 0, NULL},
-};
-
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
-{
- return amdgpu_debugfs_add_files(adev,
- amdgpu_vbios_dump_list, 1);
-}
-#else
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 31383e004947..50afcf65181a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 10000;
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = -1; /* auto */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -165,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms, must be > 0 (default 10000)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
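With the default of -1, bare-metal ASICs keep the old behavior (no automatic recovery) while SR-IOV virtual functions recover automatically; booting with amdgpu.gpu_recovery=1 turns recovery on everywhere, and the debugfs amdgpu_gpu_recover hook bypasses the parameter entirely through the new force argument.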
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -645,7 +649,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
- amdgpu_suspend(adev);
+ amdgpu_device_ip_suspend(adev);
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -850,9 +854,6 @@ static struct drm_driver kms_driver = {
.disable_vblank = amdgpu_disable_vblank_kms,
.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
.get_scanout_position = amdgpu_get_crtc_scanout_position,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = amdgpu_debugfs_init,
-#endif
.irq_preinstall = amdgpu_irq_preinstall,
.irq_postinstall = amdgpu_irq_postinstall,
.irq_uninstall = amdgpu_irq_uninstall,
@@ -912,10 +913,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
- r = amd_sched_fence_slab_init();
- if (r)
- goto error_sched;
-
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -928,9 +925,6 @@ static int __init amdgpu_init(void)
/* let modprobe override vga console setting */
return pci_register_driver(pdriver);
-error_sched:
- amdgpu_fence_slab_fini();
-
error_fence:
amdgpu_sync_fini();
@@ -944,7 +938,6 @@ static void __exit amdgpu_exit(void)
pci_unregister_driver(pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
- amd_sched_fence_slab_fini();
amdgpu_fence_slab_fini();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 604ac03a42e4..008e1984b7e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -187,7 +187,7 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
seq = ++ring->fence_drv.sync_seq;
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- seq, AMDGPU_FENCE_FLAG_INT);
+ seq, 0);
*s = seq;
@@ -410,7 +410,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
- long timeout;
int r;
/* Check that num_hw_submission is a power of two */
@@ -434,20 +433,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
- timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
- if (timeout == 0) {
- /*
- * FIXME:
- * Delayed workqueue cannot use it directly,
- * so the scheduler will not use delayed workqueue if
- * MAX_SCHEDULE_TIMEOUT is set.
- * Currently keep it simple and silly.
- */
- timeout = MAX_SCHEDULE_TIMEOUT;
- }
- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
- timeout, ring->name);
+ msecs_to_jiffies(amdgpu_lockup_timeout), ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
@@ -503,7 +491,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
}
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- amd_sched_fini(&ring->sched);
+ drm_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
@@ -705,7 +693,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
seq_printf(m, "gpu recover\n");
- amdgpu_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, true);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 1f51897acc5b..0a4f34afaaaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -57,6 +57,51 @@
*/
/**
+ * amdgpu_gart_dummy_page_init - init dummy page used by the driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for GART entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
+{
+ if (adev->dummy_page.page)
+ return 0;
+ adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+ if (adev->dummy_page.page == NULL)
+ return -ENOMEM;
+ adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
+ dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+ __free_page(adev->dummy_page.page);
+ adev->dummy_page.page = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
+static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
+{
+ if (adev->dummy_page.page == NULL)
+ return;
+ pci_unmap_page(adev->pdev, adev->dummy_page.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ __free_page(adev->dummy_page.page);
+ adev->dummy_page.page = NULL;
+}
+
+/**
* amdgpu_gart_table_vram_alloc - allocate vram for gart page table
*
* @adev: amdgpu_device pointer
@@ -308,7 +353,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
- r = amdgpu_dummy_page_init(adev);
+ r = amdgpu_gart_dummy_page_init(adev);
if (r)
return r;
/* Compute table size */
@@ -340,5 +385,5 @@ void amdgpu_gart_fini(struct amdgpu_device *adev)
vfree(adev->gart.pages);
adev->gart.pages = NULL;
#endif
- amdgpu_dummy_page_fini(adev);
+ amdgpu_gart_dummy_page_fini(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index eb75eb44efc6..10805edcf964 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -851,7 +851,7 @@ static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
};
#endif
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ef043361009f..bb40d2529a30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -203,7 +203,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+ r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
if (r)
return r;
@@ -229,7 +229,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq)
{
- amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+ amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_ring_fini(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f5f27e4f0f7f..06373d44b3da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -92,15 +92,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
}
return 0;
} else {
- r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs);
+ r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs);
+ r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
if (r) {
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
return r;
}
@@ -133,8 +133,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
&adev->irq.ih.gpu_addr,
(void **)&adev->irq.ih.ring);
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
- amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index c340774082ea..56bcd59c3399 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
reset_work);
if (!amdgpu_sriov_vf(adev))
- amdgpu_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
/* Disable *all* interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index bdc210ac74f8..56d9ee5013a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
atomic_read(&job->ring->fence_drv.last_seq),
job->ring->fence_drv.sync_seq);
- amdgpu_gpu_recover(job->adev, job);
+ amdgpu_device_gpu_recover(job->adev, job, false);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -96,7 +96,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -118,7 +118,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f)
{
int r;
@@ -127,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
@@ -136,13 +136,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring, job->base.s_priority);
- amd_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base, entity);
return 0;
}
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
- struct amd_sched_entity *s_entity)
+static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
@@ -151,7 +151,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
- if (amd_sched_dependency_optimized(fence, s_entity)) {
+ if (drm_sched_dependency_optimized(fence, s_entity)) {
r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
if (r)
DRM_ERROR("Error adding fence to sync (%d)\n", r);
@@ -173,7 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
return fence;
}
-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
struct dma_fence *fence = NULL, *finished;
struct amdgpu_device *adev;
@@ -211,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return fence;
}
-const struct amd_sched_backend_ops amdgpu_sched_ops = {
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index dc0a8be98043..5c4c3e0d527b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -37,6 +37,18 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
+static bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ if (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
+ return false;
+
+ return true;
+}
+
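amdgpu_need_backup() reuses the same tri-state logic as the recovery gate: shadow backups of VRAM BOs are only kept when GPU recovery can actually consume them.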
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -327,7 +339,12 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
- struct ttm_operation_ctx ctx = { !kernel, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = !kernel,
+ .no_wait_gpu = false,
+ .allow_reserved_eviction = true,
+ .resv = resv
+ };
struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align;
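Wiring the BO's reservation object into the context together with allow_reserved_eviction should let TTM evict other buffers sharing that same reservation (e.g. per-VM BOs) while servicing this allocation; a plausible reading, assuming TTM's allow_reserved_eviction path behaves as its name suggests.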
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 83205b93e62d..01a996c6b802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1278,16 +1278,16 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
amdgpu_pm_compute_clocks(adev);
} else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = false;
mutex_unlock(&adev->pm.mutex);
@@ -1584,7 +1584,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *ddev = adev->ddev;
u32 flags = 0;
- amdgpu_get_clockgating_state(adev, &flags);
+ amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
amdgpu_parse_cg_state(m, flags);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a98fbbb4739f..13044e66dcaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -164,7 +164,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Release a request for executing at @priority
*/
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
int i;
@@ -175,7 +175,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
return;
/* no need to restore if the job is already at the lowest priority */
- if (priority == AMD_SCHED_PRIORITY_NORMAL)
+ if (priority == DRM_SCHED_PRIORITY_NORMAL)
return;
mutex_lock(&ring->priority_mutex);
@@ -184,8 +184,8 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
goto out_unlock;
/* decay priority to the next level with a job available */
- for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
- if (i == AMD_SCHED_PRIORITY_NORMAL
+ for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ if (i == DRM_SCHED_PRIORITY_NORMAL
|| atomic_read(&ring->num_jobs[i])) {
ring->priority = i;
ring->funcs->set_priority(ring, i);
@@ -206,7 +206,7 @@ out_unlock:
* Request a ring's priority to be raised to @priority (refcounted).
*/
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
if (!ring->funcs->set_priority)
return;
@@ -263,25 +263,25 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_wb_get(adev, &ring->rptr_offs);
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->wptr_offs);
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->fence_offs);
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
if (r) {
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
return r;
@@ -317,12 +317,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
- ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+ ring->priority = DRM_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring);
- for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+ for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
if (amdgpu_debugfs_ring_init(adev, ring)) {
@@ -348,11 +348,11 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
- amdgpu_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index a6b89e3932a5..010f69084af5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -25,7 +25,7 @@
#define __AMDGPU_RING_H__
#include <drm/amdgpu_drm.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
/* max number of rings */
#define AMDGPU_MAX_RINGS 18
@@ -154,14 +154,14 @@ struct amdgpu_ring_funcs {
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
};
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
+ struct drm_gpu_scheduler sched;
struct list_head lru_list;
struct amdgpu_bo *ring_obj;
@@ -186,6 +186,7 @@ struct amdgpu_ring {
uint64_t eop_gpu_addr;
u32 doorbell_index;
bool use_doorbell;
+ bool use_pollmem;
unsigned wptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
@@ -196,7 +197,7 @@ struct amdgpu_ring {
unsigned vm_inv_eng;
bool has_compute_vm_bug;
- atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
+ atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
@@ -212,9 +213,9 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 290cc3f9c433..86a0715d9431 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -29,29 +29,29 @@
#include "amdgpu_vm.h"
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
{
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
- return AMD_SCHED_PRIORITY_HIGH_HW;
+ return DRM_SCHED_PRIORITY_HIGH_HW;
case AMDGPU_CTX_PRIORITY_HIGH:
- return AMD_SCHED_PRIORITY_HIGH_SW;
+ return DRM_SCHED_PRIORITY_HIGH_SW;
case AMDGPU_CTX_PRIORITY_NORMAL:
- return AMD_SCHED_PRIORITY_NORMAL;
+ return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return AMD_SCHED_PRIORITY_LOW;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_UNSET:
- return AMD_SCHED_PRIORITY_UNSET;
+ return DRM_SCHED_PRIORITY_UNSET;
default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority);
- return AMD_SCHED_PRIORITY_INVALID;
+ return DRM_SCHED_PRIORITY_INVALID;
}
}
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
struct file *filp = fcheck(fd);
struct drm_file *file;
@@ -86,11 +86,11 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = dev->dev_private;
- enum amd_sched_priority priority;
+ enum drm_sched_priority priority;
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
- if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+ if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index b28c067d3822..2a1a0c734bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -27,7 +27,7 @@
#include <drm/drmP.h>
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index ebe1ffbab0c1..df65c66dc956 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -64,7 +64,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence) {
struct amdgpu_ring *ring;
@@ -85,7 +85,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
*/
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence)
return s_fence->owner;
@@ -120,7 +120,7 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
{
struct amdgpu_sync_entry *e;
@@ -129,6 +129,10 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
continue;
amdgpu_sync_keep_later(&e->fence, f);
+
+ /* Preserve the explicit flag so we don't lose the pipeline sync */
+ e->explicit |= explicit;
+
return true;
}
return false;
@@ -148,12 +152,11 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (!f)
return 0;
-
if (amdgpu_sync_same_dev(adev, f) &&
amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
- if (amdgpu_sync_add_later(sync, f))
+ if (amdgpu_sync_add_later(sync, f, explicit))
return 0;
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
@@ -245,7 +248,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct dma_fence *f = e->fence;
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 952e0bf3bc84..f1b7d987bd57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -76,7 +76,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
int r;
adev->mman.mem_global_referenced = false;
@@ -108,8 +108,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
- r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
@@ -131,7 +131,7 @@ error_mem:
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
- amd_sched_entity_fini(adev->mman.entity.sched,
+ drm_sched_entity_fini(adev->mman.entity.sched,
&adev->mman.entity);
mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
@@ -505,7 +505,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -536,7 +536,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -597,8 +597,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, ctx->interruptible,
- ctx->no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r) {
return r;
}
@@ -1270,6 +1269,101 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.access_memory = &amdgpu_ttm_access_memory
};
+/*
+ * Firmware Reservation functions
+ */
+/**
+ * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free fw reserved vram if it has been reserved.
+ */
+static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
+ NULL, &adev->fw_vram_usage.va);
+}
+
+/**
+ * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from fw.
+ */
+static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int r = 0;
+ int i;
+ u64 vram_size = adev->mc.visible_vram_size;
+ u64 offset = adev->fw_vram_usage.start_offset;
+ u64 size = adev->fw_vram_usage.size;
+ struct amdgpu_bo *bo;
+
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+
+ if (adev->fw_vram_usage.size > 0 &&
+ adev->fw_vram_usage.size <= vram_size) {
+
+ r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
+ PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
+ &adev->fw_vram_usage.reserved_bo);
+ if (r)
+ goto error_create;
+
+ r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+ if (r)
+ goto error_reserve;
+
+ /* remove the original mem node and create a new one at the
+ * requested position
+ */
+ bo = adev->fw_vram_usage.reserved_bo;
+ offset = ALIGN(offset, PAGE_SIZE);
+ for (i = 0; i < bo->placement.num_placement; ++i) {
+ bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+ bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+
+ ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+ r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
+ &bo->tbo.mem, &ctx);
+ if (r)
+ goto error_pin;
+
+ r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ adev->fw_vram_usage.start_offset,
+ (adev->fw_vram_usage.start_offset +
+ adev->fw_vram_usage.size), NULL);
+ if (r)
+ goto error_pin;
+ r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+ if (r)
+ goto error_kmap;
+
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+ }
+ return r;
+
+error_kmap:
+ amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+ amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+ return r;
+}
+
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
uint64_t gtt_size;
@@ -1312,7 +1406,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*The reserved vram for firmware must be pinned to the specified
*place on the VRAM, so reserve it early.
*/
- r = amdgpu_fw_reserve_vram_init(adev);
+ r = amdgpu_ttm_fw_reserve_vram_init(adev);
if (r) {
return r;
}
@@ -1330,9 +1424,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
struct sysinfo si;
si_meminfo(&si);
- gtt_size = max(AMDGPU_DEFAULT_GTT_SIZE_MB << 20,
- (uint64_t)si.totalram * si.mem_unit * 3/4);
- } else
+ gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size),
+ ((uint64_t)si.totalram * si.mem_unit * 3/4));
+ } else
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
if (r) {
@@ -1396,7 +1492,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_ttm_debugfs_fini(adev);
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
- amdgpu_fw_reserve_vram_fini(adev);
+ amdgpu_ttm_fw_reserve_vram_fini(adev);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
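The new GTT sizing clamps the old three-quarters-of-RAM heuristic: GTT is now at least max(default, VRAM size) but never more than 3/4 of system memory. For example, assuming the 3 GiB AMDGPU_DEFAULT_GTT_SIZE_MB default, a 16 GiB machine with an 8 GiB card gets min(max(3 GiB, 8 GiB), 12 GiB) = 8 GiB of GTT.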
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 4f9433e61406..167856f6080f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -25,7 +25,7 @@
#define __AMDGPU_TTM_H__
#include "amdgpu.h"
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
@@ -55,7 +55,7 @@ struct amdgpu_mman {
struct mutex gtt_window_lock;
/* Scheduler entity for buffer moves */
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
};
struct amdgpu_copy_mem {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f2a9e17fdb4..b2eae86bf906 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -116,7 +116,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
@@ -230,8 +230,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
}
ring = &adev->uvd.ring;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD run queue.\n");
@@ -244,7 +244,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
}
/* from uvd v5.0 HW addressing capacity increased to 64 bits */
- if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
+ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
switch (adev->asic_type) {
@@ -272,7 +272,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int i;
kfree(adev->uvd.saved_bo);
- amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+ drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
&adev->uvd.gpu_addr,
@@ -297,6 +297,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;
@@ -304,8 +306,6 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_UVD_HANDLES)
return 0;
- cancel_delayed_work_sync(&adev->uvd.idle_work);
-
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
@@ -346,6 +346,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
memset_io(ptr, 0, size);
+ /* to restore uvd fence seq */
+ amdgpu_fence_driver_force_completion(&adev->uvd.ring);
}
return 0;
@@ -1153,10 +1155,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
/* shutdown the UVD block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1176,10 +1178,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_uvd(adev, true);
} else {
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 845eea993f75..32ea20b99e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -51,8 +51,8 @@ struct amdgpu_uvd {
struct amdgpu_irq_src irq;
bool address_64_bit;
bool use_ctx_buf;
- struct amd_sched_entity entity;
- struct amd_sched_entity entity_enc;
+ struct drm_sched_entity entity;
+ struct drm_sched_entity entity_enc;
uint32_t srbm_soft_reset;
unsigned num_enc_rings;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index ba6d846b08ff..9857d482c942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -85,7 +85,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned ucode_version, version_major, version_minor, binary_id;
@@ -174,8 +174,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
}
ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
@@ -207,7 +207,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
- amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
@@ -311,10 +311,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -343,10 +343,10 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 5ce54cde472d..162cae94e3b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -46,7 +46,7 @@ struct amdgpu_vce {
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
unsigned harvest_config;
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index d7ba048c2f80..837962118dbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -35,7 +35,6 @@
#include "soc15d.h"
#include "soc15_common.h"
-#include "soc15ip.h"
#include "vcn/vcn_1_0_offset.h"
/* 1 second timeout */
@@ -51,7 +50,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
@@ -104,8 +103,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
ring = &adev->vcn.ring_dec;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN dec run queue.\n");
@@ -113,8 +112,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
ring = &adev->vcn.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN enc run queue.\n");
@@ -130,9 +129,9 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
kfree(adev->vcn.saved_bo);
- amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
+ drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
- amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
+ drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d50ba0657854..2fd7db891689 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -56,8 +56,8 @@ struct amdgpu_vcn {
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
struct amdgpu_irq_src irq;
- struct amd_sched_entity entity_dec;
- struct amd_sched_entity entity_enc;
+ struct drm_sched_entity entity_dec;
+ struct drm_sched_entity entity_enc;
unsigned num_enc_rings;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3ecdbdfb04dd..398abbcbf029 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -148,12 +148,23 @@ struct amdgpu_prt_cb {
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
{
- if (level != adev->vm_manager.num_level)
- return 9 * (adev->vm_manager.num_level - level - 1) +
+ unsigned shift = 0xff;
+
+ switch (level) {
+ case AMDGPU_VM_PDB2:
+ case AMDGPU_VM_PDB1:
+ case AMDGPU_VM_PDB0:
+ shift = 9 * (AMDGPU_VM_PDB0 - level) +
adev->vm_manager.block_size;
- else
- /* For the page tables on the leaves */
- return 0;
+ break;
+ case AMDGPU_VM_PTB:
+ shift = 0;
+ break;
+ default:
+ dev_err(adev->dev, "the level%d isn't supported.\n", level);
+ }
+
+ return shift;
}
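As a worked example of the shift computation above, assuming the default 9-bit per-level fan-out and 4 KiB GPU pages (block_size = 9 here is illustrative, not taken from a specific ASIC):

    /* Hedged sketch of the per-level shift math. With block_size = 9,
     * the address bits consumed below each level are:
     *   PTB:  0  (leaf; one entry maps a single 4 KiB page)
     *   PDB0: 9  (one entry covers 512 pages = 2 MiB)
     *   PDB1: 18 (one entry covers 1 GiB)
     *   PDB2: 27 (one entry covers 512 GiB)
     */
    enum { EX_PDB2, EX_PDB1, EX_PDB0, EX_PTB }; /* mirrors amdgpu_vm_level */

    static unsigned example_level_shift(unsigned level, unsigned block_size)
    {
            if (level == EX_PTB)
                    return 0;
            return 9 * (EX_PDB0 - level) + block_size;
    }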
/**
@@ -166,12 +177,13 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
unsigned level)
{
- unsigned shift = amdgpu_vm_level_shift(adev, 0);
+ unsigned shift = amdgpu_vm_level_shift(adev,
+ adev->vm_manager.root_level);
- if (level == 0)
+ if (level == adev->vm_manager.root_level)
/* For the root directory */
return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
- else if (level != adev->vm_manager.num_level)
+ else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
else
@@ -329,9 +341,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
to >= amdgpu_vm_num_entries(adev, level))
return -EINVAL;
- if (to > parent->last_entry_used)
- parent->last_entry_used = to;
-
++level;
saddr = saddr & ((1 << shift) - 1);
eaddr = eaddr & ((1 << shift) - 1);
@@ -346,7 +355,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
if (vm->pte_support_ats) {
init_value = AMDGPU_PTE_DEFAULT_ATC;
- if (level != adev->vm_manager.num_level - 1)
+ if (level != AMDGPU_VM_PTB)
init_value |= AMDGPU_PDE_PTE;
}
@@ -386,10 +395,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_add(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
- entry->addr = 0;
}
- if (level < adev->vm_manager.num_level) {
+ if (level < AMDGPU_VM_PTB) {
uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
((1 << shift) - 1);
@@ -435,7 +443,8 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
+ return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+ adev->vm_manager.root_level);
}
/**
@@ -732,7 +741,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
has_compute_vm_bug = false;
- ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block) {
/* Compute has a VM bug for GFX version < 7.
Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
@@ -1060,162 +1069,52 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/*
- * amdgpu_vm_update_level - update a single level in the hierarchy
+ * amdgpu_vm_update_pde - update a single level in the hierarchy
*
- * @adev: amdgpu_device pointer
+ * @params: parameters for the update
* @vm: requested vm
* @parent: parent directory
+ * @entry: entry to update
*
- * Makes sure all entries in @parent are up to date.
- * Returns 0 for success, error for failure.
+ * Makes sure the requested entry in parent is up to date.
*/
-static int amdgpu_vm_update_level(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ struct amdgpu_vm_pt *entry)
{
- struct amdgpu_bo *shadow;
- struct amdgpu_ring *ring = NULL;
+ struct amdgpu_bo *bo = entry->base.bo, *shadow = NULL, *pbo;
uint64_t pd_addr, shadow_addr = 0;
- uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
- unsigned count = 0, pt_idx, ndw = 0;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- struct dma_fence *fence = NULL;
- uint32_t incr;
-
- int r;
+ uint64_t pde, pt, flags;
+ unsigned level;
- if (!parent->entries)
- return 0;
-
- memset(&params, 0, sizeof(params));
- params.adev = adev;
- shadow = parent->base.bo->shadow;
+ /* Don't update huge pages here */
+ if (entry->huge)
+ return;
if (vm->use_cpu_for_update) {
pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
- r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
- if (unlikely(r))
- return r;
-
- params.func = amdgpu_vm_cpu_set_ptes;
} else {
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
- sched);
-
- /* padding, etc. */
- ndw = 64;
-
- /* assume the worst case */
- ndw += parent->last_entry_used * 6;
-
pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
-
- if (shadow) {
+ shadow = parent->base.bo->shadow;
+ if (shadow)
shadow_addr = amdgpu_bo_gpu_offset(shadow);
- ndw *= 2;
- } else {
- shadow_addr = 0;
- }
-
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
- if (r)
- return r;
-
- params.ib = &job->ibs[0];
- params.func = amdgpu_vm_do_set_ptes;
}
+ for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
+ pbo = pbo->parent;
- /* walk over the address space and update the directory */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- struct amdgpu_bo *bo = entry->base.bo;
- uint64_t pde, pt;
-
- if (bo == NULL)
- continue;
-
- spin_lock(&vm->status_lock);
- list_del_init(&entry->base.vm_status);
- spin_unlock(&vm->status_lock);
-
- pt = amdgpu_bo_gpu_offset(bo);
- pt = amdgpu_gart_get_vm_pde(adev, pt);
- /* Don't update huge pages here */
- if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
- parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
- continue;
-
- parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
-
- pde = pd_addr + pt_idx * 8;
- incr = amdgpu_bo_size(bo);
- if (((last_pde + 8 * count) != pde) ||
- ((last_pt + incr * count) != pt) ||
- (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
-
- if (count) {
- if (shadow)
- params.func(&params,
- last_shadow,
- last_pt, count,
- incr,
- AMDGPU_PTE_VALID);
-
- params.func(&params, last_pde,
- last_pt, count, incr,
- AMDGPU_PTE_VALID);
- }
-
- count = 1;
- last_pde = pde;
- last_shadow = shadow_addr + pt_idx * 8;
- last_pt = pt;
- } else {
- ++count;
- }
- }
-
- if (count) {
- if (vm->root.base.bo->shadow)
- params.func(&params, last_shadow, last_pt,
- count, incr, AMDGPU_PTE_VALID);
-
- params.func(&params, last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
- }
-
- if (!vm->use_cpu_for_update) {
- if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync,
- parent->base.bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
- if (shadow)
- amdgpu_sync_resv(adev, &job->sync,
- shadow->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
-
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(parent->base.bo, fence, true);
- dma_fence_put(vm->last_update);
- vm->last_update = fence;
- }
+ level += params->adev->vm_manager.root_level;
+ pt = amdgpu_bo_gpu_offset(bo);
+ flags = AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
+ if (shadow) {
+ pde = shadow_addr + (entry - parent->entries) * 8;
+ params->func(params, pde, pt, 1, 0, flags);
}
- return 0;
-
-error_free:
- amdgpu_job_free(job);
- return r;
+ pde = pd_addr + (entry - parent->entries) * 8;
+ params->func(params, pde, pt, 1, 0, flags);
}
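The PDE address arithmetic above relies on each directory entry being an 8-byte word, with the slot index recovered by pointer arithmetic on the entries array. A hedged restatement:

    /* Hedged sketch: the GPU address of directory entry N is
     * directory_base + N * 8, where N falls out of the
     * (entry - parent->entries) pointer subtraction used above.
     */
    static u64 example_pde_addr(u64 pd_gpu_addr,
                                const struct amdgpu_vm_pt *parent,
                                const struct amdgpu_vm_pt *entry)
    {
            return pd_gpu_addr + (u64)(entry - parent->entries) * 8;
    }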
/*
@@ -1225,27 +1124,29 @@ error_free:
*
* Mark all PD level as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
- unsigned pt_idx;
+ unsigned pt_idx, num_entries;
/*
* Recurse into the subdirectories. This recursion is harmless because
* we only have a maximum of 5 layers.
*/
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+ num_entries = amdgpu_vm_num_entries(adev, level);
+ for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
if (!entry->base.bo)
continue;
- entry->addr = ~0ULL;
spin_lock(&vm->status_lock);
if (list_empty(&entry->base.vm_status))
list_add(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
- amdgpu_vm_invalidate_level(vm, entry);
+ amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
}
}
@@ -1261,38 +1162,63 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
+ struct amdgpu_pte_update_params params;
+ struct amdgpu_job *job;
+ unsigned ndw = 0;
int r = 0;
+ if (list_empty(&vm->relocated))
+ return 0;
+
+restart:
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+
+ if (vm->use_cpu_for_update) {
+ r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ if (unlikely(r))
+ return r;
+
+ params.func = amdgpu_vm_cpu_set_ptes;
+ } else {
+ ndw = 512 * 8;
+ r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ if (r)
+ return r;
+
+ params.ib = &job->ibs[0];
+ params.func = amdgpu_vm_do_set_ptes;
+ }
+
spin_lock(&vm->status_lock);
while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_vm_bo_base *bo_base, *parent;
+ struct amdgpu_vm_pt *pt, *entry;
struct amdgpu_bo *bo;
bo_base = list_first_entry(&vm->relocated,
struct amdgpu_vm_bo_base,
vm_status);
+ list_del_init(&bo_base->vm_status);
spin_unlock(&vm->status_lock);
bo = bo_base->bo->parent;
- if (bo) {
- struct amdgpu_vm_bo_base *parent;
- struct amdgpu_vm_pt *pt;
-
- parent = list_first_entry(&bo->va,
- struct amdgpu_vm_bo_base,
- bo_list);
- pt = container_of(parent, struct amdgpu_vm_pt, base);
-
- r = amdgpu_vm_update_level(adev, vm, pt);
- if (r) {
- amdgpu_vm_invalidate_level(vm, &vm->root);
- return r;
- }
- spin_lock(&vm->status_lock);
- } else {
+ if (!bo) {
spin_lock(&vm->status_lock);
- list_del_init(&bo_base->vm_status);
+ continue;
}
+
+ parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
+ bo_list);
+ pt = container_of(parent, struct amdgpu_vm_pt, base);
+ entry = container_of(bo_base, struct amdgpu_vm_pt, base);
+
+ amdgpu_vm_update_pde(&params, vm, pt, entry);
+
+ spin_lock(&vm->status_lock);
+ if (!vm->use_cpu_for_update &&
+ (ndw - params.ib->length_dw) < 32)
+ break;
}
spin_unlock(&vm->status_lock);
@@ -1300,8 +1226,44 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
/* Flush HDP */
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);
+ } else if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ } else {
+ struct amdgpu_bo *root = vm->root.base.bo;
+ struct amdgpu_ring *ring;
+ struct dma_fence *fence;
+
+ ring = container_of(vm->entity.sched, struct amdgpu_ring,
+ sched);
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM, false);
+ if (root->shadow)
+ amdgpu_sync_resv(adev, &job->sync,
+ root->shadow->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM, false);
+
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error;
+
+ amdgpu_bo_fence(root, fence, true);
+ dma_fence_put(vm->last_update);
+ vm->last_update = fence;
}
+ if (!list_empty(&vm->relocated))
+ goto restart;
+
+ return 0;
+
+error:
+ amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+ adev->vm_manager.root_level);
+ amdgpu_job_free(job);
return r;
}
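The rewritten amdgpu_vm_update_directories() sizes its IB at ndw = 512 * 8 dwords, emits one small PDE write per relocated entry, and once fewer than 32 dwords remain it submits the partial IB and restarts to drain whatever is left. A condensed, self-contained sketch of that fill/submit/restart pattern (constants from the hunk above, helper names illustrative):

    #define EX_NDW      (512 * 8)   /* IB dword budget, as above */
    #define EX_LOW_MARK 32          /* submit when this little is left */

    static void example_flush(unsigned used_dw) { /* submit 'used_dw' dwords */ }

    static void example_update_directories(unsigned pending, unsigned dw_per_pde)
    {
            while (pending) {                /* the 'goto restart' loop */
                    unsigned used = 0;

                    while (pending && (EX_NDW - used) >= EX_LOW_MARK) {
                            used += dw_per_pde;  /* one PDE update */
                            --pending;
                    }
                    example_flush(used);         /* partial IB goes out */
            }
    }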
@@ -1319,19 +1281,19 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
struct amdgpu_vm_pt **entry,
struct amdgpu_vm_pt **parent)
{
- unsigned level = 0;
+ unsigned level = p->adev->vm_manager.root_level;
*parent = NULL;
*entry = &p->vm->root;
while ((*entry)->entries) {
- unsigned idx = addr >> amdgpu_vm_level_shift(p->adev, level++);
+ unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
- idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
*parent = *entry;
- *entry = &(*entry)->entries[idx];
+ *entry = &(*entry)->entries[addr >> shift];
+ addr &= (1ULL << shift) - 1;
}
- if (level != p->adev->vm_manager.num_level)
+ if (level != AMDGPU_VM_PTB)
*entry = NULL;
}
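The rewritten walk indexes each level with the high bits of the page-granular address and strips them before descending, instead of the old modulo on the BO size. A worked sketch using the shifts from the earlier example (27/18/9 for PDB2/PDB1/PDB0):

    /* Hedged sketch: split a page-granular address into per-level
     * indices the way the loop above does; whatever remains after the
     * last directory level indexes the PTB.
     */
    static void example_walk(u64 addr)
    {
            static const unsigned shifts[] = { 27, 18, 9 };
            unsigned i;

            for (i = 0; i < 3; ++i) {
                    u64 idx = addr >> shifts[i];     /* entries[idx] at this level */

                    addr &= (1ULL << shifts[i]) - 1; /* keep the low bits */
                    (void)idx;
            }
            /* addr now indexes into the page table (PTB) itself */
    }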
@@ -1363,17 +1325,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
!(flags & AMDGPU_PTE_VALID)) {
dst = amdgpu_bo_gpu_offset(entry->base.bo);
- dst = amdgpu_gart_get_vm_pde(p->adev, dst);
flags = AMDGPU_PTE_VALID;
} else {
/* Set the huge page flag to stop scanning at this PDE */
flags |= AMDGPU_PDE_PTE;
}
- if (entry->addr == (dst | flags))
+ if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
return;
+ entry->huge = !!(flags & AMDGPU_PDE_PTE);
- entry->addr = (dst | flags);
+ amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
+ &dst, &flags);
if (use_cpu_update) {
/* In case a huge page is replaced with a system
@@ -1447,7 +1410,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
amdgpu_vm_handle_huge_pages(params, entry, parent,
nptes, dst, flags);
/* We don't need to update PTEs for huge pages */
- if (entry->addr & AMDGPU_PDE_PTE)
+ if (entry->huge)
continue;
pt = entry->base.bo;
@@ -1688,7 +1651,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(vm, &vm->root);
+ amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+ adev->vm_manager.root_level);
return r;
}
@@ -2604,7 +2568,19 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
tmp >>= amdgpu_vm_block_size - 9;
tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
-
+ switch (adev->vm_manager.num_level) {
+ case 3:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB2;
+ break;
+ case 2:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB1;
+ break;
+ case 1:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB0;
+ break;
+ default:
+ dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
+ }
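The switch above pins the root to the deepest page-directory level the configuration needs, so a three-level setup walks PDB2 -> PDB1 -> PDB0 -> PTB. A compact restatement (enum values as in amdgpu_vm.h; the fallback is illustrative, the driver itself only logs):

    static enum amdgpu_vm_level example_root_level(unsigned num_level)
    {
            switch (num_level) {
            case 3: return AMDGPU_VM_PDB2;  /* 3 PDBs + PTB */
            case 2: return AMDGPU_VM_PDB1;  /* 2 PDBs + PTB */
            case 1: return AMDGPU_VM_PDB0;  /* 1 PDB  + PTB */
            default: return AMDGPU_VM_PDB0; /* unsupported; see dev_err above */
            }
    }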
/* block size depends on vm size and hw setup*/
if (amdgpu_vm_block_size != -1)
adev->vm_manager.block_size =
@@ -2643,7 +2619,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
int r, i;
u64 flags;
uint64_t init_pde_value = 0;
@@ -2663,8 +2639,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
- r = amd_sched_entity_init(&ring->sched, &vm->entity,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&ring->sched, &vm->entity,
rq, amdgpu_sched_jobs, NULL);
if (r)
return r;
@@ -2698,7 +2674,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW);
- r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
+ r = amdgpu_bo_create(adev,
+ amdgpu_vm_bo_size(adev, adev->vm_manager.root_level),
+ align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
NULL, NULL, init_pde_value, &vm->root.base.bo);
@@ -2744,7 +2722,7 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_sched_entity:
- amd_sched_entity_fini(&ring->sched, &vm->entity);
+ drm_sched_entity_fini(&ring->sched, &vm->entity);
return r;
}
@@ -2752,26 +2730,31 @@ error_free_sched_entity:
/**
* amdgpu_vm_free_levels - free PD/PT levels
*
- * @level: PD/PT starting level to free
+ * @adev: amdgpu device structure
+ * @parent: PD/PT starting level to free
+ * @level: level of parent structure
*
* Free the page directory or page table level and all sub levels.
*/
-static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
- unsigned i;
+ unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
- if (level->base.bo) {
- list_del(&level->base.bo_list);
- list_del(&level->base.vm_status);
- amdgpu_bo_unref(&level->base.bo->shadow);
- amdgpu_bo_unref(&level->base.bo);
+ if (parent->base.bo) {
+ list_del(&parent->base.bo_list);
+ list_del(&parent->base.vm_status);
+ amdgpu_bo_unref(&parent->base.bo->shadow);
+ amdgpu_bo_unref(&parent->base.bo);
}
- if (level->entries)
- for (i = 0; i <= level->last_entry_used; i++)
- amdgpu_vm_free_levels(&level->entries[i]);
+ if (parent->entries)
+ for (i = 0; i < num_entries; i++)
+ amdgpu_vm_free_levels(adev, &parent->entries[i],
+ level + 1);
- kvfree(level->entries);
+ kvfree(parent->entries);
}
/**
@@ -2803,7 +2786,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+ drm_sched_entity_fini(vm->entity.sched, &vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2829,7 +2812,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_levels(&vm->root);
+ amdgpu_vm_free_levels(adev, &vm->root,
+ adev->vm_manager.root_level);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 43ea131dd411..edd2ea52dc00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -24,10 +24,11 @@
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__
-#include <linux/rbtree.h>
#include <linux/idr.h>
+#include <linux/kfifo.h>
+#include <linux/rbtree.h>
+#include <drm/gpu_scheduler.h>
-#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
@@ -69,6 +70,12 @@ struct amdgpu_bo_list_entry;
/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE (1ULL << 54)
+/* PTE is handled as PDE for VEGA10 (Translate Further) */
+#define AMDGPU_PTE_TF (1ULL << 56)
+
+/* PDE Block Fragment Size for VEGA10 */
+#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)
+
/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
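Together with AMDGPU_PDE_PTE above, these bits let Vega10 blur the PDE/PTE distinction: bit 56 (TF) makes a PTE translate further like a PDE, and bits 59+ (BFS) carry the block fragment size in a PDE. A hedged sketch of composing such a PDE (the address masking is simplified and the helper name is illustrative):

    /* Hedged sketch: assemble a 64-bit PDE from a page-table address
     * and the flag macros above. 'frag' is the fragment size value
     * that AMDGPU_PDE_BFS() shifts into bits 59+.
     */
    static u64 example_make_pde(u64 pt_gpu_addr, unsigned frag)
    {
            u64 pde = pt_gpu_addr & ~0xFFFULL; /* simplified address bits */

            pde |= AMDGPU_PTE_VALID;
            pde |= AMDGPU_PDE_BFS(frag);
            return pde;
    }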
@@ -119,6 +126,16 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+/* VMPT level enumeration; the hierarchy is:
+ * PDB2->PDB1->PDB0->PTB
+ */
+enum amdgpu_vm_level {
+ AMDGPU_VM_PDB2,
+ AMDGPU_VM_PDB1,
+ AMDGPU_VM_PDB0,
+ AMDGPU_VM_PTB
+};
+
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
/* constant after initialization */
@@ -137,11 +154,10 @@ struct amdgpu_vm_bo_base {
struct amdgpu_vm_pt {
struct amdgpu_vm_bo_base base;
- uint64_t addr;
+ bool huge;
/* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries;
- unsigned last_entry_used;
};
#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
@@ -175,7 +191,7 @@ struct amdgpu_vm {
spinlock_t freed_lock;
/* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
/* client id and PASID (TODO: replace client_id with PASID) */
u64 client_id;
@@ -236,6 +252,7 @@ struct amdgpu_vm_manager {
uint32_t num_level;
uint32_t block_size;
uint32_t fragment_size;
+ enum amdgpu_vm_level root_level;
/* vram base address for page table entry */
u64 vram_base_offset;
/* vm pte handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index f11c0aacf19f..a0943aa8d1d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -891,12 +891,12 @@ static void ci_dpm_powergate_uvd(void *handle, bool gate)
if (gate) {
/* stop the UVD block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
ci_update_uvd_dpm(adev, gate);
} else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
ci_update_uvd_dpm(adev, gate);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 8ba056a2a5da..8e59e65efd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -755,74 +755,74 @@ static void cik_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- amdgpu_program_register_sequence(adev,
- bonaire_mgcg_cgcg_init,
- ARRAY_SIZE(bonaire_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_registers,
- ARRAY_SIZE(bonaire_golden_registers));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_common_registers,
- ARRAY_SIZE(bonaire_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_spm_registers,
- ARRAY_SIZE(bonaire_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_mgcg_cgcg_init,
+ ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_registers,
+ ARRAY_SIZE(bonaire_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_common_registers,
+ ARRAY_SIZE(bonaire_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_spm_registers,
+ ARRAY_SIZE(bonaire_golden_spm_registers));
break;
case CHIP_KABINI:
- amdgpu_program_register_sequence(adev,
- kalindi_mgcg_cgcg_init,
- ARRAY_SIZE(kalindi_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_registers,
- ARRAY_SIZE(kalindi_golden_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_common_registers,
- ARRAY_SIZE(kalindi_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_spm_registers,
- ARRAY_SIZE(kalindi_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_registers,
+ ARRAY_SIZE(kalindi_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_MULLINS:
- amdgpu_program_register_sequence(adev,
- kalindi_mgcg_cgcg_init,
- ARRAY_SIZE(kalindi_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- godavari_golden_registers,
- ARRAY_SIZE(godavari_golden_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_common_registers,
- ARRAY_SIZE(kalindi_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_spm_registers,
- ARRAY_SIZE(kalindi_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ godavari_golden_registers,
+ ARRAY_SIZE(godavari_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_KAVERI:
- amdgpu_program_register_sequence(adev,
- spectre_mgcg_cgcg_init,
- ARRAY_SIZE(spectre_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- spectre_golden_registers,
- ARRAY_SIZE(spectre_golden_registers));
- amdgpu_program_register_sequence(adev,
- spectre_golden_common_registers,
- ARRAY_SIZE(spectre_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- spectre_golden_spm_registers,
- ARRAY_SIZE(spectre_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_mgcg_cgcg_init,
+ ARRAY_SIZE(spectre_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_registers,
+ ARRAY_SIZE(spectre_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_common_registers,
+ ARRAY_SIZE(spectre_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_spm_registers,
+ ARRAY_SIZE(spectre_golden_spm_registers));
break;
case CHIP_HAWAII:
- amdgpu_program_register_sequence(adev,
- hawaii_mgcg_cgcg_init,
- ARRAY_SIZE(hawaii_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_registers,
- ARRAY_SIZE(hawaii_golden_registers));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_common_registers,
- ARRAY_SIZE(hawaii_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_spm_registers,
- ARRAY_SIZE(hawaii_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_mgcg_cgcg_init,
+ ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_registers,
+ ARRAY_SIZE(hawaii_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_common_registers,
+ ARRAY_SIZE(hawaii_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_spm_registers,
+ ARRAY_SIZE(hawaii_golden_spm_registers));
break;
default:
break;
@@ -1246,7 +1246,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
- amdgpu_pci_config_reset(adev);
+ amdgpu_device_pci_config_reset(adev);
udelay(100);
@@ -1866,7 +1866,7 @@ static int cik_common_early_init(void *handle)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- amdgpu_get_pcie_info(adev);
+ amdgpu_device_get_pcie_info(adev);
return 0;
}
@@ -1974,77 +1974,77 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_HAWAII:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KAVERI:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KABINI:
case CHIP_MULLINS:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ed26dcbc4f79..e406c93d01d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -626,7 +626,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -639,7 +639,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
@@ -663,7 +663,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
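The renamed writeback helpers keep the usual ring-test shape: reserve a slot, poison it, have the engine DMA a magic value into it, poll, free the slot. A condensed sketch built on the calls visible in this hunk (packet emission elided; the wb.wb access follows the driver's existing tests):

    static int example_wb_ring_test(struct amdgpu_device *adev)
    {
            u32 index, tmp = 0, i;
            int r = amdgpu_device_wb_get(adev, &index);

            if (r)
                    return r;
            adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);   /* poison */
            /* ... emit a ring packet that writes 0xDEADBEEF to the slot ... */
            for (i = 0; i < adev->usec_timeout; i++) {
                    tmp = le32_to_cpu(adev->wb.wb[index]);
                    if (tmp == 0xDEADBEEF)
                            break;
                    udelay(1);
            }
            amdgpu_device_wb_free(adev, index);
            return (tmp == 0xDEADBEEF) ? 0 : -EINVAL;
    }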
@@ -686,7 +686,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -735,7 +735,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 003a131bad47..567a904804bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -48,7 +48,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
0x00000000, // DB_STENCIL_WRITE_BASE
0x00000000, // DB_STENCIL_WRITE_BASE_HI
0x00000000, // DB_DFSM_CONTROL
- 0x00000000, // DB_RENDER_FILTER
+ 0, // HOLE
0x00000000, // DB_Z_INFO2
0x00000000, // DB_STENCIL_INFO2
0, // HOLE
@@ -259,8 +259,8 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
0x00000000, // PA_SC_RIGHT_VERT_GRID
0x00000000, // PA_SC_LEFT_VERT_GRID
0x00000000, // PA_SC_HORIZ_GRID
- 0x00000000, // PA_SC_FOV_WINDOW_LR
- 0x00000000, // PA_SC_FOV_WINDOW_TB
+ 0, // HOLE
+ 0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
@@ -701,7 +701,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
{
0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
0x00000000, // VGT_DRAW_PAYLOAD_CNTL
- 0x00000000, // VGT_INDEX_PAYLOAD_CNTL
+ 0, // HOLE
0x00000000, // VGT_INSTANCE_STEP_RATE_0
0x00000000, // VGT_INSTANCE_STEP_RATE_1
0, // HOLE
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index a397111c2ced..f34bc68aadfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -145,20 +145,20 @@ static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 67e670989e81..26378bd6aba4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -154,28 +154,28 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- ARRAY_SIZE(polaris11_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris11_golden_settings_a11,
+ ARRAY_SIZE(polaris11_golden_settings_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- ARRAY_SIZE(polaris10_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris10_golden_settings_a11,
+ ARRAY_SIZE(polaris10_golden_settings_a11));
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d02493cf9175..46550b588982 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -679,55 +679,55 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- ARRAY_SIZE(golden_settings_iceland_a11));
- amdgpu_program_register_sequence(adev,
- iceland_golden_common_all,
- ARRAY_SIZE(iceland_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_golden_common_all,
+ ARRAY_SIZE(iceland_golden_common_all));
break;
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- ARRAY_SIZE(golden_settings_fiji_a10));
- amdgpu_program_register_sequence(adev,
- fiji_golden_common_all,
- ARRAY_SIZE(fiji_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_golden_common_all,
+ ARRAY_SIZE(fiji_golden_common_all));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- ARRAY_SIZE(golden_settings_tonga_a11));
- amdgpu_program_register_sequence(adev,
- tonga_golden_common_all,
- ARRAY_SIZE(tonga_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_golden_common_all,
+ ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- ARRAY_SIZE(golden_settings_polaris11_a11));
- amdgpu_program_register_sequence(adev,
- polaris11_golden_common_all,
- ARRAY_SIZE(polaris11_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris11_golden_common_all,
+ ARRAY_SIZE(polaris11_golden_common_all));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- ARRAY_SIZE(golden_settings_polaris10_a11));
- amdgpu_program_register_sequence(adev,
- polaris10_golden_common_all,
- ARRAY_SIZE(polaris10_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris10_golden_common_all,
+ ARRAY_SIZE(polaris10_golden_common_all));
WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
if (adev->pdev->revision == 0xc7 &&
((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
@@ -738,26 +738,26 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
}
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- cz_golden_common_all,
- ARRAY_SIZE(cz_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_common_all,
+ ARRAY_SIZE(cz_golden_common_all));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- stoney_golden_common_all,
- ARRAY_SIZE(stoney_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_common_all,
+ ARRAY_SIZE(stoney_golden_common_all));
break;
default:
break;
@@ -5062,8 +5062,9 @@ static int gfx_v8_0_hw_fini(void *handle)
gfx_v8_0_cp_enable(adev, false);
gfx_v8_0_rlc_stop(adev);
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
return 0;
}
@@ -5480,8 +5481,9 @@ static int gfx_v8_0_late_init(void *handle)
if (r)
return r;
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
return 0;
}
@@ -5492,10 +5494,10 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12))
/* Send msg to SMU via Powerplay */
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ?
- AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ enable ?
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -6472,10 +6474,10 @@ static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
+ bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6c5289ae67be..9f7be230734c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -28,7 +28,6 @@
#include "soc15.h"
#include "soc15d.h"
-#include "soc15ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
@@ -65,152 +64,84 @@ MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
-static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
-{
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
+static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
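Converting the flat u32 triples to struct soc15_reg_golden keeps the same (register, AND-mask, OR-value) triplets but makes each entry self-describing via SOC15_REG_GOLDEN_VALUE(). Applying an entry is, in essence, a read-modify-write; a hedged sketch (exreg_read/exreg_write stand in for the real register accessors):

    extern u32 exreg_read(u32 offset);
    extern void exreg_write(u32 offset, u32 val);

    /* Hedged sketch: and_mask selects the bits to clear, or_value the
     * bits to set, matching the two hex columns in the tables above.
     */
    static void example_apply_golden(u32 offset, u32 and_mask, u32 or_value)
    {
            u32 tmp = exreg_read(offset);

            tmp &= ~and_mask;
            tmp |= or_value;
            exreg_write(offset, tmp);
    }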
-static const u32 golden_settings_gc_9_0[] =
-{
- SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
- SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
- SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
- SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
- SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
- SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
- SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
- SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
-};
-
-static const u32 golden_settings_gc_9_0_vg10[] =
+static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
- SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
- SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
};
-static const u32 golden_settings_gc_9_1[] =
-{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
- SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
- SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
- SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
- SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
- SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
+static const struct soc15_reg_golden golden_settings_gc_9_1[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
-static const u32 golden_settings_gc_9_1_rv1[] =
+static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
- SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
- SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};
-static const u32 golden_settings_gc_9_x_common[] =
+static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
- SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
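
The tables above move from flat u32 triples (offset, mask, value) to typed soc15_reg_golden entries built with SOC15_REG_GOLDEN_VALUE(). A minimal sketch of how such a table might be consumed; the field names and the soc15_golden_reg_offset() helper are assumptions inferred from the macro usage, not the exact amdgpu definitions:

    struct soc15_reg_golden {
            u32 hwip;      /* hardware IP block, e.g. GC, MMHUB, ATHUB */
            u32 instance;  /* IP instance */
            u32 segment;   /* register segment within the IP */
            u32 reg;       /* register offset within the segment */
            u32 and_mask;  /* bits the golden value is allowed to touch */
            u32 or_mask;   /* golden bits to OR in */
    };

    /* Read-modify-write each entry: clear the masked bits, OR in the
     * golden value, and skip the read when the whole register is
     * overwritten (and_mask == 0xffffffff). */
    static void program_golden_sketch(struct amdgpu_device *adev,
                                      const struct soc15_reg_golden *regs,
                                      u32 num)
    {
            u32 i, reg, tmp;

            for (i = 0; i < num; i++) {
                    reg = soc15_golden_reg_offset(adev, &regs[i]); /* hypothetical helper */
                    if (regs[i].and_mask == 0xffffffff) {
                            tmp = regs[i].or_mask;
                    } else {
                            tmp = RREG32(reg);
                            tmp &= ~regs[i].and_mask;
                            tmp |= regs[i].or_mask;
                    }
                    WREG32(reg, tmp);
            }
    }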
@@ -230,18 +161,18 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_0_vg10,
ARRAY_SIZE(golden_settings_gc_9_0_vg10));
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv1,
ARRAY_SIZE(golden_settings_gc_9_1_rv1));
break;
@@ -249,7 +180,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
break;
}
- amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
@@ -1137,7 +1068,7 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
+ adev->gfx.ngg.gds_reserve_addr = SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE);
adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;
/* Primitive Buffer */
@@ -1243,7 +1174,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
}
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[0].mem_size,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
(adev->gds.mem.total_size +
adev->gfx.ngg.gds_reserve_size) >>
AMDGPU_GDS_SHIFT);
@@ -1259,7 +1190,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[0].mem_size, 0);
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
amdgpu_ring_commit(ring);
@@ -1598,11 +1529,18 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
for (i = 0; i < 16; i++) {
soc15_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
- tmp = 0;
- tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
- SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
+ if (i == 0) {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
+ } else {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ tmp = adev->mc.shared_aperture_start >> 48;
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
+ }
}
soc15_grbm_select(adev, 0, 0, 0, 0);
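
For VMIDs other than 0 the rewritten loop seeds SH_MEM_BASES from the shared aperture configured later in this patch (see gmc_v9_0_early_init below); a worked sketch of the arithmetic, assuming SH_MEM_BASES takes the upper address bits of the aperture base:

    u64 shared_base = 0x2000000000000000ULL;      /* mc.shared_aperture_start */
    u32 sh_mem_bases = (u32)(shared_base >> 48);  /* == 0x2000 */

    /* VMID 0 is the kernel client and keeps SH_MEM_BASES at 0; the
     * other 15 VMIDs get the shared-aperture base so their shaders can
     * address the shared window. The exact field split inside
     * SH_MEM_BASES is an assumption here. */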
@@ -2474,7 +2412,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
@@ -3146,6 +3084,8 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
{
+ struct amdgpu_device *adev = ring->adev;
+
gds_base = gds_base >> AMDGPU_GDS_SHIFT;
gds_size = gds_size >> AMDGPU_GDS_SHIFT;
@@ -3157,22 +3097,22 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
/* GDS Base */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].mem_base,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
gds_base);
/* GDS Size */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].mem_size,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
gds_size);
/* GWS */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].gws,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
/* OA */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].oa,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
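
The amdgpu_gds_reg_offset[] lookup table goes away in favour of direct arithmetic on the VMID0 offsets: the GDS BASE/SIZE registers are interleaved per VMID (stride 2), while GWS and OA use stride 1. The OA write also packs a contiguous bitmask; a short sketch of that math:

    /* Strides are taken from the code above; the register layout they
     * imply (BASE0, SIZE0, BASE1, SIZE1, ...) is an inference. */
    u32 gds_base_reg = SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid;
    u32 oa_reg       = SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid;

    /* OA mask: oa_size consecutive bits starting at bit oa_base,
     * e.g. oa_base = 2, oa_size = 3:
     *   (1 << (3 + 2)) - (1 << 2) = 0x20 - 0x04 = 0x1c = 0b11100 */
    u32 oa_mask = (1 << (oa_size + oa_base)) - (1 << oa_base);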
@@ -3617,13 +3557,9 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg;
-
- if (ring->adev->flags & AMD_IS_APU)
- nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
- else
- nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -3643,13 +3579,15 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- nbio_hf_reg->hdp_flush_req_offset,
- nbio_hf_reg->hdp_flush_done_offset,
+ adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio_funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
gfx_v9_0_write_data_to_reg(ring, 0, true,
SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
@@ -3750,10 +3688,11 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
@@ -3811,6 +3750,8 @@ static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned int flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
/* we only allocate 32bit for each seq wb address */
BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
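
The APU/dGPU branches that used to reference nbio_v7_0 or nbio_v6_1 symbols directly are all replaced with calls through adev->nbio_funcs. A minimal sketch of the dispatch; the exact selection site is an assumption, since this patch only shows the consumers:

    /* Presumably done once during soc15 early init: */
    if (adev->flags & AMD_IS_APU)
            adev->nbio_funcs = &nbio_v7_0_funcs;
    else
            adev->nbio_funcs = &nbio_v6_1_funcs;

    /* After that, callers are generation-agnostic: */
    adev->nbio_funcs->hdp_flush(adev);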
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index f1effadfbaa6..56f5fe4e2fee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "gfxhub_v1_0.h"
-#include "soc15ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "gc/gc_9_0_default.h"
@@ -144,8 +143,15 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ if (adev->mc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -183,31 +189,40 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
- int i;
+ unsigned num_level, block_size;
uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->mc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
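
The translate_further split trades one hardware page-walk level for a larger block size, which is also why VM_L2_CNTL3 above grows BANK_SELECT from 9 to 12 and the BIGK fragment size from 6 to 9. A sketch of the field derivation; treating BLOCK_SIZE as relative to 2^9 entries is an assumption based on the legacy "block_size - 9" adjustment:

    static void vmid_cfg_sketch(unsigned num_level, unsigned block_size,
                                bool translate_further,
                                unsigned *depth, unsigned *blk)
    {
            if (translate_further) {
                    *depth = num_level - 1; /* hw walks one fewer level */
                    *blk = block_size;      /* the dropped level's 9 bits move here */
            } else {
                    *depth = num_level;
                    *blk = block_size - 9;  /* field counts above 2^9 entries */
            }
    }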
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 468281f10e8d..e1a73c43f32d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -222,8 +222,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->mc, base);
+ amdgpu_device_gart_location(adev, mc);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -395,10 +395,10 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 68a85051f4b7..356a9a71b8cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -67,12 +67,12 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
@@ -240,8 +240,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->mc, base);
+ amdgpu_device_gart_location(adev, mc);
}
/**
@@ -480,10 +480,10 @@ static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 46ec97e70e5c..fce45578f5fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -120,44 +120,44 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_stoney_common,
- ARRAY_SIZE(golden_settings_stoney_common));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_stoney_common,
+ ARRAY_SIZE(golden_settings_stoney_common));
break;
default:
break;
@@ -405,8 +405,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->mc, base);
+ amdgpu_device_gart_location(adev, mc);
}
/**
@@ -677,10 +677,10 @@ static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index cc972153d401..b776df4c999f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -25,7 +25,6 @@
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
-#include "soc15ip.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -35,11 +34,10 @@
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
+#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"
-#include "nbio_v6_1.h"
-#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
@@ -74,16 +72,16 @@ static const u32 golden_settings_vega10_hdp[] =
0xf6e, 0x0fffffff, 0x00000000,
};
-static const u32 golden_settings_mmhub_1_0_0[] =
+static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
- SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
- SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
+ SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
+ SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
-static const u32 golden_settings_athub_1_0_0[] =
+static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
- SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
- SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
+ SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
/* Ecc related register addresses, (BASE + reg offset) */
@@ -332,10 +330,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
unsigned i, j;
/* flush hdp cache */
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
+ adev->nbio_funcs->hdp_flush(adev);
spin_lock(&adev->mc.invalidate_lock);
@@ -474,11 +469,28 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
+static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
- BUG_ON(addr & 0xFFFF00000000003FULL);
- return addr;
+ if (!(*flags & AMDGPU_PDE_PTE))
+ *addr = adev->vm_manager.vram_base_offset + *addr -
+ adev->mc.vram_start;
+ BUG_ON(*addr & 0xFFFF00000000003FULL);
+
+ if (!adev->mc.translate_further)
+ return;
+
+ if (level == AMDGPU_VM_PDB1) {
+ /* Set the block fragment size */
+ if (!(*flags & AMDGPU_PDE_PTE))
+ *flags |= AMDGPU_PDE_BFS(0x9);
+
+ } else if (level == AMDGPU_VM_PDB0) {
+ if (*flags & AMDGPU_PDE_PTE)
+ *flags &= ~AMDGPU_PDE_PTE;
+ else
+ *flags |= AMDGPU_PTE_TF;
+ }
}
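
gmc_v9_0_get_vm_pde() now takes the page-table level and updates the address and flags in place instead of returning a remapped address, so level-dependent bits (AMDGPU_PDE_BFS, AMDGPU_PTE_TF) can be attached where translate_further needs them. A hedged caller sketch; the level constant and address source are illustrative:

    uint64_t pde = page_directory_mc_addr;    /* hypothetical BO address */
    uint64_t flags = AMDGPU_PTE_VALID;

    /* Remaps into the VRAM window and may add PDE_BFS/PTE_TF: */
    adev->gart.gart_funcs->get_vm_pde(adev, AMDGPU_VM_PDB0, &pde, &flags);
    pde |= flags;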
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
@@ -502,6 +514,14 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_gart_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
+ adev->mc.shared_aperture_start = 0x2000000000000000ULL;
+ adev->mc.shared_aperture_end =
+ adev->mc.shared_aperture_start + (4ULL << 30) - 1;
+ adev->mc.private_aperture_start =
+ adev->mc.shared_aperture_end + 1;
+ adev->mc.private_aperture_end =
+ adev->mc.private_aperture_start + (4ULL << 30) - 1;
+
return 0;
}
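
The new apertures carve two back-to-back 4 GB windows out of high canonical address space; the bounds set above work out as follows:

    u64 shared_start  = 0x2000000000000000ULL;
    u64 shared_end    = shared_start + (4ULL << 30) - 1;  /* 0x20000000ffffffff */
    u64 private_start = shared_end + 1;                   /* 0x2000000100000000 */
    u64 private_end   = private_start + (4ULL << 30) - 1; /* 0x20000001ffffffff */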
@@ -633,8 +653,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = 0;
if (!amdgpu_sriov_vf(adev))
base = mmhub_v1_0_get_fb_location(adev);
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->mc, base);
+ amdgpu_device_gart_location(adev, mc);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU)
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -700,8 +720,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
/* size in MB on si */
adev->mc.mc_vram_size =
- ((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
- nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
+ adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->mc.real_vram_size = adev->mc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
@@ -769,11 +788,14 @@ static int gmc_v9_0_sw_init(void *handle)
switch (adev->asic_type) {
case CHIP_RAVEN:
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- if (adev->rev_id == 0x0 || adev->rev_id == 0x1)
+ if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
- else
- /* vm_size is 64GB for legacy 2-level page support */
- amdgpu_vm_adjust_size(adev, 64, 9, 1, 48);
+ } else {
+ /* vm_size is 128TB + 512GB for legacy 3-level page support */
+ amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
+ adev->mc.translate_further =
+ adev->vm_manager.num_level > 1;
+ }
break;
case CHIP_VEGA10:
/* XXX Don't know how to get VRAM type yet. */
@@ -883,17 +905,18 @@ static int gmc_v9_0_sw_fini(void *handle)
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
+
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
ARRAY_SIZE(golden_settings_mmhub_1_0_0));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
@@ -913,9 +936,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
bool value;
u32 tmp;
- amdgpu_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -948,10 +971,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* After HDP is initialized, flush HDP.*/
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
+ adev->nbio_funcs->hdp_flush(adev);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f33d1ffdb20b..d9e9e52a0def 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1682,8 +1682,8 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
if (gate) {
/* stop the UVD block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
kv_update_uvd_dpm(adev, gate);
if (pi->caps_uvd_pg)
/* power off the UVD block */
@@ -1695,8 +1695,8 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
/* re-init the UVD block */
kv_update_uvd_dpm(adev, gate);
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index bd160d8700e0..ffd5b7ee49c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "mmhub_v1_0.h"
-#include "soc15ip.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
@@ -156,10 +155,15 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
- tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
- WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
+ if (adev->mc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
@@ -197,32 +201,40 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
- int i;
+ unsigned num_level, block_size;
uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->mc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index ad9054e3903c..271452d3999a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -22,7 +22,6 @@
*/
#include "amdgpu.h"
-#include "soc15ip.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
@@ -254,7 +253,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
/* Trigger recovery due to world switch failure */
- amdgpu_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -278,7 +277,7 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
int r;
/* trigger gpu-reset by hypervisor only if TDR disabled */
- if (amdgpu_lockup_timeout == 0) {
+ if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index df52824c0cd4..9fc1c37344ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -279,32 +279,32 @@ void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_mgcg_cgcg_init,
- ARRAY_SIZE(
- xgpu_fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_golden_settings_a10,
- ARRAY_SIZE(
- xgpu_fiji_golden_settings_a10));
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_golden_common_all,
- ARRAY_SIZE(
- xgpu_fiji_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(
+ xgpu_fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_golden_settings_a10,
+ ARRAY_SIZE(
+ xgpu_fiji_golden_settings_a10));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_golden_common_all,
+ ARRAY_SIZE(
+ xgpu_fiji_golden_common_all));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_mgcg_cgcg_init,
- ARRAY_SIZE(
- xgpu_tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_golden_settings_a11,
- ARRAY_SIZE(
- xgpu_tonga_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_golden_common_all,
- ARRAY_SIZE(
- xgpu_tonga_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(
+ xgpu_tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_golden_settings_a11,
+ ARRAY_SIZE(
+ xgpu_tonga_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_golden_common_all,
+ ARRAY_SIZE(
+ xgpu_tonga_golden_common_all));
break;
default:
BUG_ON("Doesn't support chip type.\n");
@@ -521,7 +521,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
}
/* Trigger recovery due to world switch failure */
- amdgpu_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -545,7 +545,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
int r;
/* trigger gpu-reset by hypervisor only if TDR disabled */
- if (amdgpu_lockup_timeout == 0) {
+ if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 76db711097c7..d4da663d5eb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -24,7 +24,6 @@
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"
-#include "soc15ip.h"
#include "nbio/nbio_6_1_default.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
@@ -34,7 +33,7 @@
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_CONFIG_CNTL 0x11180044
-u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
+static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -44,19 +43,7 @@ u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
return tmp;
}
-u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx)
-{
- return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
-}
-
-void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val)
-{
- WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
-}
-
-void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
+static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
@@ -66,26 +53,23 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
{
WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
-u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
+static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
}
-static const u32 nbio_sdma_doorbell_range_reg[] =
-{
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
-};
-
-void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index)
{
- u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+ u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
+
+ u32 doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -93,17 +77,18 @@ void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
- WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+ WREG32(reg, doorbell_range);
+
}
-void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
{
WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
-void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
{
u32 tmp = 0;
@@ -122,8 +107,8 @@ void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
}
-void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
+static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
@@ -136,7 +121,7 @@ void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}
-void nbio_v6_1_ih_control(struct amdgpu_device *adev)
+static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
@@ -152,8 +137,8 @@ void nbio_v6_1_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
-void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -180,8 +165,8 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_PCIE(smnCPM_CONTROL, data);
}
-void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -200,7 +185,8 @@ void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_CNTL2, data);
}
-void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
{
int data;
@@ -215,9 +201,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
- .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
- .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
+static u32 nbio_v6_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
+}
+
+static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
+}
+
+static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
+}
+
+static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
+}
+
+static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -232,12 +236,7 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};
-const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
- .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
- .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
-};
-
-void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
+static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -254,7 +253,7 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
}
}
-void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -265,3 +264,25 @@ void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
}
+
+const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
+ .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
+ .get_rev_id = nbio_v6_1_get_rev_id,
+ .mc_access_enable = nbio_v6_1_mc_access_enable,
+ .hdp_flush = nbio_v6_1_hdp_flush,
+ .get_memsize = nbio_v6_1_get_memsize,
+ .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
+ .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v6_1_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v6_1_get_clockgating_state,
+ .ih_control = nbio_v6_1_ih_control,
+ .init_registers = nbio_v6_1_init_registers,
+ .detect_hw_virt = nbio_v6_1_detect_hw_virt,
+};
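
With hdp_flush_req/done and the PCIE index/data offsets turned into accessors, the old nbio_pcie_index_data export also disappears. A hedged sketch of an indirect PCIE register read built on the new callbacks (the lock name is an assumption):

    static u32 pcie_rreg_sketch(struct amdgpu_device *adev, u32 reg)
    {
            unsigned long flags;
            u32 idx = adev->nbio_funcs->get_pcie_index_offset(adev);
            u32 data = adev->nbio_funcs->get_pcie_data_offset(adev);
            u32 r;

            spin_lock_irqsave(&adev->pcie_idx_lock, flags);
            WREG32(idx, reg);       /* select the indirect register */
            (void)RREG32(idx);      /* posting read to flush the write */
            r = RREG32(data);
            spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
            return r;
    }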
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 14ca8d45a46c..0743a6f016f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,30 +26,6 @@
#include "soc15_common.h"
-extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
-int nbio_v6_1_init(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx);
-void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val);
-void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_hdp_flush(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev);
-void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index);
-void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
-void nbio_v6_1_ih_control(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev);
-void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev);
-void nbio_v6_1_init_registers(struct amdgpu_device *adev);
+extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 1fb77174e02c..17a9131a4598 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -24,7 +24,6 @@
#include "amdgpu_atombios.h"
#include "nbio_v7_0.h"
-#include "soc15ip.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
@@ -32,7 +31,10 @@
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
-u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
+#define smnCPM_CONTROL 0x11180460
+#define smnPCIE_CNTL2 0x11180070
+
+static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -42,19 +44,7 @@ u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
return tmp;
}
-u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx)
-{
- return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
-}
-
-void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val)
-{
- WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
-}
-
-void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
+static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
@@ -63,26 +53,23 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
{
WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
-u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
+static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}
-static const u32 nbio_sdma_doorbell_range_reg[] =
+static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index)
{
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
-};
+ u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
-void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index)
-{
- u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+ u32 doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -90,17 +77,23 @@ void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
- WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+ WREG32(reg, doorbell_range);
}
-void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
{
WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
-void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
+static void nbio_v7_0_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
+{
+
+}
+
+static void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
@@ -130,8 +123,8 @@ static void nbio_7_0_write_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t o
WREG32_SOC15(NBIO, 0, mmSYSHUB_DATA, data);
}
-void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -169,7 +162,43 @@ void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK, data);
}
-void nbio_v7_0_ih_control(struct amdgpu_device *adev)
+static void nbio_v7_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CNTL2);
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
+ data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CNTL2, data);
+}
+
+static void nbio_v7_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_PCIE(smnCPM_CONTROL);
+ if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_PCIE(smnPCIE_CNTL2);
+ if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
+static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
@@ -185,9 +214,27 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
+static u32 nbio_v7_0_get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
+}
+
+static u32 nbio_v7_0_get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
+}
+
+static u32 nbio_v7_0_get_pcie_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
+}
+
+static u32 nbio_v7_0_get_pcie_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
+}
+
const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
- .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
- .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -202,7 +249,35 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
- .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
- .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
+static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
+static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
+{
+
+}
+
+const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
+ .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
+ .get_rev_id = nbio_v7_0_get_rev_id,
+ .mc_access_enable = nbio_v7_0_mc_access_enable,
+ .hdp_flush = nbio_v7_0_hdp_flush,
+ .get_memsize = nbio_v7_0_get_memsize,
+ .sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
+ .enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_0_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_0_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_0_get_clockgating_state,
+ .ih_control = nbio_v7_0_ih_control,
+ .init_registers = nbio_v7_0_init_registers,
+ .detect_hw_virt = nbio_v7_0_detect_hw_virt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index df8fa90f40d7..508d549c5029 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,24 +26,6 @@
#include "soc15_common.h"
-extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
-int nbio_v7_0_init(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx);
-void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val);
-void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable);
-void nbio_v7_0_hdp_flush(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev);
-void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index);
-void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
-void nbio_v7_0_ih_control(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev);
-void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable);
+extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 78fe3f2917a0..5a9fe24697f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -30,7 +30,6 @@
#include "soc15_common.h"
#include "psp_v10_0.h"
-#include "soc15ip.h"
#include "mp/mp_10_0_offset.h"
#include "gc/gc_9_1_offset.h"
#include "sdma0/sdma0_4_1_offset.h"
@@ -298,9 +297,10 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
}
static int
-psp_v10_0_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
+psp_v10_0_sram_map(struct amdgpu_device *adev,
+ unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+ unsigned int *sram_data_reg_offset,
+ enum AMDGPU_UCODE_ID ucode_id)
{
int ret = 0;
@@ -395,7 +395,7 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
uint32_t *ucode_mem = NULL;
struct amdgpu_device *adev = psp->adev;
- err = psp_v10_0_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+ err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
&fw_sram_data_reg_offset, ucode_type);
if (err)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index e75a23d858ef..19bd1934e63d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -31,7 +31,6 @@
#include "soc15_common.h"
#include "psp_v3_1.h"
-#include "soc15ip.h"
#include "mp/mp_9_0_offset.h"
#include "mp/mp_9_0_sh_mask.h"
#include "gc/gc_9_0_offset.h"
@@ -410,9 +409,10 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
}
static int
-psp_v3_1_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
+psp_v3_1_sram_map(struct amdgpu_device *adev,
+ unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+ unsigned int *sram_data_reg_offset,
+ enum AMDGPU_UCODE_ID ucode_id)
{
int ret = 0;
@@ -507,7 +507,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
uint32_t *ucode_mem = NULL;
struct amdgpu_device *adev = psp->adev;
- err = psp_v3_1_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+ err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
&fw_sram_data_reg_offset, ucode_type);
if (err)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 121e628e7cdb..401552bae7f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -93,12 +93,12 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
@@ -600,7 +600,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -613,7 +613,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -639,7 +639,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -662,7 +662,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -715,7 +715,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
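The amdgpu_wb_get()/amdgpu_wb_free() pair is renamed to amdgpu_device_wb_get()/amdgpu_device_wb_free() as part of moving device-level helpers behind the amdgpu_device_ prefix; the call sites are otherwise unchanged. For context, every ring test touched by this rename follows the same writeback pattern (condensed from the functions in this file):

    r = amdgpu_device_wb_get(adev, &index);       /* reserve one wb dword */
    gpu_addr = adev->wb.gpu_addr + (index * 4);   /* GPU address of slot  */
    adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); /* poison start value   */
    /* ...emit a ring packet that writes 0xDEADBEEF to gpu_addr... */
    for (i = 0; i < adev->usec_timeout; i++) {
            if (le32_to_cpu(adev->wb.wb[index]) == 0xDEADBEEF)
                    break;                        /* engine executed it   */
            udelay(1);
    }
    amdgpu_device_wb_free(adev, index);           /* release the slot     */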
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index c8c93f9dac21..0735d4d0e56a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -192,47 +192,47 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
break;
default:
break;
@@ -355,7 +355,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 wptr;
- if (ring->use_doorbell) {
+ if (ring->use_doorbell || ring->use_pollmem) {
/* XXX check if swapping is necessary on BE */
wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
} else {
@@ -380,10 +380,13 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
-
/* XXX check if swapping is necessary on BE */
WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
+ } else if (ring->use_pollmem) {
+ u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
+
+ WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
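With use_pollmem, sdma_v3_0 now has three ways to publish a new write pointer: doorbell rings keep the shadow-plus-doorbell sequence, pollmem rings only update the shadow (the engine discovers it by polling, enabled in the resume hunk below), and everything else falls back to an MMIO register write. Side by side, condensed from this hunk:

    u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
    if (ring->use_doorbell) {
            /* update the shadow, then kick the doorbell */
            WRITE_ONCE(*wb, lower_32_bits(ring->wptr) << 2);
            WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
    } else if (ring->use_pollmem) {
            /* no doorbell: the engine polls the shadow, so the
             * memory update alone publishes the new wptr */
            WRITE_ONCE(*wb, lower_32_bits(ring->wptr) << 2);
    } else {
            /* plain MMIO write to the per-instance WPTR register */
            int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
            WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
                   lower_32_bits(ring->wptr) << 2);
    }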
@@ -718,10 +721,14 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
- if (amdgpu_sriov_vf(adev))
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+ if (ring->use_pollmem)
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ ENABLE, 1);
else
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ ENABLE, 0);
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
/* enable DMA RB */
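Note the poll-enable condition also changes from amdgpu_sriov_vf() to ring->use_pollmem, tying it to the flag set in sw_init below. REG_SET_FIELD(), used throughout these hunks, is a masked read-modify-write: only the named field changes and every other bit is preserved. Paraphrased (not the literal macro definition; FIELD_MASK/FIELD_SHIFT stand for the generated per-field constants):

    u32 val = RREG32(reg);                        /* current contents    */
    val &= ~FIELD_MASK;                           /* clear field's bits  */
    val |= (new_val << FIELD_SHIFT) & FIELD_MASK; /* insert new value    */
    WREG32(reg, val);                             /* write merged result */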
@@ -860,7 +867,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -873,7 +880,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -899,7 +906,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -922,7 +929,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -974,7 +981,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1203,9 +1210,13 @@ static int sdma_v3_0_sw_init(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->doorbell_index = (i == 0) ?
- AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ if (!amdgpu_sriov_vf(adev)) {
+ ring->use_doorbell = true;
+ ring->doorbell_index = (i == 0) ?
+ AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ } else {
+ ring->use_pollmem = true;
+ }
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
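sw_init now only claims a doorbell on bare metal; under SR-IOV the ring is switched to polled memory instead, presumably because doorbell writes from a guest VF are not reliable - the hunks shown here do not state the rationale. The flag then threads through the rest of the file:

    /* How use_pollmem threads through sdma_v3_0 (all in hunks above):
     *  sw_init:     ring->use_pollmem = true            (SR-IOV VFs)
     *  gfx_resume:  SDMA0_GFX_RB_WPTR_POLL_CNTL.ENABLE  = use_pollmem
     *  get_wptr:    read the wb shadow when use_pollmem is set
     *  set_wptr:    WRITE_ONCE() to the wb shadow, no doorbell kick
     */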
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4c55f21e37a8..73477c5ed9b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -27,7 +27,6 @@
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
-#include "soc15ip.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
@@ -53,95 +52,83 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static const u32 golden_settings_sdma_4[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0x003ff006, 0x0003c000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), 0xffffffff, 0x3f000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), 0x003ff000, 0x0003c000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
+static const struct soc15_reg_golden golden_settings_sdma_4[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
-static const u32 golden_settings_sdma_vg10[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002
+static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
-static const u32 golden_settings_sdma_4_1[] =
+static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0xfc3fffff, 0x40000051,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
-static const u32 golden_settings_sdma_rv1[] =
+static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00000002,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00000002
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
};
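The golden-register tables change representation here: a flat u32 array of (offset, and_mask, or_mask) triplets, where the offset had to be computable at compile time, becomes an array of struct soc15_reg_golden, which records where the register lives so the absolute offset can be resolved per device. The struct and helper macro are added in soc15.h further down in this patch:

    struct soc15_reg_golden {
            u32 hwip;       /* IP block, e.g. SDMA0_HWIP         */
            u32 instance;
            u32 segment;    /* index into adev->reg_offset[][][] */
            u32 reg;        /* offset within the segment         */
            u32 and_mask;
            u32 or_mask;
    };
    /* SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL,
     *                        0xff000ff0, 0x3f000100)
     * expands, per the macro in soc15.h, to:
     *   { SDMA0_HWIP, 0, mmSDMA0_CLK_CTRL_BASE_IDX, mmSDMA0_CLK_CTRL,
     *     0xff000ff0, 0x3f000100 }
     */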
-static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset)
+static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
+ u32 instance, u32 offset)
{
- u32 base = 0;
-
- switch (instance) {
- case 0:
- base = SDMA0_BASE.instance[0].segment[0];
- break;
- case 1:
- base = SDMA1_BASE.instance[0].segment[0];
- break;
- default:
- BUG();
- break;
- }
-
- return base + internal_offset;
+ return (instance == 0 ? (adev->reg_offset[SDMA0_HWIP][0][0] + offset) :
+ (adev->reg_offset[SDMA1_HWIP][0][0] + offset));
}
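sdma_v4_0_get_reg_offset() drops the hardcoded SDMA0_BASE/SDMA1_BASE constants (which came from the now-removed soc15ip.h) in favour of the per-device adev->reg_offset table, filled once at init by vega10_reg_base_init(), declared in soc15.h below. Illustration only - the real population happens in that init function:

    adev->reg_offset[SDMA0_HWIP][0][0] = SDMA0_BASE.instance[0].segment[0];
    adev->reg_offset[SDMA1_HWIP][0][0] = SDMA1_BASE.instance[0].segment[0];
    /* ...so every later lookup is data-driven and ASIC-agnostic: */
    u32 reg = adev->reg_offset[SDMA0_HWIP][0][0] + mmSDMA0_GFX_RB_WPTR;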
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_4,
ARRAY_SIZE(golden_settings_sdma_4));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_vg10,
ARRAY_SIZE(golden_settings_sdma_vg10));
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
ARRAY_SIZE(golden_settings_sdma_4_1));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_rv1,
ARRAY_SIZE(golden_settings_sdma_rv1));
break;
@@ -265,8 +252,8 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
wptr = &local_wptr;
- lowbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+ lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+ highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
me, highbit, lowbit);
@@ -315,8 +302,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
}
}
@@ -370,13 +357,9 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
*/
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg;
-
- if (ring->adev->flags & AMD_IS_APU)
- nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
- else
- nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
if (ring == &ring->adev->sdma.instance[0].ring)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -386,8 +369,8 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_done_offset << 2);
- amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_req_offset << 2);
+ amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
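The open-coded APU/dGPU branches picking nbio_v7_0 or nbio_v6_1 symbols disappear in favour of adev->nbio_funcs, an ops table bound once in soc15_set_ip_blocks() (see the soc15.c hunk below). A sketch of the pattern; the member list is inferred from the call sites in this patch, not the full structure:

    struct amdgpu_nbio_funcs {
            const struct nbio_hdp_flush_reg *hdp_flush_reg;
            u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
            u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
            u32 (*get_memsize)(struct amdgpu_device *adev);
            /* ... */
    };
    /* bound once at init: */
    adev->nbio_funcs = (adev->flags & AMD_IS_APU) ? &nbio_v7_0_funcs
                                                  : &nbio_v6_1_funcs;
    /* callers stay version-agnostic: */
    u32 req = adev->nbio_funcs->get_hdp_flush_req_offset(adev);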
@@ -396,6 +379,8 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
@@ -460,12 +445,12 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
for (i = 0; i < adev->sdma.num_instances; i++) {
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL));
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
sdma0->ready = false;
@@ -522,18 +507,18 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
+ f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
}
}
@@ -557,9 +542,9 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL));
+ f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), f32_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
}
}
@@ -587,48 +572,48 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_HI), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
/* set the wb address whether it's enabled or not */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
ring->wptr = 0;
/* before programming wptr to a smaller value, need to set minor_ptr_update first */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
}
- doorbell = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET));
+ doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+ doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
@@ -637,55 +622,53 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
} else {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
}
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
- else
- nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+ adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index);
if (amdgpu_sriov_vf(adev))
sdma_v4_0_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr programed */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
/* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
+ temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), temp);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
- temp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL));
+ temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
}
/* setup the wptr shadow polling */
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
lower_32_bits(wptr_gpu_addr));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
if (amdgpu_sriov_vf(adev))
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
else
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
ring->ready = true;
@@ -816,12 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
for (j = 0; j < fw_size; j++)
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
}
return 0;
@@ -886,7 +869,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -899,7 +882,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -925,7 +908,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -948,7 +931,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u32 tmp = 0;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -1000,7 +983,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1156,10 +1139,11 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
@@ -1317,7 +1301,7 @@ static bool sdma_v4_0_is_idle(void *handle)
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- u32 tmp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_STATUS_REG));
+ u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
@@ -1333,8 +1317,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- sdma0 = RREG32(sdma_v4_0_get_reg_offset(0, mmSDMA0_STATUS_REG));
- sdma1 = RREG32(sdma_v4_0_get_reg_offset(1, mmSDMA0_STATUS_REG));
+ sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
+ sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
return 0;
@@ -1358,8 +1342,8 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
- sdma_v4_0_get_reg_offset(0, mmSDMA0_CNTL) :
- sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);
+ sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+ sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 49eef3090f08..543101d5a5ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1390,65 +1390,65 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TAHITI:
- amdgpu_program_register_sequence(adev,
- tahiti_golden_registers,
- ARRAY_SIZE(tahiti_golden_registers));
- amdgpu_program_register_sequence(adev,
- tahiti_golden_rlc_registers,
- ARRAY_SIZE(tahiti_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- tahiti_mgcg_cgcg_init,
- ARRAY_SIZE(tahiti_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- tahiti_golden_registers2,
- ARRAY_SIZE(tahiti_golden_registers2));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_registers,
+ ARRAY_SIZE(tahiti_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_rlc_registers,
+ ARRAY_SIZE(tahiti_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_mgcg_cgcg_init,
+ ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_registers2,
+ ARRAY_SIZE(tahiti_golden_registers2));
break;
case CHIP_PITCAIRN:
- amdgpu_program_register_sequence(adev,
- pitcairn_golden_registers,
- ARRAY_SIZE(pitcairn_golden_registers));
- amdgpu_program_register_sequence(adev,
- pitcairn_golden_rlc_registers,
- ARRAY_SIZE(pitcairn_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- pitcairn_mgcg_cgcg_init,
- ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_golden_registers,
+ ARRAY_SIZE(pitcairn_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_golden_rlc_registers,
+ ARRAY_SIZE(pitcairn_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_mgcg_cgcg_init,
+ ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
break;
case CHIP_VERDE:
- amdgpu_program_register_sequence(adev,
- verde_golden_registers,
- ARRAY_SIZE(verde_golden_registers));
- amdgpu_program_register_sequence(adev,
- verde_golden_rlc_registers,
- ARRAY_SIZE(verde_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- verde_mgcg_cgcg_init,
- ARRAY_SIZE(verde_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- verde_pg_init,
- ARRAY_SIZE(verde_pg_init));
+ amdgpu_device_program_register_sequence(adev,
+ verde_golden_registers,
+ ARRAY_SIZE(verde_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ verde_golden_rlc_registers,
+ ARRAY_SIZE(verde_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ verde_mgcg_cgcg_init,
+ ARRAY_SIZE(verde_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ verde_pg_init,
+ ARRAY_SIZE(verde_pg_init));
break;
case CHIP_OLAND:
- amdgpu_program_register_sequence(adev,
- oland_golden_registers,
- ARRAY_SIZE(oland_golden_registers));
- amdgpu_program_register_sequence(adev,
- oland_golden_rlc_registers,
- ARRAY_SIZE(oland_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- oland_mgcg_cgcg_init,
- ARRAY_SIZE(oland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ oland_golden_registers,
+ ARRAY_SIZE(oland_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ oland_golden_rlc_registers,
+ ARRAY_SIZE(oland_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ oland_mgcg_cgcg_init,
+ ARRAY_SIZE(oland_mgcg_cgcg_init));
break;
case CHIP_HAINAN:
- amdgpu_program_register_sequence(adev,
- hainan_golden_registers,
- ARRAY_SIZE(hainan_golden_registers));
- amdgpu_program_register_sequence(adev,
- hainan_golden_registers2,
- ARRAY_SIZE(hainan_golden_registers2));
- amdgpu_program_register_sequence(adev,
- hainan_mgcg_cgcg_init,
- ARRAY_SIZE(hainan_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_golden_registers,
+ ARRAY_SIZE(hainan_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_golden_registers2,
+ ARRAY_SIZE(hainan_golden_registers2));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_mgcg_cgcg_init,
+ ARRAY_SIZE(hainan_mgcg_cgcg_init));
break;
@@ -1959,42 +1959,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
- amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
- /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
- /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_OLAND:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
- amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
- /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
- /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index ee469a906cd3..9adca5d8b045 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -221,7 +221,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -234,7 +234,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 4);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -258,7 +258,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -281,7 +281,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -328,7 +328,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f134ca0c093c..8f2cff7b7e0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -34,7 +34,6 @@
#include "atom.h"
#include "amd_pcie.h"
-#include "soc15ip.h"
#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -101,15 +100,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- const struct nbio_pcie_index_data *nbio_pcie_id;
-
- if (adev->flags & AMD_IS_APU)
- nbio_pcie_id = &nbio_v7_0_pcie_index_data;
- else
- nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
- address = nbio_pcie_id->index_offset;
- data = nbio_pcie_id->data_offset;
+ address = adev->nbio_funcs->get_pcie_index_offset(adev);
+ data = adev->nbio_funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -122,15 +114,9 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- const struct nbio_pcie_index_data *nbio_pcie_id;
- if (adev->flags & AMD_IS_APU)
- nbio_pcie_id = &nbio_v7_0_pcie_index_data;
- else
- nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
- address = nbio_pcie_id->index_offset;
- data = nbio_pcie_id->data_offset;
+ address = adev->nbio_funcs->get_pcie_index_offset(adev);
+ data = adev->nbio_funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
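Both PCIE accessors keep the classic index/data pattern; only the port addresses now come from the nbio ops table. Condensed shape of the write path (the remainder of the function body is unchanged by this patch and elided in the hunk):

    address = adev->nbio_funcs->get_pcie_index_offset(adev);
    data    = adev->nbio_funcs->get_pcie_data_offset(adev);
    spin_lock_irqsave(&adev->pcie_idx_lock, flags); /* keep pair atomic */
    WREG32(address, reg);   /* select the indirect PCIE register        */
    WREG32(data, v);        /* then write its value via the data port   */
    spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);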
@@ -242,41 +228,9 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return nbio_v7_0_get_memsize(adev);
- else
- return nbio_v6_1_get_memsize(adev);
+ return adev->nbio_funcs->get_memsize(adev);
}
-static const u32 vega10_golden_init[] =
-{
-};
-
-static const u32 raven_golden_init[] =
-{
-};
-
-static void soc15_init_golden_registers(struct amdgpu_device *adev)
-{
- /* Some of the registers might be dependent on GRBM_GFX_INDEX */
- mutex_lock(&adev->grbm_idx_mutex);
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
- vega10_golden_init,
- ARRAY_SIZE(vega10_golden_init));
- break;
- case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
- raven_golden_init,
- ARRAY_SIZE(raven_golden_init));
- break;
- default:
- break;
- }
- mutex_unlock(&adev->grbm_idx_mutex);
-}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
return adev->clock.spll.reference_freq;
@@ -332,25 +286,34 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
- { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
- { SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
+struct soc15_allowed_register_entry {
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ bool grbm_indexed;
+};
+
+
+static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
+ { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
+ { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -377,12 +340,9 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
if (indexed) {
return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
} else {
- switch (reg_offset) {
- case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG):
+ if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
return adev->gfx.config.gb_addr_config;
- default:
- return RREG32(reg_offset);
- }
+ return RREG32(reg_offset);
}
}
@@ -390,10 +350,13 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
uint32_t i;
+ struct soc15_allowed_register_entry *en;
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
- if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
+ en = &soc15_allowed_read_registers[i];
+ if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = soc15_get_register_value(adev,
@@ -404,6 +367,43 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
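soc15_read_register() implements the userspace register-read whitelist; with table entries now symbolic, a match compares the requested offset against each entry's runtime-resolved address. Condensed form (the tail of the soc15_get_register_value() call is elided in the hunk above and reconstructed here):

    for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
            en = &soc15_allowed_read_registers[i];
            if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
                               + en->reg_offset))
                    continue;          /* not this entry, keep scanning */
            *value = soc15_get_register_value(adev, en->grbm_indexed,
                                              se_num, sh_num, reg_offset);
            return 0;                  /* whitelisted: value returned   */
    }
    return -EINVAL;                    /* off-list reads are rejected   */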
+
+/**
+ * soc15_program_register_sequence - program an array of registers.
+ *
+ * @adev: amdgpu_device pointer
+ * @regs: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+ const struct soc15_reg_golden *regs,
+ const u32 array_size)
+{
+ const struct soc15_reg_golden *entry;
+ u32 tmp, reg;
+ int i;
+
+ for (i = 0; i < array_size; ++i) {
+ entry = &regs[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+
+ if (entry->and_mask == 0xffffffff) {
+ tmp = entry->or_mask;
+ } else {
+ tmp = RREG32(reg);
+ tmp &= ~(entry->and_mask);
+ tmp |= entry->or_mask;
+ }
+ WREG32(reg, tmp);
+ }
+
+}
+
+
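The and_mask == 0xffffffff special case skips the register read entirely: when every bit is being replaced, the old value is irrelevant and or_mask is written verbatim. Otherwise it is a masked merge. Worked through with the Vega10 mmSDMA0_CLK_CTRL entry from the sdma_v4_0.c hunk above:

    /*  and_mask = 0xff000ff0, or_mask = 0x3f000100
     *  assume RREG32(reg) returns old = 0xa5a5a5a5:
     *    tmp  = old & ~and_mask = 0xa5a5a5a5 & 0x00fff00f = 0x00a5a005
     *    tmp |= or_mask         = 0x00a5a005 | 0x3f000100 = 0x3fa5a105
     *  WREG32(reg, 0x3fa5a105) - bits outside and_mask survive intact.
     */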
static int soc15_asic_reset(struct amdgpu_device *adev)
{
u32 i;
@@ -428,9 +428,8 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = (adev->flags & AMD_IS_APU) ?
- nbio_v7_0_get_memsize(adev) :
- nbio_v6_1_get_memsize(adev);
+ u32 memsize = adev->nbio_funcs->get_memsize(adev);
+
if (memsize != 0xffffffff)
break;
udelay(1);
@@ -495,14 +494,10 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
- if (adev->flags & AMD_IS_APU) {
- nbio_v7_0_enable_doorbell_aperture(adev, enable);
- } else {
- nbio_v6_1_enable_doorbell_aperture(adev, enable);
- nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
- }
+ adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -516,50 +511,65 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
- nbio_v6_1_detect_hw_virt(adev);
+ /* Set IP register base before any HW register access */
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (adev->flags & AMD_IS_APU)
+ adev->nbio_funcs = &nbio_v7_0_funcs;
+ else
+ adev->nbio_funcs = &nbio_v6_1_funcs;
+
+ adev->nbio_funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
- amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
- amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
break;
case CHIP_RAVEN:
- amdgpu_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
- amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
- amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
default:
return -EINVAL;
@@ -570,10 +580,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return nbio_v7_0_get_rev_id(adev);
- else
- return nbio_v6_1_get_rev_id(adev);
+ return adev->nbio_funcs->get_rev_id(adev);
}
static const struct amdgpu_asic_funcs soc15_asic_funcs =
@@ -609,8 +616,8 @@ static int soc15_common_early_init(void *handle)
adev->asic_funcs = &soc15_asic_funcs;
- if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
- (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
+ (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
psp_enabled = true;
adev->rev_id = soc15_get_rev_id(adev);
@@ -675,7 +682,7 @@ static int soc15_common_early_init(void *handle)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- amdgpu_get_pcie_info(adev);
+ amdgpu_device_get_pcie_info(adev);
return 0;
}
@@ -709,15 +716,12 @@ static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* move the golden regs per IP block */
- soc15_init_golden_registers(adev);
/* enable pcie gen2/3 link */
soc15_pcie_gen3_enable(adev);
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
- if (!(adev->flags & AMD_IS_APU))
- nbio_v6_1_init_registers(adev);
+ adev->nbio_funcs->init_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
@@ -878,9 +882,9 @@ static int soc15_common_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
- nbio_v6_1_update_medium_grain_clock_gating(adev,
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- nbio_v6_1_update_medium_grain_light_sleep(adev,
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -894,9 +898,9 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_RAVEN:
- nbio_v7_0_update_medium_grain_clock_gating(adev,
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- nbio_v6_1_update_medium_grain_light_sleep(adev,
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -921,7 +925,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- nbio_v6_1_get_clockgating_state(adev, flags);
+ adev->nbio_funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index acb3cdb119f2..26b3feac5d06 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -29,8 +29,28 @@
extern const struct amd_ip_funcs soc15_common_ip_funcs;
+struct soc15_reg_golden {
+ u32 hwip;
+ u32 instance;
+ u32 segment;
+ u32 reg;
+ u32 and_mask;
+ u32 or_mask;
+};
+
+#define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg
+
+#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
+ { ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+ const struct soc15_reg_golden *registers,
+ const u32 array_size);
+
+int vega10_reg_base_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 7a8e4e28abb2..def865067edd 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -24,72 +24,28 @@
#ifndef __SOC15_COMMON_H__
#define __SOC15_COMMON_H__
-struct nbio_hdp_flush_reg {
- u32 hdp_flush_req_offset;
- u32 hdp_flush_done_offset;
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
-};
-
-struct nbio_pcie_index_data {
- u32 index_offset;
- u32 data_offset;
-};
-
/* Register Access Macros */
-#define SOC15_REG_OFFSET(ip, inst, reg) (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg)))))
+#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define WREG32_FIELD15(ip, idx, reg, field, val) \
- WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+ WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
+ (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
+ & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define RREG32_SOC15(ip, inst, reg) \
- RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))))
+ RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
- RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))) + offset)
+ RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
#define WREG32_SOC15(ip, inst, reg, value) \
- WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
- WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+ WREG32_NO_KIQ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
- WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))) + offset, value)
+ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
#endif
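
The macro rewrite above replaces the nested compile-time ip##_BASE__INST##inst##_SEGn selection with a read from a per-device table that is filled once at init (see vega10_reg_base_init() later in this diff). A self-contained toy of the lookup; the table shape mirrors adev->reg_offset[HWIP][instance][segment], but the names and base values are invented.

#include <stdint.h>
#include <stdio.h>

#define MAX_HWIP     2
#define MAX_INSTANCE 1

struct device_ctx {
	const uint32_t *reg_offset[MAX_HWIP][MAX_INSTANCE];
};

/* Per-ASIC segment bases, normally generated from hardware headers. */
static const uint32_t gc_bases[5] = { 0x00002000, 0x0000a000, 0, 0, 0 };

#define GC_HWIP 0
#define REG_OFFSET(adev, hwip, inst, seg, reg) \
	((adev)->reg_offset[hwip][inst][seg] + (reg))

int main(void)
{
	struct device_ctx adev_storage, *adev = &adev_storage;

	/* Done once at init, like vega10_reg_base_init(): */
	adev->reg_offset[GC_HWIP][0] = gc_bases;

	/* A register at offset 0x42 in segment 1 of the GC block: */
	printf("resolved: %#x\n", REG_OFFSET(adev, GC_HWIP, 0, 1, 0x42));
	return 0; /* prints 0xa042 */
}
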
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 0e8b887cf03e..86123448a8ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -412,10 +412,10 @@ static int uvd_v6_0_sw_init(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
ring = &adev->uvd.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
@@ -456,7 +456,7 @@ static int uvd_v6_0_sw_fini(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
- amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+ drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 660fa41dc877..4ec4447d33c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -29,7 +29,6 @@
#include "soc15_common.h"
#include "mmsch_v1_0.h"
-#include "soc15ip.h"
#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
@@ -385,7 +384,7 @@ static int uvd_v7_0_early_init(void *handle)
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -416,8 +415,8 @@ static int uvd_v7_0_sw_init(void *handle)
}
ring = &adev->uvd.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
@@ -472,7 +471,7 @@ static int uvd_v7_0_sw_fini(void *handle)
if (r)
return r;
- amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+ drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
@@ -1086,6 +1085,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring,
@@ -1123,6 +1124,7 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
@@ -1141,6 +1143,8 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
*/
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
amdgpu_ring_write(ring, 0);
@@ -1155,6 +1159,8 @@ static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
*/
static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
@@ -1214,6 +1220,8 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
amdgpu_ring_write(ring, vm_id);
@@ -1250,6 +1258,8 @@ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0);
@@ -1264,6 +1274,8 @@ static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1, uint32_t mask)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0);
@@ -1283,11 +1295,12 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- uint32_t data0, data1, mask;
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
+ uint32_t data0, data1, mask;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
@@ -1314,6 +1327,16 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
}
+static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+ struct amdgpu_device *adev = ring->adev;
+
+ for (i = 0; i < count; i++)
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+}
+
static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
@@ -1324,10 +1347,11 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
@@ -1681,7 +1705,7 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
- .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
+	.nop = PACKET0(0x81ff, 0), /* hardcoded UVD_NO_OP offset; adev not usable in a static initializer */
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_ring_get_rptr,
@@ -1700,7 +1724,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v7_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = uvd_v7_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
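
The emit_vm_flush hunks in this file (and in vce_v4_0.c and vcn_v1_0.c below) track an amdgpu_gart_get_vm_pde() signature change: instead of returning the address with AMDGPU_PTE_VALID already folded in, the helper now takes the address and a flags word by pointer, so the caller combines them explicitly. A sketch of the two shapes, with a made-up flag value:

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID (1ULL << 0)

/* Old shape: the helper returned address and flags already OR'd together. */
static uint64_t get_vm_pde_old(uint64_t pd_addr)
{
	return pd_addr | PTE_VALID;
}

/* New shape: address and flags travel separately, so the helper can
 * adjust either one (e.g. per page-table level) without hiding it. */
static void get_vm_pde_new(int level, uint64_t *pd_addr, uint64_t *flags)
{
	(void)level;      /* -1 stands for the root level at the call sites */
	(void)pd_addr;    /* unchanged in this toy */
	*flags |= PTE_VALID;
}

int main(void)
{
	uint64_t pd_addr = 0x100000, flags = PTE_VALID;

	get_vm_pde_new(-1, &pd_addr, &flags);
	pd_addr |= flags;

	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)get_vm_pde_old(0x100000),
	       (unsigned long long)pd_addr);
	return 0; /* both print 0x100001 */
}
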
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index f2f713650074..308949d6edde 100755
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -32,7 +32,6 @@
#include "soc15_common.h"
#include "mmsch_v1_0.h"
-#include "soc15ip.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
@@ -424,7 +423,7 @@ static int vce_v4_0_sw_init(void *handle)
if (r)
return r;
- size = (VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE) * 2;
+ size = VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
size += VCE_V4_0_FW_SIZE;
@@ -970,10 +969,11 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index e4673f792545..deb3fba790a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -28,7 +28,6 @@
#include "soc15d.h"
#include "soc15_common.h"
-#include "soc15ip.h"
#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
@@ -744,6 +743,8 @@ static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
*/
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, 0);
@@ -761,6 +762,8 @@ static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
*/
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
@@ -777,6 +780,8 @@ static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring,
@@ -812,6 +817,8 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
*/
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
@@ -828,6 +835,8 @@ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
amdgpu_ring_write(ring, vm_id);
@@ -846,6 +855,8 @@ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0);
@@ -860,6 +871,8 @@ static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1, uint32_t mask)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0);
@@ -879,11 +892,12 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- uint32_t data0, data1, mask;
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
+ uint32_t data0, data1, mask;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
@@ -1011,10 +1025,11 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,
@@ -1077,6 +1092,17 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+ struct amdgpu_device *adev = ring->adev;
+
+ for (i = 0; i < count; i++)
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+}
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1100,7 +1126,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
- .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
+	.nop = PACKET0(0x81ff, 0), /* hardcoded UVD_NO_OP offset; adev not usable in a static initializer */
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
@@ -1118,7 +1144,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
.test_ring = amdgpu_vcn_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = vcn_v1_0_ring_insert_nop,
.insert_start = vcn_v1_0_dec_ring_insert_start,
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index ca778cd4e6e8..e1d7dae0989b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -25,8 +25,6 @@
#include "amdgpu_ih.h"
#include "soc15.h"
-
-#include "soc15ip.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
@@ -97,10 +95,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
vega10_ih_disable_interrupts(adev);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_ih_control(adev);
- else
- nbio_v6_1_ih_control(adev);
+ adev->nbio_funcs->ih_control(adev);
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -151,10 +146,8 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ENABLE, 0);
}
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
- else
- nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+ adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
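
These hunks show the pattern used across the series: APU/dGPU branches on nbio_v6_1_* versus nbio_v7_0_* collapse into calls through adev->nbio_funcs, a function-pointer table selected once during early init. A compact sketch of that indirection (all names invented):

#include <stdio.h>

struct device;

struct nbio_funcs {
	void (*ih_control)(struct device *dev);
	void (*ih_doorbell_range)(struct device *dev, int use, int index);
};

struct device {
	const struct nbio_funcs *nbio_funcs;
};

static void nbio_v6_ih_control(struct device *dev)
{ (void)dev; puts("v6.1 ih_control"); }

static void nbio_v7_ih_control(struct device *dev)
{ (void)dev; puts("v7.0 ih_control"); }

static void generic_doorbell(struct device *dev, int use, int index)
{ (void)dev; printf("doorbell use=%d index=%d\n", use, index); }

static const struct nbio_funcs nbio_v6_funcs = {
	.ih_control = nbio_v6_ih_control,
	.ih_doorbell_range = generic_doorbell,
};

static const struct nbio_funcs nbio_v7_funcs = {
	.ih_control = nbio_v7_ih_control,
	.ih_doorbell_range = generic_doorbell,
};

int main(void)
{
	struct device dev = { .nbio_funcs = &nbio_v6_funcs };

	/* Callers no longer branch on the ASIC; they just call through. */
	dev.nbio_funcs->ih_control(&dev);
	dev.nbio_funcs = &nbio_v7_funcs; /* an APU would pick this at init */
	dev.nbio_funcs->ih_control(&dev);
	return 0;
}
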
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
new file mode 100644
index 000000000000..b7bdd04793d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "soc15ip.h"
+
+int vega10_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks; only initialize the blocks used by our driver */
+ uint32_t i;
+
+ for (i = 0; i < MAX_INSTANCE; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
+ adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
+ adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
+ }
+ return 0;
+}
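
The casts in vega10_reg_base_init() work because each generated *_BASE object is, per instance, a plain array of u32 segment bases, so &BASE.instance[i] can be reinterpreted as uint32_t * and indexed by reg##_BASE_IDX. A small model of that layout assumption (toy types; the real ones come from the generated soc15ip.h):

#include <stdint.h>
#include <stdio.h>

#define MAX_SEGMENT 5

struct ip_base_instance { uint32_t segment[MAX_SEGMENT]; };
struct ip_base          { struct ip_base_instance instance[2]; };

static const struct ip_base GC_BASE = {
	.instance = { { .segment = { 0x2000, 0xa000, 0, 0, 0 } } },
};

int main(void)
{
	/* vega10_reg_base_init() analogue for one block/instance: */
	const uint32_t *reg_offset = (const uint32_t *)&GC_BASE.instance[0];

	printf("segment 1 base: %#x\n", reg_offset[1]); /* 0xa000 */
	return 0;
}
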
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index bb8ca9489546..da2b99c2d95f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -282,29 +282,29 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
break;
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
@@ -449,14 +449,18 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
- uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
- /* bit0: 0 means pf and 1 means vf */
- /* bit31: 0 means disable IOV and 1 means enable */
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+ uint32_t reg = 0;
+
+ if (adev->asic_type == CHIP_TONGA ||
+ adev->asic_type == CHIP_FIJI) {
+ reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ /* bit0: 0 means pf and 1 means vf */
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+ /* bit31: 0 means disable IOV and 1 means enable */
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+ }
if (reg == 0) {
if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
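
The rewritten detection reads the same two bits but through REG_GET_FIELD, which keeps masks and shifts in the register headers and names the fields at the call site. The extraction reduces to mask-and-shift, as in this stand-alone demo (mask/shift values are stand-ins, not the real BIF definitions):

#include <stdint.h>
#include <stdio.h>

#define FUNC_IDENTIFIER_MASK  0x00000001U
#define FUNC_IDENTIFIER_SHIFT 0
#define IOV_ENABLE_MASK       0x80000000U
#define IOV_ENABLE_SHIFT      31

#define GET_FIELD(val, mask, shift) (((val) & (mask)) >> (shift))

int main(void)
{
	uint32_t reg = 0x80000001; /* a VF with IOV enabled */

	printf("is_vf=%u iov=%u\n",
	       (unsigned)GET_FIELD(reg, FUNC_IDENTIFIER_MASK, FUNC_IDENTIFIER_SHIFT),
	       (unsigned)GET_FIELD(reg, IOV_ENABLE_MASK, IOV_ENABLE_SHIFT));
	return 0; /* prints: is_vf=1 iov=1 */
}
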
@@ -667,7 +671,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
- amdgpu_pci_config_reset(adev);
+ amdgpu_device_pci_config_reset(adev);
udelay(100);
@@ -891,8 +895,8 @@ static int vi_common_early_init(void *handle)
adev->asic_funcs = &vi_asic_funcs;
- if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
- (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
+ (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
smc_enabled = true;
adev->rev_id = vi_get_rev_id(adev);
@@ -1074,7 +1078,7 @@ static int vi_common_early_init(void *handle)
/* vi use smc load by default */
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- amdgpu_get_pcie_info(adev);
+ amdgpu_device_get_pcie_info(adev);
return 0;
}
@@ -1487,115 +1491,115 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_TOPAZ:
/* topaz has no DCE, UVD, VCE */
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
- amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
break;
case CHIP_FIJI:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
- amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
}
break;
case CHIP_TONGA:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
- amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
}
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
case CHIP_CARRIZO:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &cz_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
- amdgpu_ip_block_add(adev, &acp_ip_block);
+ amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
break;
case CHIP_STONEY:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &cz_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
- amdgpu_ip_block_add(adev, &acp_ip_block);
+ amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
break;
default:
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
index 46464678f2b3..357d59648401 100644
--- a/drivers/gpu/drm/amd/display/TODO
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -105,3 +105,6 @@ useless with filtering output. dynamic debug printing might be an option.
20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
retimer that we need to program to pass PHY compliance. Currently that's
bypassing the i2c device and goes directly to HW. This should be changed.
+
+21. Remove vector.c from dc/basics. It's used in DDC code which can probably
+be simplified enough to no longer need a vector implementation.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ccbf10e3bbb6..1ce4c98385e3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -431,9 +431,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core initialized!\n");
+ DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
} else {
- DRM_INFO("Display Core failed to initialize!\n");
+ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -2351,7 +2351,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct dm_connector_state *dm_state)
{
struct drm_display_mode *preferred_mode = NULL;
- const struct drm_connector *drm_connector;
+ struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
@@ -2370,11 +2370,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!aconnector->dc_sink) {
/*
- * Exclude MST from creating fake_sink
- * TODO: need to enable MST into fake_sink feature
+ * Create the dc_sink when necessary for MST;
+ * don't apply a fake_sink to MST connectors.
*/
- if (aconnector->mst_port)
- goto stream_create_fail;
+ if (aconnector->mst_port) {
+ dm_dp_mst_dc_sink_create(drm_connector);
+ goto mst_dc_sink_create_done;
+ }
if (create_fake_sink(aconnector))
goto stream_create_fail;
@@ -2425,6 +2427,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream_create_fail:
dm_state_null:
drm_connector_null:
+mst_dc_sink_create_done:
return stream;
}
@@ -2725,8 +2728,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
};
struct edid *edid;
- if (!aconnector->base.edid_blob_ptr ||
- !aconnector->base.edid_blob_ptr->data) {
+ if (!aconnector->base.edid_blob_ptr) {
DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
aconnector->base.name);
@@ -4514,18 +4516,15 @@ static int dm_update_crtcs_state(struct dc *dc,
__func__, acrtc->base.base.id);
break;
}
- }
- if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
- dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
-
- new_crtc_state->mode_changed = false;
-
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
- new_crtc_state->mode_changed);
+ if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
}
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto next_crtc;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 8a1e4f5dbd64..2faa77a7eeda 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
+
+ bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 707928b88448..f3d87f418d2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -180,6 +180,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
return drm_add_edid_modes(connector, edid);
}
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct edid *edid;
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+ if (!edid) {
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+ return;
+ }
+
+ aconnector->edid = edid;
+
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)aconnector->edid,
+ (aconnector->edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+	if (!dc_sink)
+		return;
+
+	dc_sink->priv = aconnector;
+ aconnector->dc_sink = dc_sink;
+
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, aconnector->edid);
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base, aconnector->edid);
+}
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -306,6 +342,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
+ aconnector->mst_connected = true;
return &aconnector->base;
}
}
@@ -358,6 +395,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
+ aconnector->mst_connected = true;
+
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
@@ -389,6 +428,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_update_edid_property(
&aconnector->base,
NULL);
+
+ aconnector->mst_connected = false;
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -399,10 +440,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+ mutex_lock(&connector->dev->mode_config.mutex);
+ drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -411,6 +460,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
drm_connector_register(connector);
+ if (aconnector->mst_connected)
+ dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b40042..8cf51da26657 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index 6af8c8a9ad80..bca33bd9a0d2 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -24,7 +24,7 @@
# It provides the general basic services required by other DAL
# subcomponents.
-BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
+BASICS = conversion.o fixpt31_32.o fixpt32_32.o \
logger.o log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 23c9a0ec0181..310964915a83 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -46,7 +46,7 @@ uint16_t fixed_point_to_int_frac(
arg));
if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
- numerator = (uint16_t)dal_fixed31_32_floor(
+ numerator = (uint16_t)dal_fixed31_32_round(
dal_fixed31_32_mul_int(
arg,
divisor));
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 26936892c6f5..011a97f82fb6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -554,6 +554,22 @@ static inline uint32_t ux_dy(
return result | fractional_part;
}
+static inline uint32_t clamp_ux_dy(
+ int64_t value,
+ uint32_t integer_bits,
+ uint32_t fractional_bits,
+ uint32_t min_clamp)
+{
+ uint32_t truncated_val = ux_dy(value, integer_bits, fractional_bits);
+
+ if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
+ return (1 << (integer_bits + fractional_bits)) - 1;
+ else if (truncated_val > min_clamp)
+ return truncated_val;
+ else
+ return min_clamp;
+}
+
uint32_t dal_fixed31_32_u2d19(
struct fixed31_32 arg)
{
@@ -565,3 +581,15 @@ uint32_t dal_fixed31_32_u0d19(
{
return ux_dy(arg.value, 0, 19);
}
+
+uint32_t dal_fixed31_32_clamp_u0d14(
+ struct fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 14, 1);
+}
+
+uint32_t dal_fixed31_32_clamp_u0d10(
+ struct fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 10, 1);
+}
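
clamp_ux_dy() saturates any 31.32 input that overflows the target U<integer>.<fractional> format and enforces a floor of min_clamp (one LSB for the two new wrappers), so a nonzero gain can never truncate to zero. A user-space model of that conversion, simplified to plain truncation of the fraction:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 32 /* fractional bits of the 31.32 source format */

static uint32_t clamp_ux_dy(int64_t value, uint32_t integer_bits,
			    uint32_t fractional_bits, uint32_t min_clamp)
{
	/* Truncate: drop the source fraction bits that don't fit. */
	uint32_t truncated = (uint32_t)(value >> (FRAC_BITS - fractional_bits));

	if (value >= (1LL << (integer_bits + FRAC_BITS)))
		return (1u << (integer_bits + fractional_bits)) - 1; /* saturate */
	if (truncated > min_clamp)
		return truncated;
	return min_clamp;
}

int main(void)
{
	int64_t half = 1LL << (FRAC_BITS - 1); /* 0.5 in 31.32 */

	/* U0.14: 0.5 -> 0x2000; 0 clamps up to the minimum of 1. */
	printf("%#x %#x\n",
	       (unsigned)clamp_ux_dy(half, 0, 14, 1),
	       (unsigned)clamp_ux_dy(0, 0, 14, 1));
	return 0;
}
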
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 86e6438c5cf3..c00e405b63e8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -190,6 +190,7 @@ static struct graphics_object_id bios_parser_get_connector_id(
struct bios_parser *bp = BP_FROM_DCB(dcb);
struct graphics_object_id object_id = dal_graphics_object_id_init(
0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+ uint16_t id;
uint32_t connector_table_offset = bp->object_info_tbl_offset
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
@@ -197,12 +198,19 @@ static struct graphics_object_id bios_parser_get_connector_id(
ATOM_OBJECT_TABLE *tbl =
GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
- if (tbl && tbl->ucNumberOfObjects > i) {
- const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ if (!tbl) {
+ dm_error("Can't get connector table from atom bios.\n");
+ return object_id;
+ }
- object_id = object_id_from_bios_object_id(id);
+ if (tbl->ucNumberOfObjects <= i) {
+ dm_error("Can't find connector id %d in connector table of size %d.\n",
+ i, tbl->ucNumberOfObjects);
+ return object_id;
}
+ id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ object_id = object_id_from_bios_object_id(id);
return object_id;
}
@@ -2254,6 +2262,52 @@ static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
return BP_RESULT_OK;
}
+static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
+{
+ bool rc = true;
+
+ switch (id.type) {
+ case OBJECT_TYPE_UNKNOWN:
+ rc = false;
+ break;
+ case OBJECT_TYPE_GPU:
+ case OBJECT_TYPE_ENGINE:
+ /* do NOT check for id.id == 0 */
+ if (id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ default:
+ if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2)
+{
+ if (false == dal_graphics_object_id_is_valid(id1)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id1'!\n", __func__);
+ return false;
+ }
+
+ if (false == dal_graphics_object_id_is_valid(id2)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id2'!\n", __func__);
+ return false;
+ }
+
+ if (id1.id == id2.id && id1.enum_id == id2.enum_id
+ && id1.type == id2.type)
+ return true;
+
+ return false;
+}
+
static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
struct graphics_object_id id)
{
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 3f7b2dabc2b0..1aefed8cf98b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -387,6 +387,7 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
break;
default:
+ dm_error("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = NULL;
break;
}
@@ -910,6 +911,8 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
+ dm_error("Don't have set_pixel_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.set_pixel_clock = NULL;
break;
}
@@ -1227,6 +1230,8 @@ static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
enable_spread_spectrum_on_ppll_v3;
break;
default:
+ dm_error("Don't have enable_spread_spectrum_on_ppll for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL));
bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
break;
}
@@ -1422,6 +1427,8 @@ static void init_adjust_display_pll(struct bios_parser *bp)
bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
break;
default:
+ dm_error("Don't have adjust_display_pll for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll));
bp->cmd_tbl.adjust_display_pll = NULL;
break;
}
@@ -1695,6 +1702,8 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
+ dm_error("Don't have set_crtc_timing for dtd v%d\n",
+ dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -1704,6 +1713,8 @@ static void init_set_crtc_timing(struct bios_parser *bp)
bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
break;
default:
+ dm_error("Don't have set_crtc_timing for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing));
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -1890,6 +1901,8 @@ static void init_select_crtc_source(struct bios_parser *bp)
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
break;
default:
+ dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
bp->cmd_tbl.select_crtc_source = NULL;
break;
}
@@ -1997,6 +2010,8 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
+ dm_error("Don't have enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -2103,6 +2118,8 @@ static void init_program_clock(struct bios_parser *bp)
bp->cmd_tbl.program_clock = program_clock_v6;
break;
default:
+ dm_error("Don't have program_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.program_clock = NULL;
break;
}
@@ -2324,6 +2341,8 @@ static void init_enable_disp_power_gating(
enable_disp_power_gating_v2_1;
break;
default:
+ dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating));
bp->cmd_tbl.enable_disp_power_gating = NULL;
break;
}
@@ -2371,6 +2390,8 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
+ dm_error("Don't have set_dce_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index ba68693758a7..946db12388d6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -118,6 +118,7 @@ static void init_dig_encoder_control(struct bios_parser *bp)
bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
break;
default:
+ dm_error("Don't have dig_encoder_control for v%d\n", version);
bp->cmd_tbl.dig_encoder_control = NULL;
break;
}
@@ -205,6 +206,7 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
break;
default:
+ dm_error("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = NULL;
break;
}
@@ -268,6 +270,8 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
+ dm_error("Don't have set_pixel_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
bp->cmd_tbl.set_pixel_clock = NULL;
break;
}
@@ -379,6 +383,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
+ dm_error("Don't have set_crtc_timing for v%d\n", dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -498,6 +503,8 @@ static void init_select_crtc_source(struct bios_parser *bp)
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
break;
default:
+ dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source));
bp->cmd_tbl.select_crtc_source = NULL;
break;
}
@@ -565,6 +572,8 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
+ dm_error("Don't have enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -661,6 +670,8 @@ static void init_enable_disp_power_gating(
enable_disp_power_gating_v2_1;
break;
default:
+ dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
bp->cmd_tbl.enable_disp_power_gating = NULL;
break;
}
@@ -710,6 +721,8 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
+ dm_error("Don't have set_dce_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 6347712db834..2e11fac2a63d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -29,6 +29,15 @@
#include "core_types.h"
#include "dal_asic_id.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
/*******************************************************************************
* Private Functions
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
index 626f9cf8aad2..5e2ea12fbb73 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -27,6 +27,15 @@
#include "dcn_calc_auto.h"
#include "dcn_calc_math.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
/*REVISION#250*/
void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
{
@@ -773,11 +782,11 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
v->dst_y_after_scaler = 0.0;
}
v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep;
- v->v_update_offset[k] =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->v_update_offset[k][j] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]);
- v->v_update_width[k] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
- v->v_ready_offset[k] =dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
- v->time_setup = (v->v_update_offset[k] + v->v_update_width[k] + v->v_ready_offset[k]) / v->pixel_clock[k];
+ v->v_update_width[k][j] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
+ v->v_ready_offset[k][j] = dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
+ v->time_setup = (v->v_update_offset[k][j] + v->v_update_width[k][j] + v->v_ready_offset[k][j]) / v->pixel_clock[k];
v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / v->return_bw_per_state[i];
if (v->pte_enable == dcn_bw_yes) {
v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i];
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
index b6abe0f3bb15..7600a4a4abc7 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
@@ -25,37 +25,44 @@
#include "dcn_calc_math.h"
+#define isNaN(number) ((number) != (number))
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
float dcn_bw_mod(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 - arg1 * ((int) (arg1 / arg2));
}
float dcn_bw_min2(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 < arg2 ? arg1 : arg2;
}
unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
{
- if (arg1 != arg1)
- return arg2;
- if (arg2 != arg2)
- return arg1;
return arg1 > arg2 ? arg1 : arg2;
}
float dcn_bw_max2(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 > arg2 ? arg1 : arg2;
}
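
isNaN() leans on the IEEE 754 rule that NaN is the only value unequal to itself, the same test the open-coded arg != arg comparisons performed; the checks are rightly dropped from dcn_bw_max(), whose unsigned integer arguments can never be NaN. A quick demonstration:

#include <stdio.h>

#define isNaN(number) ((number) != (number))

static float bw_min2(const float arg1, const float arg2)
{
	if (isNaN(arg1))
		return arg2; /* ignore a NaN operand */
	if (isNaN(arg2))
		return arg1;
	return arg1 < arg2 ? arg1 : arg2;
}

int main(void)
{
	volatile float zero = 0.0f;
	float not_a_number = zero / zero; /* quiet NaN at runtime */

	printf("%d %g\n", isNaN(not_a_number), bw_min2(not_a_number, 2.5f));
	return 0; /* prints: 1 2.5 */
}
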
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index a4fbca34bcdf..331891c2c71a 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -33,6 +33,15 @@
#include "dcn10/dcn10_resource.h"
#include "dcn_calc_math.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
/* Defaults from spreadsheet rev#247 */
const struct dcn_soc_bounding_box dcn10_soc_defaults = {
/* latencies */
@@ -878,6 +887,17 @@ bool dcn_validate_bandwidth(
+ pipe->bottom_pipe->plane_res.scl_data.recout.width;
}
+ if (pipe->plane_state->rotation % 2 == 0) {
+ ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]);
+ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
+ || v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]);
+ } else {
+ ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
+ || v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]);
+ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
+ }
v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
pipe->plane_state->format);
@@ -888,6 +908,15 @@ bool dcn_validate_bandwidth(
v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+ /*
+ * The spreadsheet doesn't handle a taps_c value of one properly;
+ * force chroma to always be scaled so that bandwidth validation
+ * passes.
+ */
+ if (v->override_hta_pschroma[input_idx] == 1)
+ v->override_hta_pschroma[input_idx] = 2;
+ if (v->override_vta_pschroma[input_idx] == 1)
+ v->override_vta_pschroma[input_idx] = 2;
v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
}
if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
@@ -985,9 +1014,9 @@ bool dcn_validate_bandwidth(
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
continue;
- pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
- pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
- pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
@@ -1026,9 +1055,9 @@ bool dcn_validate_bandwidth(
TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
/* update previously split pipe */
- hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
- hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
- hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
@@ -1556,35 +1585,6 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
dc->dcn_ip->dcfclk_cstate_latency);
- dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vmax.socclk_mhz = dc->dcn_soc->socclk;
-
- dc->dml.soc.vmin.dcfclk_mhz = dc->dcn_soc->dcfclkv_min0p65;
- dc->dml.soc.vmid.dcfclk_mhz = dc->dcn_soc->dcfclkv_mid0p72;
- dc->dml.soc.vnom.dcfclk_mhz = dc->dcn_soc->dcfclkv_nom0p8;
- dc->dml.soc.vmax.dcfclk_mhz = dc->dcn_soc->dcfclkv_max0p9;
-
- dc->dml.soc.vmin.dispclk_mhz = dc->dcn_soc->max_dispclk_vmin0p65;
- dc->dml.soc.vmid.dispclk_mhz = dc->dcn_soc->max_dispclk_vmid0p72;
- dc->dml.soc.vnom.dispclk_mhz = dc->dcn_soc->max_dispclk_vnom0p8;
- dc->dml.soc.vmax.dispclk_mhz = dc->dcn_soc->max_dispclk_vmax0p9;
-
- dc->dml.soc.vmin.dppclk_mhz = dc->dcn_soc->max_dppclk_vmin0p65;
- dc->dml.soc.vmid.dppclk_mhz = dc->dcn_soc->max_dppclk_vmid0p72;
- dc->dml.soc.vnom.dppclk_mhz = dc->dcn_soc->max_dppclk_vnom0p8;
- dc->dml.soc.vmax.dppclk_mhz = dc->dcn_soc->max_dppclk_vmax0p9;
-
- dc->dml.soc.vmin.phyclk_mhz = dc->dcn_soc->phyclkv_min0p65;
- dc->dml.soc.vmid.phyclk_mhz = dc->dcn_soc->phyclkv_mid0p72;
- dc->dml.soc.vnom.phyclk_mhz = dc->dcn_soc->phyclkv_nom0p8;
- dc->dml.soc.vmax.phyclk_mhz = dc->dcn_soc->phyclkv_max0p9;
-
- dc->dml.soc.vmin.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
- dc->dml.soc.vmid.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
- dc->dml.soc.vnom.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
- dc->dml.soc.vmax.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time;
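
    The DLG-parameter hunks above give v_update_width, v_update_offset and
    v_ready_offset a second array dimension: index 0 holds the values for a
    single DPP per plane, index 1 the values for a plane split across two DPPs.
    A hypothetical helper (not in the patch) that names the repeated ternary:

    /* Hypothetical helper: pick the DLG parameter set matching the number
     * of DPPs driving the plane (0 = single DPP, 1 = 2-DPP split).
     */
    static inline int dlg_param_sel(int dpp_per_plane)
    {
        return dpp_per_plane == 2 ? 1 : 0;
    }

    /* e.g.:
     * vupdate_width =
     *     v->v_update_width[idx][dlg_param_sel(v->dpp_per_plane[idx])];
     */
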
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d1488d5ee028..35e84ed031de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -283,19 +283,17 @@ static bool construct(struct dc *dc,
const struct dc_init_data *init_params)
{
struct dal_logger *logger;
- struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
- struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
- GFP_KERNEL);
- struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
- GFP_KERNEL);
+ struct dc_context *dc_ctx;
+ struct bw_calcs_dceip *dc_dceip;
+ struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
- struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
- GFP_KERNEL);
- struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
#endif
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__);
goto fail;
@@ -303,6 +301,7 @@ static bool construct(struct dc *dc,
dc->bw_dceip = dc_dceip;
+ dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
if (!dc_vbios) {
dm_error("%s: failed to create vbios\n", __func__);
goto fail;
@@ -310,6 +309,7 @@ static bool construct(struct dc *dc,
dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
goto fail;
@@ -317,6 +317,7 @@ static bool construct(struct dc *dc,
dc->dcn_soc = dcn_soc;
+ dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
if (!dcn_ip) {
dm_error("%s: failed to create dcn_ip\n", __func__);
goto fail;
@@ -325,11 +326,18 @@ static bool construct(struct dc *dc,
dc->dcn_ip = dcn_ip;
#endif
+ dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
if (!dc_ctx) {
dm_error("%s: failed to create ctx\n", __func__);
goto fail;
}
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+ dc->ctx = dc_ctx;
+
dc->current_state = dc_create_state();
if (!dc->current_state) {
@@ -337,11 +345,6 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc_ctx->cgs_device = init_params->cgs_device;
- dc_ctx->driver_context = init_params->driver;
- dc_ctx->dc = dc;
- dc_ctx->asic_id = init_params->asic_id;
-
/* Create logger */
logger = dal_logger_create(dc_ctx, init_params->log_mask);
@@ -351,11 +354,10 @@ static bool construct(struct dc *dc,
goto fail;
}
dc_ctx->logger = logger;
- dc->ctx = dc_ctx;
- dc->ctx->dce_environment = init_params->dce_environment;
+ dc_ctx->dce_environment = init_params->dce_environment;
dc_version = resource_parse_asic_id(init_params->asic_id);
- dc->ctx->dce_version = dc_version;
+ dc_ctx->dce_version = dc_version;
#if defined(CONFIG_DRM_AMD_DC_FBC)
dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
@@ -578,7 +580,7 @@ static void program_timing_sync(
for (j = 0; j < group_size; j++) {
struct pipe_ctx *temp;
- if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
if (j == 0)
break;
@@ -591,7 +593,7 @@ static void program_timing_sync(
/* remove any other unblanked pipes as they have already been synced */
for (j = j + 1; j < group_size; j++) {
- if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
@@ -786,6 +788,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+ dc->optimized_required = false;
+
/* 3rd param should be true, temp w/a for RV*/
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
@@ -981,6 +985,11 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
update_flags->bits.per_pixel_alpha_change = 1;
+ if (u->plane_info->dcc.enable != u->surface->dcc.enable
+ || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
+ || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
+ update_flags->bits.dcc_change = 1;
+
if (pixel_format_to_bpp(u->plane_info->format) !=
pixel_format_to_bpp(u->surface->format))
/* different bytes per element will require full bandwidth
@@ -1178,12 +1187,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL) {
dc->hwss.set_bandwidth(dc, context, false);
context_clock_trace(dc, context);
-
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
- }
}
if (surface_count == 0) {
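
    The construct() rework above moves each kzalloc() to sit directly before the
    NULL check that consumes it, and publishes dc->ctx before dependent state is
    created. A standalone sketch of the same allocate-then-check idiom with a
    shared error path (illustrative types only; calloc stands in for kzalloc):

    #include <stdlib.h>
    #include <stdio.h>

    struct ctx { int id; };
    struct ip  { int rev; };

    static int construct_sketch(void)
    {
        struct ctx *ctx = NULL;
        struct ip *ip = NULL;

        /* each allocation sits next to its check, so a failure never
         * reaches code that assumes later pointers are valid */
        ctx = calloc(1, sizeof(*ctx));
        if (!ctx) {
            fprintf(stderr, "failed to create ctx\n");
            goto fail;
        }

        ip = calloc(1, sizeof(*ip));
        if (!ip) {
            fprintf(stderr, "failed to create ip\n");
            goto fail;
        }

        /* real code would store ctx/ip in the dc and free them in destruct() */
        free(ip);
        free(ctx);
        return 0;

    fail:
        /* free(NULL) is a no-op, so one cleanup path covers every exit */
        free(ip);
        free(ctx);
        return -1;
    }

    int main(void)
    {
        return construct_sketch() ? 1 : 0;
    }
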
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 71993d5983bf..ebc96b720083 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -28,6 +28,8 @@
#include "timing_generator.h"
#include "hw_sequencer.h"
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
/* used as index in array of black_color_format */
enum black_color_format {
BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
@@ -38,6 +40,15 @@ enum black_color_format {
BLACK_COLOR_FORMAT_DEBUG,
};
+enum dc_color_space_type {
+ COLOR_SPACE_RGB_TYPE,
+ COLOR_SPACE_RGB_LIMITED_TYPE,
+ COLOR_SPACE_YCBCR601_TYPE,
+ COLOR_SPACE_YCBCR709_TYPE,
+ COLOR_SPACE_YCBCR601_LIMITED_TYPE,
+ COLOR_SPACE_YCBCR709_LIMITED_TYPE
+};
+
static const struct tg_color black_color_format[] = {
/* BlackColorFormat_RGB_FullRange */
{0, 0, 0},
@@ -53,6 +64,140 @@ static const struct tg_color black_color_format[] = {
{0xff, 0xff, 0},
};
+struct out_csc_color_matrix_type {
+ enum dc_color_space_type color_space_type;
+ uint16_t regval[12];
+};
+
+static const struct out_csc_color_matrix_type output_csc_matrix[] = {
+ { COLOR_SPACE_RGB_TYPE,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ { COLOR_SPACE_RGB_LIMITED_TYPE,
+ { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} },
+ { COLOR_SPACE_YCBCR601_TYPE,
+ { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45,
+ 0xF6B7, 0xE04, 0x1004} },
+ { COLOR_SPACE_YCBCR709_TYPE,
+ { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
+ 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
+
+ /* TODO: correct values below */
+ { COLOR_SPACE_YCBCR601_LIMITED_TYPE,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+ { COLOR_SPACE_YCBCR709_LIMITED_TYPE,
+ { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+};
+
+static bool is_rgb_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_XR_RGB ||
+ color_space == COLOR_SPACE_MSREF_SCRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
+ color_space == COLOR_SPACE_ADOBERGB ||
+ color_space == COLOR_SPACE_DCIP3 ||
+ color_space == COLOR_SPACE_DOLBYVISION)
+ ret = true;
+ return ret;
+}
+
+static bool is_rgb_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr601_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR601 ||
+ color_space == COLOR_SPACE_XV_YCC_601)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr601_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR601_LIMITED)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr709_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR709 ||
+ color_space == COLOR_SPACE_XV_YCC_709)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr709_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR709_LIMITED)
+ ret = true;
+ return ret;
+}
+
+enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
+{
+ enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
+
+ if (is_rgb_type(color_space))
+ type = COLOR_SPACE_RGB_TYPE;
+ else if (is_rgb_limited_type(color_space))
+ type = COLOR_SPACE_RGB_LIMITED_TYPE;
+ else if (is_ycbcr601_type(color_space))
+ type = COLOR_SPACE_YCBCR601_TYPE;
+ else if (is_ycbcr709_type(color_space))
+ type = COLOR_SPACE_YCBCR709_TYPE;
+ else if (is_ycbcr601_limited_type(color_space))
+ type = COLOR_SPACE_YCBCR601_LIMITED_TYPE;
+ else if (is_ycbcr709_limited_type(color_space))
+ type = COLOR_SPACE_YCBCR709_LIMITED_TYPE;
+
+ return type;
+}
+
+const uint16_t *find_color_matrix(enum dc_color_space color_space,
+ uint32_t *array_size)
+{
+ int i;
+ enum dc_color_space_type type;
+ const uint16_t *val = NULL;
+ int arr_size = NUM_ELEMENTS(output_csc_matrix);
+
+ type = get_color_space_type(color_space);
+ for (i = 0; i < arr_size; i++)
+ if (output_csc_matrix[i].color_space_type == type) {
+ val = output_csc_matrix[i].regval;
+ *array_size = 12;
+ break;
+ }
+
+ return val;
+}
+
+
void color_space_to_black_color(
const struct dc *dc,
enum dc_color_space colorspace,
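
    find_color_matrix() collapses the many dc_color_space values into the six
    matrix types above and returns the matching 3x4 CSC register block. A
    hypothetical call site, assuming the declarations added above are in scope:

    uint32_t array_size = 0;
    const uint16_t *regval =
        find_color_matrix(COLOR_SPACE_YCBCR709, &array_size);

    if (regval != NULL) {
        /* array_size is 12: program the 3x4 output CSC from regval[0..11] */
    }
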
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7b0e43c0685c..a37428271573 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -938,8 +938,9 @@ static bool construct(
link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
- dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
- __func__, init_params->connector_index);
+ dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+ __func__, init_params->connector_index,
+ link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
}
@@ -1271,6 +1272,24 @@ static enum dc_status enable_link_dp(
return status;
}
+static enum dc_status enable_link_edp(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum dc_status status;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+
+ status = enable_link_dp(state, pipe_ctx);
+
+ link->dc->hwss.edp_backlight_control(link, true);
+
+ return status;
+}
+
static enum dc_status enable_link_dp_mst(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
@@ -1746,9 +1765,11 @@ static enum dc_status enable_link(
enum dc_status status = DC_ERROR_UNEXPECTED;
switch (pipe_ctx->stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_EDP:
status = enable_link_dp(state, pipe_ctx);
break;
+ case SIGNAL_TYPE_EDP:
+ status = enable_link_edp(state, pipe_ctx);
+ break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
status = enable_link_dp_mst(state, pipe_ctx);
msleep(200);
@@ -1801,7 +1822,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
link->link_enc->funcs->disable_output(link->link_enc, signal);
}
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dc_dongle_caps *dongle_caps)
{
@@ -1833,6 +1854,8 @@ bool dp_active_dongle_validate_timing(
/* Check Color Depth and Pixel Clock */
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
required_pix_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ required_pix_clk = required_pix_clk * 2 / 3;
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
@@ -1907,12 +1930,18 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
unsigned int controller_id = 0;
+ bool use_smooth_brightness = true;
int i;
- if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ if ((dmcu == NULL) ||
+ (abm == NULL) ||
+ (abm->funcs->set_backlight_level == NULL))
return false;
+ use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
"New Backlight level: %d (0x%X)\n", level, level);
@@ -1935,7 +1964,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
abm,
level,
frame_ramp,
- controller_id);
+ controller_id,
+ use_smooth_brightness);
}
return true;
@@ -1952,144 +1982,6 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
return true;
}
-bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
-{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
- if (dmcu != NULL && link->psr_enabled)
- dmcu->funcs->get_psr_state(dmcu, psr_state);
-
- return true;
-}
-
-bool dc_link_setup_psr(struct dc_link *link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context)
-{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
- int i;
-
- psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
-
- if (link != NULL &&
- dmcu != NULL) {
- /* updateSinkPsrDpcdConfig*/
- union dpcd_psr_configuration psr_configuration;
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
- psr_configuration.bits.ENABLE = 1;
- psr_configuration.bits.CRC_VERIFICATION = 1;
- psr_configuration.bits.FRAME_CAPTURE_INDICATION =
- psr_config->psr_frame_capture_indication_req;
-
- /* Check for PSR v2*/
- if (psr_config->psr_version == 0x2) {
- /* For PSR v2 selective update.
- * Indicates whether sink should start capturing
- * immediately following active scan line,
- * or starting with the 2nd active scan line.
- */
- psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
- /*For PSR v2, determines whether Sink should generate
- * IRQ_HPD when CRC mismatch is detected.
- */
- psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
- }
-
- dm_helpers_dp_write_dpcd(
- link->ctx,
- link,
- 368,
- &psr_configuration.raw,
- sizeof(psr_configuration.raw));
-
- psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
- psr_context->transmitterId = link->link_enc->transmitter;
- psr_context->engineId = link->link_enc->preferred_engine;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
- == stream) {
- /* dmcu -1 for all controller id values,
- * therefore +1 here
- */
- psr_context->controllerId =
- core_dc->current_state->res_ctx.
- pipe_ctx[i].stream_res.tg->inst + 1;
- break;
- }
- }
-
- /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
- psr_context->phyType = PHY_TYPE_UNIPHY;
- /*PhyId is associated with the transmitter id*/
- psr_context->smuPhyId = link->link_enc->transmitter;
-
- psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
- psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
- timing.pix_clk_khz * 1000),
- stream->timing.v_total),
- stream->timing.h_total);
-
- psr_context->psrSupportedDisplayConfig = true;
- psr_context->psrExitLinkTrainingRequired =
- psr_config->psr_exit_link_training_required;
- psr_context->sdpTransmitLineNumDeadline =
- psr_config->psr_sdp_transmit_line_num_deadline;
- psr_context->psrFrameCaptureIndicationReq =
- psr_config->psr_frame_capture_indication_req;
-
- psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
-
- psr_context->numberOfControllers =
- link->dc->res_pool->res_cap->num_timing_generator;
-
- psr_context->rfb_update_auto_en = true;
-
- /* 2 frames before enter PSR. */
- psr_context->timehyst_frames = 2;
- /* half a frame
- * (units in 100 lines, i.e. a value of 1 represents 100 lines)
- */
- psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
- psr_context->aux_repeats = 10;
-
- psr_context->psr_level.u32all = 0;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- /*skip power down the single pipe since it blocks the cstate*/
- if (ASIC_REV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
-
- /* SMU will perform additional powerdown sequence.
- * For unsupported ASICs, set psr_level flag to skip PSR
- * static screen notification to SMU.
- * (Always set for DAL2, did not check ASIC)
- */
- psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
-
- /* Complete PSR entry before aborting to prevent intermittent
- * freezes on certain eDPs
- */
- psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
-
- /* Controls additional delay after remote frame capture before
- * continuing power down, default = 0
- */
- psr_context->frame_delay = 0;
-
- link->psr_enabled = true;
- dmcu->funcs->setup_psr(dmcu, link, psr_context);
- return true;
- } else
- return false;
-
-}
-
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
return &link->link_status;
@@ -2418,6 +2310,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP)
+ core_dc->hwss.edp_backlight_control(pipe_ctx->stream->sink->link, false);
+
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
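
    Read together with the dc_link_hwss.c and dce110_hw_sequencer.c hunks below,
    these changes pull the scattered eDP panel-power and backlight calls into one
    place. A comment-only summary of the resulting sequence (not code from the
    patch):

    /*
     * eDP bring-up order, now centralized in enable_link_edp():
     *
     *   edp_power_control(link, true);       panel power on
     *   edp_wait_for_hpd_ready(link, true);  wait for the panel's HPD
     *   enable_link_dp(state, pipe_ctx);     link training
     *   edp_backlight_control(link, true);   backlight last
     *
     * Teardown mirrors it: core_link_disable_stream() turns the backlight
     * off first, before the stream and the link are disabled.
     */
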
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 00528b214a9f..61e8c3e02d16 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1470,6 +1470,12 @@ void decide_link_settings(struct dc_stream_state *stream,
return;
}
+ /* eDP always uses the verified link cap setting */
+ if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index f2902569be2e..2096f2a179f2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -88,15 +88,7 @@ void dp_enable_link_phy(
}
if (dc_is_dp_sst_signal(signal)) {
- if (signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_power_control(link, true);
- link_enc->funcs->enable_dp_output(
- link_enc,
- link_settings,
- clock_source);
- link->dc->hwss.edp_backlight_control(link, true);
- } else
- link_enc->funcs->enable_dp_output(
+ link_enc->funcs->enable_dp_output(
link_enc,
link_settings,
clock_source);
@@ -138,7 +130,6 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
dp_receiver_power_ctrl(link, false);
if (signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, false);
edp_receiver_ready_T9(link);
link->link_enc->funcs->disable_output(link->link_enc, signal);
link->dc->hwss.edp_power_control(link, false);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ad28eba017f2..95b8dd0e53c6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -498,26 +498,15 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
/* Handle hsplit */
- if (pri_split || sec_split) {
- /* HMirror XOR Secondary_pipe XOR Rotation_180 */
- bool right_view = (sec_split != plane_state->horizontal_mirror) !=
- (plane_state->rotation == ROTATION_ANGLE_180);
-
- if (plane_state->rotation == ROTATION_ANGLE_90
- || plane_state->rotation == ROTATION_ANGLE_270)
- /* Secondary_pipe XOR Rotation_270 */
- right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
-
- if (right_view) {
- data->viewport.x += data->viewport.width / 2;
- data->viewport_c.x += data->viewport_c.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- data->viewport_c.width = (data->viewport_c.width + 1) / 2;
- } else {
- data->viewport.width /= 2;
- data->viewport_c.width /= 2;
- }
+ if (sec_split) {
+ data->viewport.x += data->viewport.width / 2;
+ data->viewport_c.x += data->viewport_c.width / 2;
+ /* Ceil offset pipe */
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ data->viewport_c.width = (data->viewport_c.width + 1) / 2;
+ } else if (pri_split) {
+ data->viewport.width /= 2;
+ data->viewport_c.width /= 2;
}
if (plane_state->rotation == ROTATION_ANGLE_90 ||
@@ -534,6 +523,11 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
int recout_full_x, recout_full_y;
+ bool pri_split = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
@@ -568,33 +562,43 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
- pipe_ctx->plane_res.scl_data.recout.y;
/* Handle h & vsplit */
- if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
- pipe_ctx->plane_state) {
- if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
- /* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
+ if (sec_split && top_bottom_split) {
+ pipe_ctx->plane_res.scl_data.recout.y +=
+ pipe_ctx->plane_res.scl_data.recout.height / 2;
+ /* Floor primary pipe, ceil 2ndary pipe */
+ pipe_ctx->plane_res.scl_data.recout.height =
+ (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
+ } else if (pri_split && top_bottom_split)
+ pipe_ctx->plane_res.scl_data.recout.height /= 2;
+ else if (pri_split || sec_split) {
+ /* HMirror XOR Secondary_pipe XOR Rotation_180 */
+ bool right_view = (sec_split != plane_state->horizontal_mirror) !=
+ (plane_state->rotation == ROTATION_ANGLE_180);
+
+ if (plane_state->rotation == ROTATION_ANGLE_90
+ || plane_state->rotation == ROTATION_ANGLE_270)
+ /* Secondary_pipe XOR Rotation_270 */
+ right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
+
+ if (right_view) {
+ pipe_ctx->plane_res.scl_data.recout.x +=
+ pipe_ctx->plane_res.scl_data.recout.width / 2;
+ /* Ceil offset pipe */
+ pipe_ctx->plane_res.scl_data.recout.width =
+ (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
} else {
- pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
- pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- }
- } else if (pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
- if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else
pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ }
}
-
/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
* * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
* ratio)
*/
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
* stream->dst.width / stream->src.width -
surf_src.x * plane_state->dst_rect.width / surf_src.width
* stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
surf_src.y * plane_state->dst_rect.height / surf_src.height
* stream->dst.height / stream->src.height;
@@ -650,7 +654,20 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
struct rect src = pipe_ctx->plane_state->src_rect;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
+ /*
+ * Calculate the scan direction from rotation/mirror so the viewport offset adjustments below land on the correct edge
+ */
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
+ flip_vert_scan_dir = true;
+ flip_horz_scan_dir = true;
+ } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
+ flip_vert_scan_dir = true;
+ else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ flip_horz_scan_dir = true;
+ if (pipe_ctx->plane_state->horizontal_mirror)
+ flip_horz_scan_dir = !flip_horz_scan_dir;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
@@ -715,7 +732,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
}
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
+ if (data->viewport.x && !flip_horz_scan_dir) {
int int_part;
data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
@@ -736,7 +753,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
}
- if (data->viewport_c.x) {
+ if (data->viewport_c.x && !flip_horz_scan_dir) {
int int_part;
data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
@@ -757,7 +774,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
}
- if (data->viewport.y) {
+ if (data->viewport.y && !flip_vert_scan_dir) {
int int_part;
data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
@@ -778,7 +795,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
}
- if (data->viewport_c.y) {
+ if (data->viewport_c.y && !flip_vert_scan_dir) {
int int_part;
data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
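
    The flip_vert_scan_dir/flip_horz_scan_dir logic above reduces to a small
    truth table: 180 degrees flips both axes, 90 flips vertical, 270 flips
    horizontal, and horizontal mirror toggles the horizontal result. A standalone
    restatement of that rule (enum names here are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    enum rotation { ROT_0, ROT_90, ROT_180, ROT_270 };

    static void scan_dir(enum rotation rot, bool mirror,
                         bool *flip_vert, bool *flip_horz)
    {
        *flip_vert = (rot == ROT_90 || rot == ROT_180);
        *flip_horz = (rot == ROT_270 || rot == ROT_180);
        if (mirror)
            *flip_horz = !*flip_horz; /* mirror toggles the horizontal flip */
    }

    int main(void)
    {
        bool v, h;

        scan_dir(ROT_270, true, &v, &h);
        printf("270+mirror: flip_vert=%d flip_horz=%d\n", v, h); /* 0 0 */
        return 0;
    }
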
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 375fb457e223..261811e0c094 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -226,7 +226,7 @@ bool dc_stream_set_cursor_attributes(
if (pipe_ctx->plane_res.dpp != NULL &&
pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.dpp, attributes);
+ pipe_ctx->plane_res.dpp, attributes->color_format);
}
stream->cursor_attributes = *attributes;
@@ -301,6 +301,8 @@ bool dc_stream_set_cursor_position(
}
+ stream->cursor_position = *position;
+
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index c99ed85ba9a2..e2e3c9df79ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.20"
+#define DC_VER "3.1.27"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -250,6 +250,8 @@ struct dc {
*/
struct dm_pp_display_configuration prev_display_config;
+ bool optimized_required;
+
/* FBC compressor */
#if defined(CONFIG_DRM_AMD_DC_FBC)
struct compressor *fbc_compressor;
@@ -340,7 +342,7 @@ struct dc_hdr_static_metadata {
enum dc_transfer_func_type {
TF_TYPE_PREDEFINED,
TF_TYPE_DISTRIBUTED_POINTS,
- TF_TYPE_BYPASS
+ TF_TYPE_BYPASS,
};
struct dc_transfer_func_distributed_points {
@@ -359,6 +361,7 @@ enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_BT709,
TRANSFER_FUNCTION_PQ,
TRANSFER_FUNCTION_LINEAR,
+ TRANSFER_FUNCTION_UNITY,
};
struct dc_transfer_func {
@@ -385,6 +388,7 @@ union surface_update_flags {
struct {
/* Medium updates */
+ uint32_t dcc_change:1;
uint32_t color_space_change:1;
uint32_t input_tf_change:1;
uint32_t horizontal_mirror_change:1;
@@ -436,6 +440,7 @@ struct dc_plane_state {
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
+ bool is_tiling_rotated;
bool per_pixel_alpha;
bool visible;
bool flip_immediate;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 587c0bb3d4ac..03029f72dc3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -579,8 +579,6 @@ enum dc_timing_standard {
TIMING_STANDARD_MAX
};
-
-
enum dc_color_depth {
COLOR_DEPTH_UNDEFINED,
COLOR_DEPTH_666,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index fed0e5ea9625..01c60f11b2bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -86,6 +86,7 @@ struct dc_stream_state {
struct dc_stream_status status;
struct dc_cursor_attributes cursor_attributes;
+ struct dc_cursor_position cursor_position;
/* from stream struct */
struct kref refcount;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9291a60126ad..9faddfae241d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -218,6 +218,7 @@ struct dc_edid_caps {
bool lte_340mcsc_scramble;
bool edid_hdmi;
+ bool hdr_supported;
};
struct view {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 3fe8e697483f..b48190f54907 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -385,21 +385,12 @@ static bool dce_abm_init_backlight(struct abm *abm)
return true;
}
-static bool is_dmcu_initialized(struct abm *abm)
-{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- unsigned int dmcu_uc_reset;
-
- REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
-
- return !dmcu_uc_reset;
-}
-
static bool dce_abm_set_backlight_level(
struct abm *abm,
unsigned int backlight_level,
unsigned int frame_ramp,
- unsigned int controller_id)
+ unsigned int controller_id,
+ bool use_smooth_brightness)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
@@ -408,7 +399,7 @@ static bool dce_abm_set_backlight_level(
backlight_level, backlight_level);
/* If DMCU is in reset state, DMCU is uninitialized */
- if (is_dmcu_initialized(abm))
+ if (use_smooth_brightness)
dmcu_set_backlight_level(abm_dce,
backlight_level,
frame_ramp,
@@ -425,8 +416,7 @@ static const struct abm_funcs dce_funcs = {
.init_backlight = dce_abm_init_backlight,
.set_backlight_level = dce_abm_set_backlight_level,
.get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
- .set_abm_immediate_disable = dce_abm_immediate_disable,
- .is_dmcu_initialized = is_dmcu_initialized
+ .set_abm_immediate_disable = dce_abm_immediate_disable
};
static void dce_abm_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 59e909ec88f2..ff9436966041 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -37,8 +37,7 @@
SR(LVTMA_PWRSEQ_REF_DIV), \
SR(MASTER_COMM_CNTL_REG), \
SR(MASTER_COMM_CMD_REG), \
- SR(MASTER_COMM_DATA_REG1), \
- SR(DMCU_STATUS)
+ SR(MASTER_COMM_DATA_REG1)
#define ABM_DCE110_COMMON_REG_LIST() \
ABM_COMMON_REG_LIST_DCE_BASE(), \
@@ -84,8 +83,7 @@
ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
- ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh), \
- ABM_SF(DMCU_STATUS, UC_IN_RESET, mask_sh)
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh)
#define ABM_MASK_SH_LIST_DCE110(mask_sh) \
ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
@@ -174,7 +172,6 @@
type MASTER_COMM_CMD_REG_BYTE2; \
type BL_PWM_REF_DIV; \
type BL_PWM_EN; \
- type UC_IN_RESET; \
type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
type BL_PWM_GRP1_REG_LOCK; \
type BL_PWM_GRP1_REG_UPDATE_PENDING
@@ -206,7 +203,6 @@ struct dce_abm_registers {
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_DATA_REG1;
uint32_t BIOS_SCRATCH_2;
- uint32_t DMCU_STATUS;
uint32_t BL_PWM_GRP1_REG_LOCK;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 9031d22285ea..9e98a5f39a6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -29,7 +29,6 @@
#include "fixed32_32.h"
#include "bios_parser_interface.h"
#include "dc.h"
-#include "dce_abm.h"
#include "dmcu.h"
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn_calcs.h"
@@ -384,7 +383,6 @@ static int dce112_set_clock(
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk->ctx->dc_bios;
struct dc *core_dc = clk->ctx->dc;
- struct abm *abm = core_dc->res_pool->abm;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
@@ -417,7 +415,7 @@ static int dce112_set_clock(
bp->funcs->set_dce_clock(bp, &dce_clk_params);
- if (abm->funcs->is_dmcu_initialized(abm) && clk_dce->dfs_bypass_disp_clk != actual_clock)
+ if (clk_dce->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_clock / 1000 / 7);
clk_dce->dfs_bypass_disp_clk = actual_clock;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a6de99db0444..f663adb33584 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -263,15 +263,35 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
}
+static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_uc_reset;
+
+ /* read whether the microcontroller is held in reset */
+ REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
+
+ /* DMCU is not running */
+ if (dmcu_uc_reset)
+ return false;
+
+ return true;
+}
+
static void dce_psr_wait_loop(
struct dmcu *dmcu,
unsigned int wait_loop_number)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+
if (dmcu->cached_wait_loop_number == wait_loop_number)
return;
+ /* DMCU is not running */
+ if (!dce_is_dmcu_initialized(dmcu))
+ return;
+
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
@@ -691,6 +711,14 @@ static void dcn10_get_psr_wait_loop(
return;
}
+static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
+{
+ /* microcontroller is not running */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
+ return true;
+}
+
#endif
static const struct dmcu_funcs dce_funcs = {
@@ -700,7 +728,8 @@ static const struct dmcu_funcs dce_funcs = {
.setup_psr = dce_dmcu_setup_psr,
.get_psr_state = dce_get_dmcu_psr_state,
.set_psr_wait_loop = dce_psr_wait_loop,
- .get_psr_wait_loop = dce_get_psr_wait_loop
+ .get_psr_wait_loop = dce_get_psr_wait_loop,
+ .is_dmcu_initialized = dce_is_dmcu_initialized
};
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -711,7 +740,8 @@ static const struct dmcu_funcs dcn10_funcs = {
.setup_psr = dcn10_dmcu_setup_psr,
.get_psr_state = dcn10_get_dmcu_psr_state,
.set_psr_wait_loop = dcn10_psr_wait_loop,
- .get_psr_wait_loop = dcn10_get_psr_wait_loop
+ .get_psr_wait_loop = dcn10_get_psr_wait_loop,
+ .is_dmcu_initialized = dcn10_is_dmcu_initialized
};
#endif
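
    With is_dmcu_initialized() moved from the ABM to the DMCU, callers gate
    ramped brightness on the microcontroller actually running instead of poking
    DMCU_STATUS through the ABM register block. A sketch of the call pattern
    dc_link_set_backlight_level() now uses (dmcu/abm pointers as in that
    function):

    /* Smooth (frame-ramped) brightness is only requested when the DMCU
     * microcontroller is out of reset; otherwise the level is applied
     * immediately via the BL_PWM path.
     */
    bool use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);

    abm->funcs->set_backlight_level(abm, level, frame_ramp,
                                    controller_id, use_smooth_brightness);
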
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 4c25e2dd28f8..1d4546f23135 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -62,6 +62,8 @@
DMCU_ENABLE, mask_sh), \
DMCU_SF(DMCU_STATUS, \
UC_IN_STOP_MODE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_RESET, mask_sh), \
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
IRAM_HOST_ACCESS_EN, mask_sh), \
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
@@ -98,6 +100,7 @@
type IRAM_RD_ADDR_AUTO_INC; \
type DMCU_ENABLE; \
type UC_IN_STOP_MODE; \
+ type UC_IN_RESET; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_INTERRUPT; \
type DPHY_RX_FAST_TRAINING_CAPABLE; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 3b0db253ac22..b73db9e78437 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -582,7 +582,8 @@ struct dce_hwseq_registers {
type DOMAIN7_PGFSM_PWR_STATUS; \
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
- type DENTIST_DPPCLK_WDIVIDER;
+ type DENTIST_DPPCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_WDIVIDER;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index bad70c6b3aad..a266e3f5e75f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -1072,21 +1072,6 @@ void dce110_link_encoder_disable_output(
/* disable encoder */
if (dc_is_dp_signal(signal))
link_encoder_disable(enc110);
-
- /*
- * TODO: Power control cause regression, we should implement
- * it properly, for now just comment it.
- */
-// if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
-// /* power down eDP panel */
-// link_encoder_edp_wait_for_hpd_ready(
-// enc,
-// enc->connector,
-// false);
-//
-// link_encoder_edp_power_control(
-// enc, false);
-// }
}
void dce110_link_encoder_dp_set_lane_settings(
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index e650bdcd9423..86cdd7b4811f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -354,8 +354,8 @@ static bool convert_to_custom_float(struct pwl_result_data *rgb_resulted,
return false;
}
- if (!convert_to_custom_float_format(arr_points[2].slope, &fmt,
- &arr_points[2].custom_float_slope)) {
+ if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
+ &arr_points[1].custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -870,8 +870,6 @@ void hwss_edp_power_control(
"%s: Skipping Panel Power action: %s\n",
__func__, (power_up ? "On":"Off"));
}
-
- hwss_edp_wait_for_hpd_ready(link, true);
}
/*todo: cloned in stream enc, fix*/
@@ -972,11 +970,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
}
/* blank at encoder level */
- if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
- hwss_edp_backlight_control(link, false);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
- }
+
link->link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
pipe_ctx->stream_res.stream_enc->id,
@@ -988,15 +984,12 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
struct encoder_unblank_param params = { { 0 } };
- struct dc_link *link = pipe_ctx->stream->sink->link;
/* only 3 items below are used by unblank */
params.pixel_clk_khz =
pipe_ctx->stream->timing.pix_clk_khz;
params.link_settings.link_rate = link_settings->link_rate;
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
- if (link->connector_signal == SIGNAL_TYPE_EDP)
- hwss_edp_backlight_control(link, true);
}
@@ -1342,10 +1335,8 @@ static void power_down_encoders(struct dc *dc)
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
- if (connector_id == CONNECTOR_ID_EDP) {
+ if (connector_id == CONNECTOR_ID_EDP)
signal = SIGNAL_TYPE_EDP;
- hwss_edp_backlight_control(dc->links[i], false);
- }
}
dc->links[i]->link_enc->funcs->disable_output(
@@ -1698,60 +1689,54 @@ static void apply_min_clocks(
/*
* Check if FBC can be enabled
*/
-static enum dc_status validate_fbc(struct dc *dc,
- struct dc_state *context)
+static bool should_enable_fbc(struct dc *dc,
+ struct dc_state *context)
{
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[0];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
ASSERT(dc->fbc_compressor);
/* FBC memory should be allocated */
if (!dc->ctx->fbc_gpu_addr)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Only supports single display */
if (context->stream_count != 1)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* PSR should not be enabled */
if (pipe_ctx->stream->sink->link->psr_enabled)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Nothing to compress */
if (!pipe_ctx->plane_state)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Only for non-linear tiling */
if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
- return DC_ERROR_UNEXPECTED;
+ return false;
- return DC_OK;
+ return true;
}
/*
* Enable FBC
*/
-static enum dc_status enable_fbc(struct dc *dc,
- struct dc_state *context)
+static void enable_fbc(struct dc *dc,
+ struct dc_state *context)
{
- enum dc_status status = validate_fbc(dc, context);
-
- if (status == DC_OK) {
+ if (should_enable_fbc(dc, context)) {
/* Program GRPH COMPRESSED ADDRESS and PITCH */
struct compr_addr_and_pitch_params params = {0, 0, 0};
struct compressor *compr = dc->fbc_compressor;
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[0];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
- params.source_view_width =
- pipe_ctx->stream->timing.h_addressable;
- params.source_view_height =
- pipe_ctx->stream->timing.v_addressable;
+ params.source_view_width = pipe_ctx->stream->timing.h_addressable;
+ params.source_view_height = pipe_ctx->stream->timing.v_addressable;
compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
@@ -1760,7 +1745,6 @@ static enum dc_status enable_fbc(struct dc *dc,
compr->funcs->enable_fbc(compr, &params);
}
- return status;
}
#endif
@@ -2026,8 +2010,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->stream == pipe_ctx_old->stream)
continue;
- if (pipe_ctx->stream && pipe_ctx_old->stream
- && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
continue;
if (pipe_ctx->top_pipe)
@@ -2063,9 +2046,6 @@ enum dc_status dce110_apply_ctx_to_hw(
context,
dc);
- if (dc->hwss.enable_plane)
- dc->hwss.enable_plane(dc, pipe_ctx, context);
-
if (DC_OK != status)
return status;
}
@@ -2095,16 +2075,8 @@ static void set_default_colors(struct pipe_ctx *pipe_ctx)
struct default_adjustment default_adjust = { 0 };
default_adjust.force_hw_default = false;
- if (pipe_ctx->plane_state == NULL)
- default_adjust.in_color_space = COLOR_SPACE_SRGB;
- else
- default_adjust.in_color_space =
- pipe_ctx->plane_state->color_space;
- if (pipe_ctx->stream == NULL)
- default_adjust.out_color_space = COLOR_SPACE_SRGB;
- else
- default_adjust.out_color_space =
- pipe_ctx->stream->output_color_space;
+ default_adjust.in_color_space = pipe_ctx->plane_state->color_space;
+ default_adjust.out_color_space = pipe_ctx->stream->output_color_space;
default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format;
@@ -2872,13 +2844,12 @@ static void dce110_apply_ctx_for_surface(
continue;
/* Need to allocate mem before program front end for Fiji */
- if (pipe_ctx->plane_res.mi != NULL)
- pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
- pipe_ctx->plane_res.mi,
- pipe_ctx->stream->timing.h_total,
- pipe_ctx->stream->timing.v_total,
- pipe_ctx->stream->timing.pix_clk_khz,
- context->stream_count);
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
+ pipe_ctx->plane_res.mi,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ pipe_ctx->stream->timing.pix_clk_khz,
+ context->stream_count);
dce110_program_front_end_for_pipe(dc, pipe_ctx);
@@ -2985,6 +2956,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.pplib_apply_display_requirements = pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 2dd6ac637572..fc637647f643 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -77,5 +77,9 @@ void hwss_edp_backlight_control(
struct dc_link *link,
bool enable);
+void hwss_edp_wait_for_hpd_ready(
+ struct dc_link *link,
+ bool power_up);
+
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 9eac228315b5..5469bdfe19f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -23,7 +23,7 @@
# Makefile for DCN.
DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
- dcn10_dpp.o dcn10_opp.o dcn10_timing_generator.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 7f579cb19f4b..53ba3600ee6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -22,11 +22,12 @@
* Authors: AMD
*
*/
-
+#include "dc.h"
#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "dcn10_cm_common.h"
+#include "custom_float.h"
#define REG(reg) reg
@@ -121,3 +122,294 @@ void cm_helper_program_xfer_func(
}
}
+
+
+
+bool cm_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num,
+ bool fixpoint)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (fixpoint == true)
+ arr_points[1].custom_float_y = dal_fixed31_32_clamp_u0d14(arr_points[1].y);
+ else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
+ &arr_points[1].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (hw_points_num == 0 || rgb_resulted == NULL || fixpoint == true)
+ return true;
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(rgb->red, &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->green, &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->blue, &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+
+
+#define MAX_REGIONS_NUMBER 34
+#define MAX_LOW_POINT 25
+#define NUMBER_SEGMENTS 32
+
+bool cm_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t segment_start, segment_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ PERF_TRACE();
+
+ arr_points = lut_params->arr_points;
+ rgb_resulted = lut_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+ for (i = 0; i < 32 ; i++)
+ seg_distr[i] = 3;
+
+ segment_start = -25;
+ segment_end = 7;
+ } else {
+ /* 10 segments
+ * segment is from 2^-10 to 2^0
+ * Fewer than 256 points are used here, as an optimization
+ */
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 5;
+ seg_distr[9] = 5;
+
+ segment_start = -10;
+ segment_end = 0;
+ }
+
+ for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (segment_end - segment_start); k++) {
+ increment = NUMBER_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from the last HW X point,
+ * with the slope such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ lut_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1) {
+ lut_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ lut_params->arr_curve_points[i].offset =
+ lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ lut_params->arr_curve_points[k].segments_num = seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
+
+ if (fixpoint) {
+ rgb->delta_red_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_red);
+ rgb->delta_green_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_green);
+ rgb->delta_blue_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_blue);
+ rgb->red_reg = dal_fixed31_32_clamp_u0d14(rgb->red);
+ rgb->green_reg = dal_fixed31_32_clamp_u0d14(rgb->green);
+ rgb->blue_reg = dal_fixed31_32_clamp_u0d14(rgb->blue);
+ }
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+ cm_helper_convert_to_custom_float(rgb_resulted,
+ lut_params->arr_points,
+ hw_points, fixpoint);
+
+ return true;
+}
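For reference, the two seg_distr layouts above resolve to the following point counts, derived directly from the "1 << seg_distr[k]" sum in the code:

        PQ:      hw_points = 32 * 2^3              = 256
        default: hw_points = 2^3 + 7*2^4 + 2*2^5   = 8 + 112 + 64 = 184

In the default case the arr_curve_points offsets accumulate as 0, 8, 24, 40, 56, 72, 88, 104, 120, 152 from the "offset + (1 << seg_distr[k])" recurrence, which is consistent with the 184-point total.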
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 64836dcf21f2..64e476b83bcb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -96,4 +96,14 @@ void cm_helper_program_xfer_func(
const struct pwl_params *params,
const struct xfer_func_reg *reg);
+bool cm_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num,
+ bool fixpoint);
+
+bool cm_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 8df3945370cf..f2a08b156cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
scl_data->taps.h_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert))
scl_data->taps.v_taps = 1;
- /*
- * Spreadsheet doesn't handle taps_c is one properly,
- * need to force Chroma to always be scaled to pass
- * bandwidth validation.
- */
+ if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+ scl_data->taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+ scl_data->taps.v_taps_c = 1;
}
return true;
@@ -386,10 +385,9 @@ void dpp1_cnv_setup (
void dpp1_set_cursor_attributes(
struct dpp *dpp_base,
- const struct dc_cursor_attributes *attr)
+ enum dc_cursor_color_format color_format)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- enum dc_cursor_color_format color_format = attr->color_format;
REG_UPDATE_2(CURSOR0_CONTROL,
CUR0_MODE, color_format,
@@ -402,13 +400,6 @@ void dpp1_set_cursor_attributes(
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
-
- /* TODO: Fixed vs float */
-
- REG_UPDATE_3(FORMAT_CONTROL,
- CNVC_BYPASS, 0,
- FORMAT_CONTROL__ALPHA_EN, 1,
- FORMAT_EXPANSION_MODE, 0);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index ad71fb50f8a5..f56ee4d08d89 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -730,8 +730,9 @@
type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_BLNDGAM_LUT_WRITE_EN_MASK; \
type CM_BLNDGAM_LUT_WRITE_SEL; \
+ type CM_BLNDGAM_CONFIG_STATUS; \
type CM_BLNDGAM_LUT_INDEX; \
- type CM_BLNDGAM_LUT_DATA; \
+ type BLNDGAM_MEM_PWR_FORCE; \
type CM_3DLUT_MODE; \
type CM_3DLUT_SIZE; \
type CM_3DLUT_INDEX; \
@@ -905,6 +906,7 @@
type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \
type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_SHAPER_LUT_WRITE_EN_MASK; \
+ type CM_SHAPER_CONFIG_STATUS; \
type CM_SHAPER_LUT_WRITE_SEL; \
type CM_SHAPER_LUT_INDEX; \
type CM_SHAPER_LUT_DATA; \
@@ -1005,258 +1007,255 @@
type CM_BYPASS; \
type FORMAT_CONTROL__ALPHA_EN; \
type CUR0_COLOR0; \
- type CUR0_COLOR1
-
-
+ type CUR0_COLOR1;
struct dcn_dpp_shift {
- TF_REG_FIELD_LIST(uint8_t);
+ TF_REG_FIELD_LIST(uint8_t)
};
struct dcn_dpp_mask {
- TF_REG_FIELD_LIST(uint32_t);
+ TF_REG_FIELD_LIST(uint32_t)
};
-
-
+#define DPP_COMMON_REG_VARIABLE_LIST \
+ uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT; \
+ uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM; \
+ uint32_t OTG_H_BLANK; \
+ uint32_t OTG_V_BLANK; \
+ uint32_t SCL_MODE; \
+ uint32_t LB_DATA_FORMAT; \
+ uint32_t LB_MEMORY_CTRL; \
+ uint32_t DSCL_AUTOCAL; \
+ uint32_t SCL_BLACK_OFFSET; \
+ uint32_t SCL_TAP_CONTROL; \
+ uint32_t SCL_COEF_RAM_TAP_SELECT; \
+ uint32_t SCL_COEF_RAM_TAP_DATA; \
+ uint32_t DSCL_2TAP_CONTROL; \
+ uint32_t MPC_SIZE; \
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO; \
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO; \
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C; \
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO_C; \
+ uint32_t SCL_HORZ_FILTER_INIT; \
+ uint32_t SCL_HORZ_FILTER_INIT_C; \
+ uint32_t SCL_VERT_FILTER_INIT; \
+ uint32_t SCL_VERT_FILTER_INIT_BOT; \
+ uint32_t SCL_VERT_FILTER_INIT_C; \
+ uint32_t SCL_VERT_FILTER_INIT_BOT_C; \
+ uint32_t RECOUT_START; \
+ uint32_t RECOUT_SIZE; \
+ uint32_t CM_GAMUT_REMAP_CONTROL; \
+ uint32_t CM_GAMUT_REMAP_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_C33_C34; \
+ uint32_t CM_COMA_C11_C12; \
+ uint32_t CM_COMA_C33_C34; \
+ uint32_t CM_COMB_C11_C12; \
+ uint32_t CM_COMB_C33_C34; \
+ uint32_t CM_OCSC_CONTROL; \
+ uint32_t CM_OCSC_C11_C12; \
+ uint32_t CM_OCSC_C33_C34; \
+ uint32_t CM_MEM_PWR_CTRL; \
+ uint32_t CM_RGAM_LUT_DATA; \
+ uint32_t CM_RGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_RGAM_LUT_INDEX; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_RGAM_RAMB_REGION_0_1; \
+ uint32_t CM_RGAM_RAMB_REGION_32_33; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_RGAM_RAMA_REGION_0_1; \
+ uint32_t CM_RGAM_RAMA_REGION_32_33; \
+ uint32_t CM_RGAM_CONTROL; \
+ uint32_t CM_CMOUT_CONTROL; \
+ uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_BLNDGAM_CONTROL; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_0_1; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_2_3; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_4_5; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_6_7; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_8_9; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_10_11; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_12_13; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_14_15; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_16_17; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_18_19; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_20_21; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_22_23; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_24_25; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_26_27; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_28_29; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_30_31; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_32_33; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_0_1; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_2_3; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_4_5; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_6_7; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_8_9; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_10_11; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_12_13; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_14_15; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_16_17; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_18_19; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_20_21; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_22_23; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_24_25; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_26_27; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_28_29; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_30_31; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_32_33; \
+ uint32_t CM_BLNDGAM_LUT_INDEX; \
+ uint32_t CM_3DLUT_MODE; \
+ uint32_t CM_3DLUT_INDEX; \
+ uint32_t CM_3DLUT_DATA; \
+ uint32_t CM_3DLUT_DATA_30BIT; \
+ uint32_t CM_3DLUT_READ_WRITE_CONTROL; \
+ uint32_t CM_SHAPER_LUT_WRITE_EN_MASK; \
+ uint32_t CM_SHAPER_CONTROL; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_B; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_G; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_R; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_B; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_G; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_R; \
+ uint32_t CM_SHAPER_RAMB_REGION_0_1; \
+ uint32_t CM_SHAPER_RAMB_REGION_2_3; \
+ uint32_t CM_SHAPER_RAMB_REGION_4_5; \
+ uint32_t CM_SHAPER_RAMB_REGION_6_7; \
+ uint32_t CM_SHAPER_RAMB_REGION_8_9; \
+ uint32_t CM_SHAPER_RAMB_REGION_10_11; \
+ uint32_t CM_SHAPER_RAMB_REGION_12_13; \
+ uint32_t CM_SHAPER_RAMB_REGION_14_15; \
+ uint32_t CM_SHAPER_RAMB_REGION_16_17; \
+ uint32_t CM_SHAPER_RAMB_REGION_18_19; \
+ uint32_t CM_SHAPER_RAMB_REGION_20_21; \
+ uint32_t CM_SHAPER_RAMB_REGION_22_23; \
+ uint32_t CM_SHAPER_RAMB_REGION_24_25; \
+ uint32_t CM_SHAPER_RAMB_REGION_26_27; \
+ uint32_t CM_SHAPER_RAMB_REGION_28_29; \
+ uint32_t CM_SHAPER_RAMB_REGION_30_31; \
+ uint32_t CM_SHAPER_RAMB_REGION_32_33; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_B; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_G; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_R; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_B; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_G; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_R; \
+ uint32_t CM_SHAPER_RAMA_REGION_0_1; \
+ uint32_t CM_SHAPER_RAMA_REGION_2_3; \
+ uint32_t CM_SHAPER_RAMA_REGION_4_5; \
+ uint32_t CM_SHAPER_RAMA_REGION_6_7; \
+ uint32_t CM_SHAPER_RAMA_REGION_8_9; \
+ uint32_t CM_SHAPER_RAMA_REGION_10_11; \
+ uint32_t CM_SHAPER_RAMA_REGION_12_13; \
+ uint32_t CM_SHAPER_RAMA_REGION_14_15; \
+ uint32_t CM_SHAPER_RAMA_REGION_16_17; \
+ uint32_t CM_SHAPER_RAMA_REGION_18_19; \
+ uint32_t CM_SHAPER_RAMA_REGION_20_21; \
+ uint32_t CM_SHAPER_RAMA_REGION_22_23; \
+ uint32_t CM_SHAPER_RAMA_REGION_24_25; \
+ uint32_t CM_SHAPER_RAMA_REGION_26_27; \
+ uint32_t CM_SHAPER_RAMA_REGION_28_29; \
+ uint32_t CM_SHAPER_RAMA_REGION_30_31; \
+ uint32_t CM_SHAPER_RAMA_REGION_32_33; \
+ uint32_t CM_SHAPER_LUT_INDEX; \
+ uint32_t CM_SHAPER_LUT_DATA; \
+ uint32_t CM_ICSC_CONTROL; \
+ uint32_t CM_ICSC_C11_C12; \
+ uint32_t CM_ICSC_C33_C34; \
+ uint32_t CM_BNS_VALUES_R; \
+ uint32_t CM_BNS_VALUES_G; \
+ uint32_t CM_BNS_VALUES_B; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_DGAM_RAMB_REGION_0_1; \
+ uint32_t CM_DGAM_RAMB_REGION_14_15; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_DGAM_RAMA_REGION_0_1; \
+ uint32_t CM_DGAM_RAMA_REGION_14_15; \
+ uint32_t CM_DGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_DGAM_LUT_INDEX; \
+ uint32_t CM_DGAM_LUT_DATA; \
+ uint32_t CM_CONTROL; \
+ uint32_t CM_DGAM_CONTROL; \
+ uint32_t CM_IGAM_CONTROL; \
+ uint32_t CM_IGAM_LUT_RW_CONTROL; \
+ uint32_t CM_IGAM_LUT_RW_INDEX; \
+ uint32_t CM_IGAM_LUT_SEQ_COLOR; \
+ uint32_t FORMAT_CONTROL; \
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT; \
+ uint32_t CURSOR_CONTROL; \
+ uint32_t CURSOR0_CONTROL; \
+ uint32_t CURSOR0_COLOR0; \
+ uint32_t CURSOR0_COLOR1;
struct dcn_dpp_registers {
- uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT;
- uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM;
- uint32_t OTG_H_BLANK;
- uint32_t OTG_V_BLANK;
- uint32_t SCL_MODE;
- uint32_t LB_DATA_FORMAT;
- uint32_t LB_MEMORY_CTRL;
- uint32_t DSCL_AUTOCAL;
- uint32_t SCL_BLACK_OFFSET;
- uint32_t SCL_TAP_CONTROL;
- uint32_t SCL_COEF_RAM_TAP_SELECT;
- uint32_t SCL_COEF_RAM_TAP_DATA;
- uint32_t DSCL_2TAP_CONTROL;
- uint32_t MPC_SIZE;
- uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
- uint32_t SCL_VERT_FILTER_SCALE_RATIO;
- uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C;
- uint32_t SCL_VERT_FILTER_SCALE_RATIO_C;
- uint32_t SCL_HORZ_FILTER_INIT;
- uint32_t SCL_HORZ_FILTER_INIT_C;
- uint32_t SCL_VERT_FILTER_INIT;
- uint32_t SCL_VERT_FILTER_INIT_BOT;
- uint32_t SCL_VERT_FILTER_INIT_C;
- uint32_t SCL_VERT_FILTER_INIT_BOT_C;
- uint32_t RECOUT_START;
- uint32_t RECOUT_SIZE;
- uint32_t CM_GAMUT_REMAP_CONTROL;
- uint32_t CM_GAMUT_REMAP_C11_C12;
- uint32_t CM_GAMUT_REMAP_C33_C34;
- uint32_t CM_COMA_C11_C12;
- uint32_t CM_COMA_C33_C34;
- uint32_t CM_COMB_C11_C12;
- uint32_t CM_COMB_C33_C34;
- uint32_t CM_OCSC_CONTROL;
- uint32_t CM_OCSC_C11_C12;
- uint32_t CM_OCSC_C33_C34;
- uint32_t CM_MEM_PWR_CTRL;
- uint32_t CM_RGAM_LUT_DATA;
- uint32_t CM_RGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_RGAM_LUT_INDEX;
- uint32_t CM_RGAM_RAMB_START_CNTL_B;
- uint32_t CM_RGAM_RAMB_START_CNTL_G;
- uint32_t CM_RGAM_RAMB_START_CNTL_R;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_RGAM_RAMB_END_CNTL1_B;
- uint32_t CM_RGAM_RAMB_END_CNTL2_B;
- uint32_t CM_RGAM_RAMB_END_CNTL1_G;
- uint32_t CM_RGAM_RAMB_END_CNTL2_G;
- uint32_t CM_RGAM_RAMB_END_CNTL1_R;
- uint32_t CM_RGAM_RAMB_END_CNTL2_R;
- uint32_t CM_RGAM_RAMB_REGION_0_1;
- uint32_t CM_RGAM_RAMB_REGION_32_33;
- uint32_t CM_RGAM_RAMA_START_CNTL_B;
- uint32_t CM_RGAM_RAMA_START_CNTL_G;
- uint32_t CM_RGAM_RAMA_START_CNTL_R;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_RGAM_RAMA_END_CNTL1_B;
- uint32_t CM_RGAM_RAMA_END_CNTL2_B;
- uint32_t CM_RGAM_RAMA_END_CNTL1_G;
- uint32_t CM_RGAM_RAMA_END_CNTL2_G;
- uint32_t CM_RGAM_RAMA_END_CNTL1_R;
- uint32_t CM_RGAM_RAMA_END_CNTL2_R;
- uint32_t CM_RGAM_RAMA_REGION_0_1;
- uint32_t CM_RGAM_RAMA_REGION_32_33;
- uint32_t CM_RGAM_CONTROL;
- uint32_t CM_CMOUT_CONTROL;
- uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_BLNDGAM_CONTROL;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_B;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_G;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_R;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R;
- uint32_t CM_BLNDGAM_RAMB_REGION_0_1;
- uint32_t CM_BLNDGAM_RAMB_REGION_2_3;
- uint32_t CM_BLNDGAM_RAMB_REGION_4_5;
- uint32_t CM_BLNDGAM_RAMB_REGION_6_7;
- uint32_t CM_BLNDGAM_RAMB_REGION_8_9;
- uint32_t CM_BLNDGAM_RAMB_REGION_10_11;
- uint32_t CM_BLNDGAM_RAMB_REGION_12_13;
- uint32_t CM_BLNDGAM_RAMB_REGION_14_15;
- uint32_t CM_BLNDGAM_RAMB_REGION_16_17;
- uint32_t CM_BLNDGAM_RAMB_REGION_18_19;
- uint32_t CM_BLNDGAM_RAMB_REGION_20_21;
- uint32_t CM_BLNDGAM_RAMB_REGION_22_23;
- uint32_t CM_BLNDGAM_RAMB_REGION_24_25;
- uint32_t CM_BLNDGAM_RAMB_REGION_26_27;
- uint32_t CM_BLNDGAM_RAMB_REGION_28_29;
- uint32_t CM_BLNDGAM_RAMB_REGION_30_31;
- uint32_t CM_BLNDGAM_RAMB_REGION_32_33;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_B;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_G;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_R;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R;
- uint32_t CM_BLNDGAM_RAMA_REGION_0_1;
- uint32_t CM_BLNDGAM_RAMA_REGION_2_3;
- uint32_t CM_BLNDGAM_RAMA_REGION_4_5;
- uint32_t CM_BLNDGAM_RAMA_REGION_6_7;
- uint32_t CM_BLNDGAM_RAMA_REGION_8_9;
- uint32_t CM_BLNDGAM_RAMA_REGION_10_11;
- uint32_t CM_BLNDGAM_RAMA_REGION_12_13;
- uint32_t CM_BLNDGAM_RAMA_REGION_14_15;
- uint32_t CM_BLNDGAM_RAMA_REGION_16_17;
- uint32_t CM_BLNDGAM_RAMA_REGION_18_19;
- uint32_t CM_BLNDGAM_RAMA_REGION_20_21;
- uint32_t CM_BLNDGAM_RAMA_REGION_22_23;
- uint32_t CM_BLNDGAM_RAMA_REGION_24_25;
- uint32_t CM_BLNDGAM_RAMA_REGION_26_27;
- uint32_t CM_BLNDGAM_RAMA_REGION_28_29;
- uint32_t CM_BLNDGAM_RAMA_REGION_30_31;
- uint32_t CM_BLNDGAM_RAMA_REGION_32_33;
- uint32_t CM_BLNDGAM_LUT_INDEX;
- uint32_t CM_BLNDGAM_LUT_DATA;
- uint32_t CM_3DLUT_MODE;
- uint32_t CM_3DLUT_INDEX;
- uint32_t CM_3DLUT_DATA;
- uint32_t CM_3DLUT_DATA_30BIT;
- uint32_t CM_3DLUT_READ_WRITE_CONTROL;
- uint32_t CM_SHAPER_LUT_WRITE_EN_MASK;
- uint32_t CM_SHAPER_CONTROL;
- uint32_t CM_SHAPER_RAMB_START_CNTL_B;
- uint32_t CM_SHAPER_RAMB_START_CNTL_G;
- uint32_t CM_SHAPER_RAMB_START_CNTL_R;
- uint32_t CM_SHAPER_RAMB_END_CNTL_B;
- uint32_t CM_SHAPER_RAMB_END_CNTL_G;
- uint32_t CM_SHAPER_RAMB_END_CNTL_R;
- uint32_t CM_SHAPER_RAMB_REGION_0_1;
- uint32_t CM_SHAPER_RAMB_REGION_2_3;
- uint32_t CM_SHAPER_RAMB_REGION_4_5;
- uint32_t CM_SHAPER_RAMB_REGION_6_7;
- uint32_t CM_SHAPER_RAMB_REGION_8_9;
- uint32_t CM_SHAPER_RAMB_REGION_10_11;
- uint32_t CM_SHAPER_RAMB_REGION_12_13;
- uint32_t CM_SHAPER_RAMB_REGION_14_15;
- uint32_t CM_SHAPER_RAMB_REGION_16_17;
- uint32_t CM_SHAPER_RAMB_REGION_18_19;
- uint32_t CM_SHAPER_RAMB_REGION_20_21;
- uint32_t CM_SHAPER_RAMB_REGION_22_23;
- uint32_t CM_SHAPER_RAMB_REGION_24_25;
- uint32_t CM_SHAPER_RAMB_REGION_26_27;
- uint32_t CM_SHAPER_RAMB_REGION_28_29;
- uint32_t CM_SHAPER_RAMB_REGION_30_31;
- uint32_t CM_SHAPER_RAMB_REGION_32_33;
- uint32_t CM_SHAPER_RAMA_START_CNTL_B;
- uint32_t CM_SHAPER_RAMA_START_CNTL_G;
- uint32_t CM_SHAPER_RAMA_START_CNTL_R;
- uint32_t CM_SHAPER_RAMA_END_CNTL_B;
- uint32_t CM_SHAPER_RAMA_END_CNTL_G;
- uint32_t CM_SHAPER_RAMA_END_CNTL_R;
- uint32_t CM_SHAPER_RAMA_REGION_0_1;
- uint32_t CM_SHAPER_RAMA_REGION_2_3;
- uint32_t CM_SHAPER_RAMA_REGION_4_5;
- uint32_t CM_SHAPER_RAMA_REGION_6_7;
- uint32_t CM_SHAPER_RAMA_REGION_8_9;
- uint32_t CM_SHAPER_RAMA_REGION_10_11;
- uint32_t CM_SHAPER_RAMA_REGION_12_13;
- uint32_t CM_SHAPER_RAMA_REGION_14_15;
- uint32_t CM_SHAPER_RAMA_REGION_16_17;
- uint32_t CM_SHAPER_RAMA_REGION_18_19;
- uint32_t CM_SHAPER_RAMA_REGION_20_21;
- uint32_t CM_SHAPER_RAMA_REGION_22_23;
- uint32_t CM_SHAPER_RAMA_REGION_24_25;
- uint32_t CM_SHAPER_RAMA_REGION_26_27;
- uint32_t CM_SHAPER_RAMA_REGION_28_29;
- uint32_t CM_SHAPER_RAMA_REGION_30_31;
- uint32_t CM_SHAPER_RAMA_REGION_32_33;
- uint32_t CM_SHAPER_LUT_INDEX;
- uint32_t CM_SHAPER_LUT_DATA;
- uint32_t CM_ICSC_CONTROL;
- uint32_t CM_ICSC_C11_C12;
- uint32_t CM_ICSC_C33_C34;
- uint32_t CM_BNS_VALUES_R;
- uint32_t CM_BNS_VALUES_G;
- uint32_t CM_BNS_VALUES_B;
- uint32_t CM_DGAM_RAMB_START_CNTL_B;
- uint32_t CM_DGAM_RAMB_START_CNTL_G;
- uint32_t CM_DGAM_RAMB_START_CNTL_R;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_DGAM_RAMB_END_CNTL1_B;
- uint32_t CM_DGAM_RAMB_END_CNTL2_B;
- uint32_t CM_DGAM_RAMB_END_CNTL1_G;
- uint32_t CM_DGAM_RAMB_END_CNTL2_G;
- uint32_t CM_DGAM_RAMB_END_CNTL1_R;
- uint32_t CM_DGAM_RAMB_END_CNTL2_R;
- uint32_t CM_DGAM_RAMB_REGION_0_1;
- uint32_t CM_DGAM_RAMB_REGION_14_15;
- uint32_t CM_DGAM_RAMA_START_CNTL_B;
- uint32_t CM_DGAM_RAMA_START_CNTL_G;
- uint32_t CM_DGAM_RAMA_START_CNTL_R;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_DGAM_RAMA_END_CNTL1_B;
- uint32_t CM_DGAM_RAMA_END_CNTL2_B;
- uint32_t CM_DGAM_RAMA_END_CNTL1_G;
- uint32_t CM_DGAM_RAMA_END_CNTL2_G;
- uint32_t CM_DGAM_RAMA_END_CNTL1_R;
- uint32_t CM_DGAM_RAMA_END_CNTL2_R;
- uint32_t CM_DGAM_RAMA_REGION_0_1;
- uint32_t CM_DGAM_RAMA_REGION_14_15;
- uint32_t CM_DGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_DGAM_LUT_INDEX;
- uint32_t CM_DGAM_LUT_DATA;
- uint32_t CM_CONTROL;
- uint32_t CM_DGAM_CONTROL;
- uint32_t CM_IGAM_CONTROL;
- uint32_t CM_IGAM_LUT_RW_CONTROL;
- uint32_t CM_IGAM_LUT_RW_INDEX;
- uint32_t CM_IGAM_LUT_SEQ_COLOR;
- uint32_t FORMAT_CONTROL;
- uint32_t CNVC_SURFACE_PIXEL_FORMAT;
- uint32_t CURSOR_CONTROL;
- uint32_t CURSOR0_CONTROL;
- uint32_t CURSOR0_COLOR0;
- uint32_t CURSOR0_COLOR1;
+ DPP_COMMON_REG_VARIABLE_LIST
};
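Pulling the register members out into DPP_COMMON_REG_VARIABLE_LIST means ASIC variants can share the common list and append their own registers. A hypothetical derived struct (the dcn2x names below are invented purely for illustration; note the macro already carries the trailing semicolon on its last member) would be:

        struct dcn2x_dpp_registers {
                DPP_COMMON_REG_VARIABLE_LIST
                uint32_t SOME_DCN2X_ONLY_REG;   /* hypothetical extra register */
        };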
struct dcn10_dpp {
@@ -1284,6 +1283,10 @@ enum dcn10_input_csc_select {
INPUT_CSC_SELECT_COMA
};
+void dpp1_set_cursor_attributes(
+ struct dpp *dpp_base,
+ enum dc_cursor_color_format color_format);
+
bool dpp1_dscl_is_lb_conf_valid(
int ceil_vratio,
int num_partitions,
@@ -1371,7 +1374,7 @@ void dpp1_cm_program_regamma_lutb_settings(
const struct pwl_params *params);
void dpp1_cm_set_output_csc_adjustment(
struct dpp *dpp_base,
- const struct out_csc_color_matrix *tbl_entry);
+ const uint16_t *regval);
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 4c90043e7b8c..a5b099023652 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -49,6 +49,8 @@
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
struct dcn10_input_csc_matrix {
enum dc_color_space color_space;
uint16_t regval[12];
@@ -223,18 +225,18 @@ void dpp1_cm_set_gamut_remap(
static void dpp1_cm_program_color_matrix(
struct dcn10_dpp *dpp,
- const struct out_csc_color_matrix *tbl_entry)
+ const uint16_t *regval)
{
uint32_t mode;
struct color_matrices_reg gam_regs;
REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
- if (tbl_entry == NULL) {
+ if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
-
+ mode = 4;
gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
@@ -247,7 +249,7 @@ static void dpp1_cm_program_color_matrix(
cm_helper_program_color_matrices(
dpp->base.ctx,
- tbl_entry->regval,
+ regval,
&gam_regs);
} else {
@@ -257,7 +259,7 @@ static void dpp1_cm_program_color_matrix(
cm_helper_program_color_matrices(
dpp->base.ctx,
- tbl_entry->regval,
+ regval,
&gam_regs);
}
}
@@ -266,24 +268,18 @@ void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
enum dc_color_space colorspace)
{
-
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- struct out_csc_color_matrix tbl_entry;
- int i, j;
- int arr_size = sizeof(output_csc_matrix) / sizeof(struct output_csc_matrix);
+ const uint16_t *regval = NULL;
+ int arr_size;
uint32_t ocsc_mode = 4;
- tbl_entry.color_space = colorspace;
-
- for (i = 0; i < arr_size; i++)
- if (output_csc_matrix[i].color_space == colorspace) {
- for (j = 0; j < 12; j++)
- tbl_entry.regval[j] = output_csc_matrix[i].regval[j];
- break;
- }
-
+ regval = find_color_matrix(colorspace, &arr_size);
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ dpp1_cm_program_color_matrix(dpp, regval);
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
- dpp1_cm_program_color_matrix(dpp, &tbl_entry);
}
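find_color_matrix() itself is not visible in this hunk; judging from the open-coded lookup it replaces, it presumably reduces to something close to the sketch below (signature inferred from the call site; the out-parameter semantics are an assumption):

        static const uint16_t *find_color_matrix(
                        enum dc_color_space color_space, int *array_size)
        {
                int i;

                /* scan the shared output CSC table for a matching colorspace */
                for (i = 0; i < NUM_ELEMENTS(output_csc_matrix); i++)
                        if (output_csc_matrix[i].color_space == color_space) {
                                *array_size = NUM_ELEMENTS(output_csc_matrix[i].regval);
                                return output_csc_matrix[i].regval;
                        }
                return NULL;
        }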
static void dpp1_cm_get_reg_field(
@@ -315,41 +311,12 @@ static void dpp1_cm_get_reg_field(
void dpp1_cm_set_output_csc_adjustment(
struct dpp *dpp_base,
- const struct out_csc_color_matrix *tbl_entry)
+ const uint16_t *regval)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- //enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
uint32_t ocsc_mode = 4;
-
- /**
- *if (tbl_entry != NULL) {
- * switch (tbl_entry->color_space) {
- * case COLOR_SPACE_SRGB:
- * case COLOR_SPACE_2020_RGB_FULLRANGE:
- * ocsc_mode = 0;
- * break;
- * case COLOR_SPACE_SRGB_LIMITED:
- * case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- * ocsc_mode = 1;
- * break;
- * case COLOR_SPACE_YCBCR601:
- * case COLOR_SPACE_YCBCR601_LIMITED:
- * ocsc_mode = 2;
- * break;
- * case COLOR_SPACE_YCBCR709:
- * case COLOR_SPACE_YCBCR709_LIMITED:
- * case COLOR_SPACE_2020_YCBCR:
- * ocsc_mode = 3;
- * break;
- * case COLOR_SPACE_UNKNOWN:
- * default:
- * break;
- * }
- *}
- */
-
+ dpp1_cm_program_color_matrix(dpp, regval);
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
- dpp1_cm_program_color_matrix(dpp, tbl_entry);
}
void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 584e82cc5df3..585b33384002 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -48,9 +48,20 @@ void hubp1_set_blank(struct hubp *hubp, bool blank)
HUBP_TTU_DISABLE, blank_en);
if (blank) {
- REG_WAIT(DCHUBP_CNTL,
- HUBP_NO_OUTSTANDING_REQ, 1,
- 1, 200);
+ uint32_t reg_val = REG_READ(DCHUBP_CNTL);
+
+ if (reg_val) {
+ /* init sequence workaround: in case HUBP is
+ * power gated, this wait would time out.
+ *
+ * we just wrote a non-zero value to DCHUBP_CNTL;
+ * if it reads back as 0, HUBP is power gated
+ */
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 200);
+ }
+
hubp->mpcc_id = 0xf;
hubp->opp_id = 0xf;
}
@@ -96,10 +107,12 @@ static void hubp1_vready_workaround(struct hubp *hubp,
}
void hubp1_program_tiling(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
REG_UPDATE_6(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
NUM_BANKS, log_2(info->gfx9.num_banks),
@@ -116,13 +129,14 @@ void hubp1_program_tiling(
}
void hubp1_program_size_and_rotation(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
/* Program data and meta surface pitch (calculation from addrlib)
@@ -178,9 +192,10 @@ void hubp1_program_size_and_rotation(
}
void hubp1_program_pixel_format(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
enum surface_pixel_format format)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t red_bar = 3;
uint32_t blue_bar = 2;
@@ -424,13 +439,11 @@ void hubp1_program_surface_config(
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
{
- struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-
hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
- hubp1_program_tiling(hubp1, tiling_info, format);
+ hubp1_program_tiling(hubp, tiling_info, format);
hubp1_program_size_and_rotation(
- hubp1, rotation, format, plane_size, dcc, horizontal_mirror);
- hubp1_program_pixel_format(hubp1, format);
+ hubp, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_pixel_format(hubp, format);
}
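Taking the generic struct hubp (and downcasting via TO_DCN10_HUBP internally) lets these programmers be installed directly in the hubp function table. The wiring would look roughly like this excerpt, where the member names are assumptions, not confirmed by this hunk:

        static const struct hubp_funcs dcn10_hubp_funcs = {
                /* excerpt only; member names assumed */
                .hubp_program_surface_config = hubp1_program_surface_config,
                .set_cursor_attributes = hubp1_cursor_set_attributes,
                .set_cursor_position = hubp1_cursor_set_position,
        };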
void hubp1_program_requestor(
@@ -765,42 +778,7 @@ void hubp1_read_state(struct dcn10_hubp *hubp1,
QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
}
-enum cursor_pitch {
- CURSOR_PITCH_64_PIXELS = 0,
- CURSOR_PITCH_128_PIXELS,
- CURSOR_PITCH_256_PIXELS
-};
-
-enum cursor_lines_per_chunk {
- CURSOR_LINE_PER_CHUNK_2 = 1,
- CURSOR_LINE_PER_CHUNK_4,
- CURSOR_LINE_PER_CHUNK_8,
- CURSOR_LINE_PER_CHUNK_16
-};
-
-static bool ippn10_cursor_program_control(
- struct dcn10_hubp *hubp1,
- bool pixel_data_invert,
- enum dc_cursor_color_format color_format)
-{
- if (REG(CURSOR_SETTINS))
- REG_SET_2(CURSOR_SETTINS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
- else
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
-
- return true;
-}
-
-static enum cursor_pitch ippn10_get_cursor_pitch(
- unsigned int pitch)
+enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch)
{
enum cursor_pitch hw_pitch;
@@ -823,7 +801,7 @@ static enum cursor_pitch ippn10_get_cursor_pitch(
return hw_pitch;
}
-static enum cursor_lines_per_chunk ippn10_get_lines_per_chunk(
+static enum cursor_lines_per_chunk hubp1_get_lines_per_chunk(
unsigned int cur_width,
enum dc_cursor_color_format format)
{
@@ -849,8 +827,8 @@ void hubp1_cursor_set_attributes(
const struct dc_cursor_attributes *attr)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- enum cursor_pitch hw_pitch = ippn10_get_cursor_pitch(attr->pitch);
- enum cursor_lines_per_chunk lpc = ippn10_get_lines_per_chunk(
+ enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = hubp1_get_lines_per_chunk(
attr->width, attr->color_format);
hubp->curs_attr = *attr;
@@ -863,13 +841,17 @@ void hubp1_cursor_set_attributes(
REG_UPDATE_2(CURSOR_SIZE,
CURSOR_WIDTH, attr->width,
CURSOR_HEIGHT, attr->height);
+
REG_UPDATE_3(CURSOR_CONTROL,
CURSOR_MODE, attr->color_format,
CURSOR_PITCH, hw_pitch,
CURSOR_LINES_PER_CHUNK, lpc);
- ippn10_cursor_program_control(hubp1,
- attr->attribute_flags.bits.INVERT_PIXEL_DATA,
- attr->color_format);
+
+ REG_SET_2(CURSOR_SETTINS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
}
void hubp1_cursor_set_position(
@@ -909,7 +891,8 @@ void hubp1_cursor_set_position(
cur_en = 0; /* not visible beyond left edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp1_cursor_set_attributes(hubp, &hubp->curs_attr);
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a7834dd50716..33e91d9c010f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -127,113 +127,110 @@
SRI(CURSOR_HOT_SPOT, CURSOR, id), \
SRI(CURSOR_DST_OFFSET, CURSOR, id)
-
-
-struct dcn_mi_registers {
- uint32_t DCHUBP_CNTL;
- uint32_t HUBPREQ_DEBUG_DB;
- uint32_t DCSURF_ADDR_CONFIG;
- uint32_t DCSURF_TILING_CONFIG;
- uint32_t DCSURF_SURFACE_PITCH;
- uint32_t DCSURF_SURFACE_PITCH_C;
- uint32_t DCSURF_SURFACE_CONFIG;
- uint32_t DCSURF_FLIP_CONTROL;
- uint32_t DCSURF_PRI_VIEWPORT_DIMENSION;
- uint32_t DCSURF_PRI_VIEWPORT_START;
- uint32_t DCSURF_SEC_VIEWPORT_DIMENSION;
- uint32_t DCSURF_SEC_VIEWPORT_START;
- uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C;
- uint32_t DCSURF_PRI_VIEWPORT_START_C;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
- uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
- uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
- uint32_t DCSURF_SURFACE_INUSE;
- uint32_t DCSURF_SURFACE_INUSE_HIGH;
- uint32_t DCSURF_SURFACE_INUSE_C;
- uint32_t DCSURF_SURFACE_INUSE_HIGH_C;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C;
- uint32_t DCSURF_SURFACE_CONTROL;
- uint32_t HUBPRET_CONTROL;
- uint32_t DCN_EXPANSION_MODE;
- uint32_t DCHUBP_REQ_SIZE_CONFIG;
- uint32_t DCHUBP_REQ_SIZE_CONFIG_C;
- uint32_t BLANK_OFFSET_0;
- uint32_t BLANK_OFFSET_1;
- uint32_t DST_DIMENSIONS;
- uint32_t DST_AFTER_SCALER;
- uint32_t PREFETCH_SETTINS;
- uint32_t PREFETCH_SETTINGS;
- uint32_t VBLANK_PARAMETERS_0;
- uint32_t REF_FREQ_TO_PIX_FREQ;
- uint32_t VBLANK_PARAMETERS_1;
- uint32_t VBLANK_PARAMETERS_3;
- uint32_t NOM_PARAMETERS_0;
- uint32_t NOM_PARAMETERS_1;
- uint32_t NOM_PARAMETERS_4;
- uint32_t NOM_PARAMETERS_5;
- uint32_t PER_LINE_DELIVERY_PRE;
- uint32_t PER_LINE_DELIVERY;
- uint32_t PREFETCH_SETTINS_C;
- uint32_t PREFETCH_SETTINGS_C;
- uint32_t VBLANK_PARAMETERS_2;
- uint32_t VBLANK_PARAMETERS_4;
- uint32_t NOM_PARAMETERS_2;
- uint32_t NOM_PARAMETERS_3;
- uint32_t NOM_PARAMETERS_6;
- uint32_t NOM_PARAMETERS_7;
- uint32_t DCN_TTU_QOS_WM;
- uint32_t DCN_GLOBAL_TTU_CNTL;
- uint32_t DCN_SURF0_TTU_CNTL0;
- uint32_t DCN_SURF0_TTU_CNTL1;
- uint32_t DCN_SURF1_TTU_CNTL0;
- uint32_t DCN_SURF1_TTU_CNTL1;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
- uint32_t DCN_VM_MX_L1_TLB_CNTL;
- uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR;
- uint32_t DCHUBBUB_SDPIF_FB_BASE;
- uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
- uint32_t DCN_VM_FB_LOCATION_TOP;
- uint32_t DCN_VM_FB_LOCATION_BASE;
- uint32_t DCN_VM_FB_OFFSET;
- uint32_t DCN_VM_AGP_BASE;
- uint32_t DCN_VM_AGP_BOT;
- uint32_t DCN_VM_AGP_TOP;
- uint32_t CURSOR_SETTINS;
- uint32_t CURSOR_SETTINGS;
- uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
- uint32_t CURSOR_SURFACE_ADDRESS;
- uint32_t CURSOR_SIZE;
- uint32_t CURSOR_CONTROL;
- uint32_t CURSOR_POSITION;
- uint32_t CURSOR_HOT_SPOT;
- uint32_t CURSOR_DST_OFFSET;
-};
+#define HUBP_COMMON_REG_VARIABLE_LIST \
+ uint32_t DCHUBP_CNTL; \
+ uint32_t HUBPREQ_DEBUG_DB; \
+ uint32_t DCSURF_ADDR_CONFIG; \
+ uint32_t DCSURF_TILING_CONFIG; \
+ uint32_t DCSURF_SURFACE_PITCH; \
+ uint32_t DCSURF_SURFACE_PITCH_C; \
+ uint32_t DCSURF_SURFACE_CONFIG; \
+ uint32_t DCSURF_FLIP_CONTROL; \
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION; \
+ uint32_t DCSURF_PRI_VIEWPORT_START; \
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION; \
+ uint32_t DCSURF_SEC_VIEWPORT_START; \
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
+ uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SURFACE_INUSE; \
+ uint32_t DCSURF_SURFACE_INUSE_HIGH; \
+ uint32_t DCSURF_SURFACE_INUSE_C; \
+ uint32_t DCSURF_SURFACE_INUSE_HIGH_C; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \
+ uint32_t DCSURF_SURFACE_CONTROL; \
+ uint32_t HUBPRET_CONTROL; \
+ uint32_t DCN_EXPANSION_MODE; \
+ uint32_t DCHUBP_REQ_SIZE_CONFIG; \
+ uint32_t DCHUBP_REQ_SIZE_CONFIG_C; \
+ uint32_t BLANK_OFFSET_0; \
+ uint32_t BLANK_OFFSET_1; \
+ uint32_t DST_DIMENSIONS; \
+ uint32_t DST_AFTER_SCALER; \
+ uint32_t PREFETCH_SETTINS; \
+ uint32_t PREFETCH_SETTINGS; \
+ uint32_t VBLANK_PARAMETERS_0; \
+ uint32_t REF_FREQ_TO_PIX_FREQ; \
+ uint32_t VBLANK_PARAMETERS_1; \
+ uint32_t VBLANK_PARAMETERS_3; \
+ uint32_t NOM_PARAMETERS_0; \
+ uint32_t NOM_PARAMETERS_1; \
+ uint32_t NOM_PARAMETERS_4; \
+ uint32_t NOM_PARAMETERS_5; \
+ uint32_t PER_LINE_DELIVERY_PRE; \
+ uint32_t PER_LINE_DELIVERY; \
+ uint32_t PREFETCH_SETTINS_C; \
+ uint32_t PREFETCH_SETTINGS_C; \
+ uint32_t VBLANK_PARAMETERS_2; \
+ uint32_t VBLANK_PARAMETERS_4; \
+ uint32_t NOM_PARAMETERS_2; \
+ uint32_t NOM_PARAMETERS_3; \
+ uint32_t NOM_PARAMETERS_6; \
+ uint32_t NOM_PARAMETERS_7; \
+ uint32_t DCN_TTU_QOS_WM; \
+ uint32_t DCN_GLOBAL_TTU_CNTL; \
+ uint32_t DCN_SURF0_TTU_CNTL0; \
+ uint32_t DCN_SURF0_TTU_CNTL1; \
+ uint32_t DCN_SURF1_TTU_CNTL0; \
+ uint32_t DCN_SURF1_TTU_CNTL1; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB; \
+ uint32_t DCN_VM_MX_L1_TLB_CNTL; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR; \
+ uint32_t DCHUBBUB_SDPIF_FB_BASE; \
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET; \
+ uint32_t DCN_VM_FB_LOCATION_TOP; \
+ uint32_t DCN_VM_FB_LOCATION_BASE; \
+ uint32_t DCN_VM_FB_OFFSET; \
+ uint32_t DCN_VM_AGP_BASE; \
+ uint32_t DCN_VM_AGP_BOT; \
+ uint32_t DCN_VM_AGP_TOP; \
+ uint32_t CURSOR_SETTINS; \
+ uint32_t CURSOR_SETTINGS; \
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH; \
+ uint32_t CURSOR_SURFACE_ADDRESS; \
+ uint32_t CURSOR_SIZE; \
+ uint32_t CURSOR_CONTROL; \
+ uint32_t CURSOR_POSITION; \
+ uint32_t CURSOR_HOT_SPOT; \
+ uint32_t CURSOR_DST_OFFSET
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -397,7 +394,6 @@ struct dcn_mi_registers {
HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
-
#define DCN_HUBP_REG_FIELD_LIST(type) \
type HUBP_BLANK_EN;\
type HUBP_TTU_DISABLE;\
@@ -577,6 +573,10 @@ struct dcn_mi_registers {
type CURSOR_DST_X_OFFSET; \
type OUTPUT_FP
+struct dcn_mi_registers {
+ HUBP_COMMON_REG_VARIABLE_LIST;
+};
+
struct dcn_mi_shift {
DCN_HUBP_REG_FIELD_LIST(uint8_t);
};
@@ -611,11 +611,11 @@ void hubp1_program_requestor(
struct _vcs_dpi_display_rq_regs_st *rq_regs);
void hubp1_program_pixel_format(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
enum surface_pixel_format format);
void hubp1_program_size_and_rotation(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
@@ -623,7 +623,7 @@ void hubp1_program_size_and_rotation(
bool horizontal_mirror);
void hubp1_program_tiling(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
@@ -681,4 +681,6 @@ struct dcn_hubp_state {
void hubp1_read_state(struct dcn10_hubp *hubp1,
struct dcn_hubp_state *s);
+enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8e2ddbc2129c..82572863acab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -32,7 +32,7 @@
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
-#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10_optc.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_mpc.h"
#include "timing_generator.h"
@@ -43,6 +43,7 @@
#include "custom_float.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
+#include "dcn10_cm_common.h"
#define CTX \
hws->ctx
@@ -158,7 +159,7 @@ void dcn10_log_hw_state(struct dc *dc)
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
- tgn10_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
//only print if OTG master is enabled
if ((s.otg_enabled & 1) == 0)
@@ -425,6 +426,34 @@ static void bios_golden_init(struct dc *dc)
}
}
+static void false_optc_underflow_wa(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct timing_generator *tg)
+{
+ int i;
+ bool underflow;
+
+ if (!dc->hwseq->wa.false_optc_underflow)
+ return;
+
+ underflow = tg->funcs->is_optc_underflow_occurred(tg);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (old_pipe_ctx->stream != stream)
+ continue;
+
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
+ }
+
+ tg->funcs->set_blank_data_double_buffer(tg, true);
+
+ if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
+ tg->funcs->clear_optc_underflow(tg);
+}
+
static enum dc_status dcn10_prog_pixclk_crtc_otg(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -433,9 +462,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space;
struct tg_color black_color = {0};
- bool enableStereo = stream->timing.timing_3d_format == TIMING_3D_FORMAT_NONE ?
- false:true;
- bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
/* by upper caller loop, pipe0 is parent pipe and be called first.
* back end is set up by for pipe0. Other children pipe share back end
@@ -470,11 +496,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
&stream->timing,
true);
- pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
- pipe_ctx->stream_res.opp,
- enableStereo,
- rightEyePolarity);
-
#if 0 /* move to after enable_crtc */
/* TODO: OPP FMT, ABM. etc. should be done here. */
/* or FPGA now. instance 0 only. TODO: move to opp.c */
@@ -489,12 +510,18 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
/* program otg blank color */
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
- pipe_ctx->stream_res.tg->funcs->set_blank_color(
- pipe_ctx->stream_res.tg,
- &black_color);
- pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
- hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+ if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
+ !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+ hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+ false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
+ }
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
@@ -573,41 +600,34 @@ static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
int fe_idx = pipe_ctx->pipe_idx;
struct hubp *hubp = dc->res_pool->hubps[fe_idx];
struct mpc *mpc = dc->res_pool->mpc;
- int opp_id, z_idx;
- int mpcc_id = -1;
+ int opp_id;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
/* look at tree rather than mi here to know if we already reset */
for (opp_id = 0; opp_id < dc->res_pool->pipe_count; opp_id++) {
struct output_pixel_processor *opp = dc->res_pool->opps[opp_id];
- for (z_idx = 0; z_idx < opp->mpc_tree.num_pipes; z_idx++) {
- if (opp->mpc_tree.dpp[z_idx] == fe_idx) {
- mpcc_id = opp->mpc_tree.mpcc[z_idx];
- break;
- }
- }
- if (mpcc_id != -1)
+ mpc_tree_params = &(opp->mpc_tree_params);
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, fe_idx);
+ if (mpcc_to_remove != NULL)
break;
}
+
/*Already reset*/
if (opp_id == dc->res_pool->pipe_count)
return;
- mpc->funcs->remove(mpc, &(dc->res_pool->opps[opp_id]->mpc_tree),
- dc->res_pool->opps[opp_id]->inst, fe_idx);
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+ dc->res_pool->opps[opp_id]->mpcc_disconnect_pending[fe_idx] = true;
+
+ dc->optimized_required = true;
if (hubp->funcs->hubp_disconnect)
hubp->funcs->hubp_disconnect(hubp);
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
-
- pipe_ctx->stream = NULL;
- memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
- memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
- pipe_ctx->top_pipe = NULL;
- pipe_ctx->bottom_pipe = NULL;
- pipe_ctx->plane_state = NULL;
}
static void plane_atomic_power_down(struct dc *dc, int fe_idx)
@@ -636,29 +656,30 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
int fe_idx = pipe_ctx->pipe_idx;
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = dc->res_pool->hubps[fe_idx];
- struct mpc *mpc = dc->res_pool->mpc;
int opp_id = hubp->opp_id;
- struct output_pixel_processor *opp;
- if (opp_id != 0xf) {
- mpc->funcs->wait_for_idle(mpc, hubp->mpcc_id);
- opp = dc->res_pool->opps[hubp->opp_id];
- opp->mpcc_disconnect_pending[hubp->mpcc_id] = false;
- hubp->funcs->set_blank(hubp, true);
- }
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
REG_UPDATE(HUBP_CLK_CNTL[fe_idx],
HUBP_CLOCK_ENABLE, 0);
REG_UPDATE(DPP_CONTROL[fe_idx],
DPP_CLOCK_ENABLE, 0);
- if (opp_id != 0xf && dc->res_pool->opps[opp_id]->mpc_tree.num_pipes == 0)
+ if (opp_id != 0xf && dc->res_pool->opps[opp_id]->mpc_tree_params.opp_list == NULL)
REG_UPDATE(OPP_PIPE_CONTROL[opp_id],
OPP_PIPE_CLOCK_EN, 0);
hubp->power_gated = true;
+ dc->optimized_required = false; /* We're powering off, no need to optimize */
plane_atomic_power_down(dc, fe_idx);
+
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
}
static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
@@ -740,25 +761,27 @@ static void dcn10_init_hw(struct dc *dc)
}
}
+ /* Reset all MPCC muxes */
+ dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct output_pixel_processor *opp = dc->res_pool->opps[i];
- struct mpc_tree_cfg *mpc_tree = &opp->mpc_tree;
struct hubp *hubp = dc->res_pool->hubps[i];
- mpc_tree->dpp[0] = i;
- mpc_tree->mpcc[0] = i;
- mpc_tree->num_pipes = 1;
-
pipe_ctx->stream_res.tg = tg;
pipe_ctx->pipe_idx = i;
pipe_ctx->plane_res.hubp = hubp;
hubp->mpcc_id = i;
- hubp->opp_id = dc->res_pool->mpc->funcs->get_opp_id(dc->res_pool->mpc, i);
+ hubp->opp_id = 0xf;
hubp->power_gated = false;
+ dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ dc->res_pool->opps[i]->mpcc_disconnect_pending[i] = true;
+ pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
+
plane_atomic_disconnect(dc, pipe_ctx);
}
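The mpc_tree_params/opp_list fields replace the old fixed-size dpp[]/mpcc[] arrays with a per-OPP linked list of blending stages, so a lookup such as get_mpcc_for_dpp() presumably boils down to a list walk like the sketch below (the mpcc_bot link and dpp_id field names are assumptions):

        struct mpcc *get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
        {
                struct mpcc *mpcc;

                /* walk the blend chain from the OPP towards the bottom layer */
                for (mpcc = tree->opp_list; mpcc != NULL; mpcc = mpcc->mpcc_bot)
                        if (mpcc->dpp_id == dpp_id)
                                return mpcc;
                return NULL;
        }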
@@ -929,280 +952,10 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-/*modify the method to handle rgb for arr_points*/
-static bool convert_to_custom_float(
- struct pwl_result_data *rgb_resulted,
- struct curve_points *arr_points,
- uint32_t hw_points_num)
-{
- struct custom_float_format fmt;
-
- struct pwl_result_data *rgb = rgb_resulted;
-
- uint32_t i = 0;
-
- fmt.exponenta_bits = 6;
- fmt.mantissa_bits = 12;
- fmt.sign = false;
-
- if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
- &arr_points[0].custom_float_x)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
- &arr_points[0].custom_float_offset)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
- &arr_points[0].custom_float_slope)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- fmt.mantissa_bits = 10;
- fmt.sign = false;
-
- if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
- &arr_points[1].custom_float_x)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
- &arr_points[1].custom_float_y)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
- &arr_points[1].custom_float_slope)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
- fmt.mantissa_bits = 12;
- fmt.sign = true;
- while (i != hw_points_num) {
- if (!convert_to_custom_float_format(rgb->red, &fmt,
- &rgb->red_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(rgb->green, &fmt,
- &rgb->green_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
- if (!convert_to_custom_float_format(rgb->blue, &fmt,
- &rgb->blue_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
- if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
- &rgb->delta_red_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
- &rgb->delta_green_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
- &rgb->delta_blue_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- ++rgb;
- ++i;
- }
-
- return true;
-}
-#define MAX_REGIONS_NUMBER 34
-#define MAX_LOW_POINT 25
-#define NUMBER_SEGMENTS 32
-
-static bool
-dcn10_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
- struct pwl_params *regamma_params)
-{
- struct curve_points *arr_points;
- struct pwl_result_data *rgb_resulted;
- struct pwl_result_data *rgb;
- struct pwl_result_data *rgb_plus_1;
- struct fixed31_32 y_r;
- struct fixed31_32 y_g;
- struct fixed31_32 y_b;
- struct fixed31_32 y1_min;
- struct fixed31_32 y3_max;
-
- int32_t segment_start, segment_end;
- int32_t i;
- uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
-
- if (output_tf == NULL || regamma_params == NULL || output_tf->type == TF_TYPE_BYPASS)
- return false;
-
- PERF_TRACE();
-
- arr_points = regamma_params->arr_points;
- rgb_resulted = regamma_params->rgb_resulted;
- hw_points = 0;
-
- memset(regamma_params, 0, sizeof(struct pwl_params));
- memset(seg_distr, 0, sizeof(seg_distr));
-
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
- /* 32 segments
- * segments are from 2^-25 to 2^7
- */
- for (i = 0; i < 32 ; i++)
- seg_distr[i] = 3;
-
- segment_start = -25;
- segment_end = 7;
- } else {
- /* 10 segments
- * segment is from 2^-10 to 2^0
- * There are less than 256 points, for optimization
- */
- seg_distr[0] = 3;
- seg_distr[1] = 4;
- seg_distr[2] = 4;
- seg_distr[3] = 4;
- seg_distr[4] = 4;
- seg_distr[5] = 4;
- seg_distr[6] = 4;
- seg_distr[7] = 4;
- seg_distr[8] = 5;
- seg_distr[9] = 5;
-
- segment_start = -10;
- segment_end = 0;
- }
-
- for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++)
- seg_distr[i] = -1;
-
- for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1)
- hw_points += (1 << seg_distr[k]);
- }
-
- j = 0;
- for (k = 0; k < (segment_end - segment_start); k++) {
- increment = NUMBER_SEGMENTS / (1 << seg_distr[k]);
- start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS;
- for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) {
- if (j == hw_points - 1)
- break;
- rgb_resulted[j].red = output_tf->tf_pts.red[i];
- rgb_resulted[j].green = output_tf->tf_pts.green[i];
- rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
- j++;
- }
- }
-
- /* last point */
- start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS;
- rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
- rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
- rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
-
- arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_start));
- arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_end));
-
- y_r = rgb_resulted[0].red;
- y_g = rgb_resulted[0].green;
- y_b = rgb_resulted[0].blue;
-
- y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
-
- arr_points[0].y = y1_min;
- arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
- y_r = rgb_resulted[hw_points - 1].red;
- y_g = rgb_resulted[hw_points - 1].green;
- y_b = rgb_resulted[hw_points - 1].blue;
-
- /* see comment above, m_arrPoints[1].y should be the Y value for the
- * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
- */
- y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
-
- arr_points[1].y = y3_max;
-
- arr_points[1].slope = dal_fixed31_32_zero;
-
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
- /* for PQ, we want to have a straight line from last HW X point,
- * and the slope to be such that we hit 1.0 at 10000 nits.
- */
- const struct fixed31_32 end_value =
- dal_fixed31_32_from_int(125);
-
- arr_points[1].slope = dal_fixed31_32_div(
- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
- dal_fixed31_32_sub(end_value, arr_points[1].x));
- }
-
- regamma_params->hw_points_num = hw_points;
-
- i = 1;
- for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1) {
- regamma_params->arr_curve_points[k].segments_num =
- seg_distr[k];
- regamma_params->arr_curve_points[i].offset =
- regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
- }
- i++;
- }
-
- if (seg_distr[k] != -1)
- regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
-
- rgb = rgb_resulted;
- rgb_plus_1 = rgb_resulted + 1;
-
- i = 1;
-
- while (i != hw_points + 1) {
- if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
- rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
- rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
- rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
-
- ++rgb_plus_1;
- ++rgb;
- ++i;
- }
-
- convert_to_custom_float(rgb_resulted, arr_points, hw_points);
-
- PERF_TRACE();
-
- return true;
-}
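
The helper removed above sized the hardware PWL from a per-region segment distribution: region k contributes 2^seg_distr[k] evenly spaced points, and unused regions are marked -1. A minimal standalone sketch of that sizing arithmetic, reusing the two distributions from the removed code (the helper name is illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Sum 2^seg_distr[k] over the used regions, mirroring the hw_points loop
 * in the removed dcn10_translate_regamma_to_hw_format(). */
static uint32_t pwl_point_count(const int *seg_distr, int num_regions)
{
	uint32_t points = 0;
	int k;

	for (k = 0; k < num_regions; k++)
		if (seg_distr[k] != -1)
			points += 1u << seg_distr[k];
	return points;
}

int main(void)
{
	/* PQ: 32 regions of 2^3 points each, spanning 2^-25..2^7 */
	int pq[32];
	/* default: 10 regions spanning 2^-10..2^0, kept under 256 points */
	const int def[10] = { 3, 4, 4, 4, 4, 4, 4, 4, 5, 5 };
	int k;

	for (k = 0; k < 32; k++)
		pq[k] = 3;

	printf("PQ points: %u\n", pwl_point_count(pq, 32));       /* 256 */
	printf("default points: %u\n", pwl_point_count(def, 10)); /* 184 */
	return 0;
}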
static bool
dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
@@ -1223,9 +976,9 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a
	 * full update.
	 */
- else if (dcn10_translate_regamma_to_hw_format(
+ else if (cm_helper_translate_curve_to_hw_format(
stream->out_transfer_func,
- &dpp->regamma_params)) {
+ &dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(
dpp,
&dpp->regamma_params, OPP_REGAMMA_USER);
@@ -1579,7 +1332,6 @@ static void dcn10_enable_plane(
/* make sure OPP_PIPE_CLOCK_EN = 1 */
REG_UPDATE(OPP_PIPE_CONTROL[pipe_ctx->stream_res.tg->inst],
OPP_PIPE_CLOCK_EN, 1);
- /*TODO: REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, 0x1f);*/
/* TODO: enable/disable in dm as per update type.
if (plane_state) {
@@ -1672,60 +1424,15 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix)
{
- int i;
- struct out_csc_color_matrix tbl_entry;
-
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
- enum dc_color_space color_space =
- pipe_ctx->stream->output_color_space;
-
- //uint16_t matrix[12];
- for (i = 0; i < 12; i++)
- tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
-
- tbl_entry.color_space = color_space;
- //tbl_entry.regval = matrix;
-
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
- pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
} else {
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
-static void set_mpc_output_csc(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix,
- int opp_id)
-{
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
- struct out_csc_color_matrix tbl_entry;
- enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
-
-
- if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
- //uint16_t matrix[12];
- for (i = 0; i < 12; i++)
- tbl_entry.regval[i] = matrix[i];
- tbl_entry.color_space = colorspace;
-
- if (mpc->funcs->set_output_csc != NULL)
- mpc->funcs->set_output_csc(mpc,
- opp_id,
- &tbl_entry,
- ocsc_mode);
- } else {
- if (mpc->funcs->set_ocsc_default != NULL)
- mpc->funcs->set_ocsc_default(mpc,
- opp_id,
- colorspace,
- ocsc_mode);
- }
-}
-
static void program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
@@ -1736,13 +1443,6 @@ static void program_output_csc(struct dc *dc,
program_csc_matrix(pipe_ctx,
colorspace,
matrix);
- else
- set_mpc_output_csc(dc,
- pipe_ctx,
- colorspace,
- matrix,
- opp_id);
-
}
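
For reference, the 12-entry array handed to program_csc_matrix() is a 3x4 row-major color matrix, one row per output channel with the fourth column as an offset term. A hedged sketch of an identity adjustment, assuming the S2.13 fixed-point convention commonly used for DC matrices (0x2000 == 1.0); the array name and the exact encoding are assumptions, not taken from this patch:

/* Identity 3x4 CSC: unity gain on the diagonal, zero cross terms and offsets.
 * 0x2000 represents 1.0 under the assumed S2.13 encoding. */
static const uint16_t csc_identity[12] = {
	0x2000, 0x0000, 0x0000, 0x0000,	/* R' = 1.0*R */
	0x0000, 0x2000, 0x0000, 0x0000,	/* G' = 1.0*G */
	0x0000, 0x0000, 0x2000, 0x0000,	/* B' = 1.0*B */
};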
static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
@@ -1914,35 +1614,73 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- struct mpcc_cfg mpcc_cfg = {0};
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct pipe_ctx *top_pipe;
- bool per_pixel_alpha =
- pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ struct mpcc_blnd_cfg blnd_cfg;
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
/* TODO: proper fix once fpga works */
- mpcc_cfg.dpp_id = hubp->inst;
- mpcc_cfg.opp_id = pipe_ctx->stream_res.opp->inst;
- mpcc_cfg.tree_cfg = &(pipe_ctx->stream_res.opp->mpc_tree);
- for (top_pipe = pipe_ctx->top_pipe; top_pipe; top_pipe = top_pipe->top_pipe)
- mpcc_cfg.z_index++;
if (dc->debug.surface_visual_confirm)
dcn10_get_surface_visual_confirm_color(
- pipe_ctx, &mpcc_cfg.black_color);
+ pipe_ctx, &blnd_cfg.black_color);
else
color_space_to_black_color(
dc, pipe_ctx->stream->output_color_space,
- &mpcc_cfg.black_color);
- mpcc_cfg.per_pixel_alpha = per_pixel_alpha;
+ &blnd_cfg.black_color);
+
+ if (per_pixel_alpha)
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ else
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_alpha = 0xff;
+ blnd_cfg.global_gain = 0xff;
+
/* DCN1.0 has output CM before MPC which seems to screw with
* pre-multiplied alpha.
*/
- mpcc_cfg.pre_multiplied_alpha = is_rgb_cspace(
+ blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
pipe_ctx->stream->output_color_space)
&& per_pixel_alpha;
- hubp->mpcc_id = dc->res_pool->mpc->funcs->add(dc->res_pool->mpc, &mpcc_cfg);
- hubp->opp_id = mpcc_cfg.opp_id;
+
+ /*
+ * TODO: remove hack
+ * Note: currently there is a bug in init_hw such that
+ * on resume from hibernate, BIOS sets up MPCC0, and
+ * we do mpcc_remove but the mpcc cannot go to idle
+	 * after remove. This causes us to pick mpcc1 here,
+ * which causes a pstate hang for yet unknown reason.
+ */
+ mpcc_id = hubp->inst;
+
+ /* check if this MPCC is already being used */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+ /* remove MPCC if being used */
+ if (new_mpcc != NULL)
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
+ else
+ if (dc->debug.sanity_checks)
+ mpc->funcs->assert_mpcc_idle_before_connect(
+ dc->res_pool->mpc, mpcc_id);
+
+ /* Call MPC to insert new plane */
+ new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
+ mpc_tree_params,
+ &blnd_cfg,
+ NULL,
+ NULL,
+ hubp->inst,
+ mpcc_id);
+
+ ASSERT(new_mpcc != NULL);
+
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
}
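
The pre_multiplied_alpha flag set above selects between the two standard alpha-blend equations (DCN1.0 forces it off for non-RGB output spaces because the output CM runs before the MPC). A hedged per-channel sketch of the difference; the helper is illustrative, not driver code:

#include <stdbool.h>

/* Straight alpha:        out = a * src + (1 - a) * dst
 * Pre-multiplied alpha:  out =     src + (1 - a) * dst  (src already scaled by a) */
static float blend_channel(float src, float dst, float a, bool premultiplied)
{
	return (premultiplied ? src : a * src) + (1.0f - a) * dst;
}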
static void update_scaler(struct pipe_ctx *pipe_ctx)
@@ -1971,7 +1709,7 @@ static void update_dchubp_dpp(
union plane_size size = plane_state->plane_size;
/* depends on DML calculation, DPP clock value may change dynamically */
- if (pipe_ctx->plane_state->update_flags.raw != 0) {
+ if (plane_state->update_flags.bits.full_update) {
enable_dppclk(
dc->hwseq,
pipe_ctx->pipe_idx,
@@ -2015,7 +1753,8 @@ static void update_dchubp_dpp(
}
if (plane_state->update_flags.bits.full_update ||
- plane_state->update_flags.bits.scaling_change) {
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change) {
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
@@ -2037,7 +1776,9 @@ static void update_dchubp_dpp(
plane_state->update_flags.bits.horizontal_mirror_change ||
plane_state->update_flags.bits.rotation_change ||
plane_state->update_flags.bits.swizzle_change ||
- plane_state->update_flags.bits.bpp_change) {
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change) {
hubp->funcs->hubp_program_surface_config(
hubp,
plane_state->format,
@@ -2062,6 +1803,7 @@ static void program_all_pipe_in_tree(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+
if (pipe_ctx->top_pipe == NULL) {
pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
@@ -2072,7 +1814,11 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
- pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, !is_pipe_tree_visible(pipe_ctx));
+
+ if (pipe_ctx->stream_res.tg->funcs->set_blank)
+ pipe_ctx->stream_res.tg->funcs->set_blank(
+ pipe_ctx->stream_res.tg,
+ !is_pipe_tree_visible(pipe_ctx));
}
if (pipe_ctx->plane_state != NULL) {
@@ -2179,6 +1925,7 @@ static void dcn10_apply_ctx_for_surface(
{
int i;
struct timing_generator *tg;
+ struct output_pixel_processor *opp;
bool removed_pipe[4] = { false };
unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
bool program_water_mark = false;
@@ -2189,6 +1936,8 @@ static void dcn10_apply_ctx_for_surface(
if (!top_pipe_to_program)
return;
+ opp = top_pipe_to_program->stream_res.opp;
+
tg = top_pipe_to_program->stream_res.tg;
tg->funcs->lock(tg);
@@ -2196,7 +1945,8 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0) {
/* OTG blank before remove all front end */
- tg->funcs->set_blank(tg, true);
+ if (tg->funcs->set_blank)
+ tg->funcs->set_blank(tg, true);
}
/* Disconnect unused mpcc */
@@ -2236,24 +1986,14 @@ static void dcn10_apply_ctx_for_surface(
}
}
- if (num_planes > 0) {
+ if (num_planes > 0)
program_all_pipe_in_tree(dc, top_pipe_to_program, context);
- /* TODO: this is a hack w/a for switching from mpo to pipe split */
- if (stream->cursor_attributes.address.quad_part != 0) {
- struct dc_cursor_position position = { 0 };
-
- dc_stream_set_cursor_position(
- (struct dc_stream_state *)stream,
- &position);
- dc_stream_set_cursor_attributes(
- (struct dc_stream_state *)stream,
- &stream->cursor_attributes);
- }
- }
-
tg->funcs->unlock(tg);
+ if (num_planes == 0)
+ false_optc_underflow_wa(dc, stream, tg);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
@@ -2264,7 +2004,7 @@ static void dcn10_apply_ctx_for_surface(
pipe_ctx->plane_state->update_flags.bits.full_update)
program_water_mark = true;
- if (removed_pipe[i] && num_planes == 0)
+ if (removed_pipe[i])
dcn10_disable_plane(dc, old_pipe_ctx);
}
@@ -2273,6 +2013,7 @@ static void dcn10_apply_ctx_for_surface(
/* pstate stuck check after watermark update */
dcn10_verify_allow_pstate_change_high(dc);
}
+
/* watermark is for all pipes */
hubbub1_program_watermarks(dc->res_pool->hubbub,
&context->bw.dcn.watermarks, ref_clk_mhz);
@@ -2502,10 +2243,10 @@ static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
dcn10_config_stereo_parameters(stream, &flags);
- pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
pipe_ctx->stream_res.opp,
flags.PROGRAM_STEREO == 1 ? true:false,
- stream->timing.flags.RIGHT_EYE_3D_POLARITY == 1 ? true:false);
+ &stream->timing);
pipe_ctx->stream_res.tg->funcs->program_stereo(
pipe_ctx->stream_res.tg,
@@ -2619,7 +2360,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.pplib_apply_display_requirements =
dcn10_pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
- .edp_power_control = hwss_edp_power_control
+ .edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index b016f4cbd45c..179890b1a8c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -25,8 +25,6 @@
#include "reg_helper.h"
#include "dcn10_mpc.h"
-#include "dc.h"
-#include "mem_input.h"
#define REG(reg)\
mpc10->mpc_regs->reg
@@ -38,17 +36,13 @@
#define FN(reg_name, field_name) \
mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name
-#define MODE_TOP_ONLY 1
-#define MODE_BLEND 3
-#define BLND_PP_ALPHA 0
-#define BLND_GLOBAL_ALPHA 2
-
-static void mpc10_set_bg_color(
- struct dcn10_mpc *mpc10,
+void mpc1_set_bg_color(struct mpc *mpc,
struct tg_color *bg_color,
- int id)
+ int mpcc_id)
{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
/* mpc color is 12 bit. tg_color is 10 bit */
/* todo: might want to use 16 bit to represent color and have each
* hw block translate to correct color depth.
@@ -57,15 +51,47 @@ static void mpc10_set_bg_color(
uint32_t bg_g_y = bg_color->color_g_y << 2;
uint32_t bg_b_cb = bg_color->color_b_cb << 2;
- REG_SET(MPCC_BG_R_CR[id], 0,
+ REG_SET(MPCC_BG_R_CR[mpcc_id], 0,
MPCC_BG_R_CR, bg_r_cr);
- REG_SET(MPCC_BG_G_Y[id], 0,
+ REG_SET(MPCC_BG_G_Y[mpcc_id], 0,
MPCC_BG_G_Y, bg_g_y);
- REG_SET(MPCC_BG_B_CB[id], 0,
+ REG_SET(MPCC_BG_B_CB[mpcc_id], 0,
MPCC_BG_B_CB, bg_b_cb);
}
-void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
+static void mpc1_update_blending(
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_UPDATE_5(MPCC_CONTROL[mpcc_id],
+ MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only,
+ MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha,
+ MPCC_GLOBAL_GAIN, blnd_cfg->global_gain);
+
+ mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
+}
+
+void mpc1_update_stereo_mix(
+ struct mpc *mpc,
+ struct mpcc_sm_cfg *sm_cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_UPDATE_6(MPCC_SM_CONTROL[mpcc_id],
+ MPCC_SM_EN, sm_cfg->enable,
+ MPCC_SM_MODE, sm_cfg->sm_mode,
+ MPCC_SM_FRAME_ALT, sm_cfg->frame_alt,
+ MPCC_SM_FIELD_ALT, sm_cfg->field_alt,
+ MPCC_SM_FORCE_NEXT_FRAME_POL, sm_cfg->force_next_frame_porlarity,
+ MPCC_SM_FORCE_NEXT_TOP_POL, sm_cfg->force_next_field_polarity);
+}
+void mpc1_assert_idle_mpcc(struct mpc *mpc, int id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
@@ -75,39 +101,52 @@ void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
1, 100000);
}
-static int mpc10_get_idle_mpcc_id(struct dcn10_mpc *mpc10)
+struct mpcc *mpc1_get_mpcc(struct mpc *mpc, int mpcc_id)
{
- int i;
- int last_free_mpcc_id = -1;
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- for (i = 0; i < mpc10->num_mpcc; i++) {
- uint32_t is_idle = 0;
+ ASSERT(mpcc_id < mpc10->num_mpcc);
+ return &(mpc->mpcc_array[mpcc_id]);
+}
- if (mpc10->mpcc_in_use_mask & 1 << i)
- continue;
+struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+{
+ struct mpcc *tmp_mpcc = tree->opp_list;
- last_free_mpcc_id = i;
- REG_GET(MPCC_STATUS[i], MPCC_IDLE, &is_idle);
- if (is_idle)
- return i;
+ while (tmp_mpcc != NULL) {
+ if (tmp_mpcc->dpp_id == dpp_id)
+ return tmp_mpcc;
+ tmp_mpcc = tmp_mpcc->mpcc_bot;
}
+ return NULL;
+}
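
mpc1_get_mpcc_for_dpp() above treats the blending chain as a singly linked list: tree->opp_list is the topmost MPCC and each mpcc_bot pointer descends one layer. A hedged sketch of the same traversal, counting the layers in a tree (the helper name is illustrative):

/* Walk opp_list -> mpcc_bot until NULL; same pattern as the dpp lookup. */
static int mpc_tree_depth(struct mpc_tree *tree)
{
	struct mpcc *cur = tree->opp_list;
	int depth = 0;

	while (cur != NULL) {
		depth++;
		cur = cur->mpcc_bot;
	}
	return depth;
}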
- /* This assert should never trigger, we have mpcc leak if it does */
- ASSERT(last_free_mpcc_id != -1);
-
- mpc10_assert_idle_mpcc(&mpc10->base, last_free_mpcc_id);
- return last_free_mpcc_id;
+bool mpc1_is_mpcc_idle(struct mpc *mpc, int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ unsigned int top_sel;
+ unsigned int opp_id;
+ unsigned int idle;
+
+ REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
+ REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_STATUS[mpcc_id], MPCC_IDLE, &idle);
+	return top_sel == 0xf && opp_id == 0xf && idle;
}
-static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int id)
+void mpc1_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
unsigned int top_sel, mpc_busy, mpc_idle;
- REG_GET(MPCC_TOP_SEL[id],
+ REG_GET(MPCC_TOP_SEL[mpcc_id],
MPCC_TOP_SEL, &top_sel);
if (top_sel == 0xf) {
- REG_GET_2(MPCC_STATUS[id],
+ REG_GET_2(MPCC_STATUS[mpcc_id],
MPCC_BUSY, &mpc_busy,
MPCC_IDLE, &mpc_idle);
@@ -116,241 +155,269 @@ static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int i
}
}
-void mpc10_mpcc_remove(
- struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int dpp_id)
-{
- struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
-
- /* find z_idx for the dpp to be removed */
- for (z_idx = 0; z_idx < tree_cfg->num_pipes; z_idx++)
- if (tree_cfg->dpp[z_idx] == dpp_id)
- break;
-
- if (z_idx == tree_cfg->num_pipes) {
- /* In case of resume from S3/S4, remove mpcc from bios left over */
- REG_SET(MPCC_OPP_ID[dpp_id], 0,
- MPCC_OPP_ID, 0xf);
- REG_SET(MPCC_TOP_SEL[dpp_id], 0,
- MPCC_TOP_SEL, 0xf);
- REG_SET(MPCC_BOT_SEL[dpp_id], 0,
- MPCC_BOT_SEL, 0xf);
- return;
- }
-
- mpcc_id = tree_cfg->mpcc[z_idx];
-
- REG_SET(MPCC_OPP_ID[mpcc_id], 0,
- MPCC_OPP_ID, 0xf);
- REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
- MPCC_TOP_SEL, 0xf);
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
-
- if (z_idx > 0) {
- int top_mpcc_id = tree_cfg->mpcc[z_idx - 1];
-
- if (z_idx + 1 < tree_cfg->num_pipes)
- /* mpcc to be removed is in the middle of the tree */
- REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
- MPCC_BOT_SEL, tree_cfg->mpcc[z_idx + 1]);
- else {
- /* mpcc to be removed is at the bottom of the tree */
- REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
- REG_UPDATE(MPCC_CONTROL[top_mpcc_id],
- MPCC_MODE, MODE_TOP_ONLY);
- }
- } else if (tree_cfg->num_pipes > 1)
- /* mpcc to be removed is at the top of the tree */
- REG_SET(MUX[opp_id], 0,
- MPC_OUT_MUX, tree_cfg->mpcc[z_idx + 1]);
- else
- /* mpcc to be removed is the only one in the tree */
- REG_SET(MUX[opp_id], 0, MPC_OUT_MUX, 0xf);
-
- /* mark this mpcc as not in use */
- mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
- tree_cfg->num_pipes--;
- for (; z_idx < tree_cfg->num_pipes; z_idx++) {
- tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx + 1];
- tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx + 1];
- }
- tree_cfg->dpp[tree_cfg->num_pipes] = 0xdeadbeef;
- tree_cfg->mpcc[tree_cfg->num_pipes] = 0xdeadbeef;
-}
-
-static void mpc10_add_to_tree_cfg(
+/*
+ * Insert DPP into MPC tree based on specified blending position.
+ * Only used for planes that are part of blending chain for OPP output
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be added to.
+ * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ *        stereo mix must be disabled for the very bottom layer of the tree config.
+ * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
+ * [in] dpp_id - DPP instance for the plane to be added.
+ * [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return: struct mpcc* - MPCC that was added.
+ */
+struct mpcc *mpc1_insert_plane(
struct mpc *mpc,
- struct mpcc_cfg *cfg,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_mode = MODE_TOP_ONLY;
- int position = cfg->z_index;
- struct mpc_tree_cfg *tree_cfg = cfg->tree_cfg;
- int alpha_blnd_mode = cfg->per_pixel_alpha ?
- BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
- int z_idx;
+ struct mpcc *new_mpcc = NULL;
- REG_SET(MPCC_OPP_ID[mpcc_id], 0,
- MPCC_OPP_ID, cfg->opp_id);
+ /* sanity check parameters */
+ ASSERT(mpcc_id < mpc10->num_mpcc);
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
- REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
- MPCC_TOP_SEL, cfg->dpp_id);
+ if (insert_above_mpcc) {
+		/* check that insert_above_mpcc exists in tree->opp_list */
+ struct mpcc *temp_mpcc = tree->opp_list;
- if (position == 0) {
- /* idle dpp/mpcc is added to the top layer of tree */
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc == NULL)
+ return NULL;
+ }
- if (tree_cfg->num_pipes > 0) {
- /* get instance of previous top mpcc */
- int prev_top_mpcc_id = tree_cfg->mpcc[0];
+ /* Get and update MPCC struct parameters */
+ new_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+ new_mpcc->dpp_id = dpp_id;
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, prev_top_mpcc_id);
- mpcc_mode = MODE_BLEND;
+ /* program mux and MPCC_MODE */
+ if (insert_above_mpcc) {
+ new_mpcc->mpcc_bot = insert_above_mpcc;
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, insert_above_mpcc->mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING);
+ } else {
+ new_mpcc->mpcc_bot = NULL;
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH);
+ }
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
+
+ /* update mpc tree mux setting */
+ if (tree->opp_list == insert_above_mpcc) {
+		/* insert as the topmost mpcc */
+ tree->opp_list = new_mpcc;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, mpcc_id);
+ } else {
+ /* find insert position */
+ struct mpcc *temp_mpcc = tree->opp_list;
+
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc && temp_mpcc->mpcc_bot == insert_above_mpcc) {
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, MPCC_BOT_SEL, mpcc_id);
+ temp_mpcc->mpcc_bot = new_mpcc;
+ if (!insert_above_mpcc)
+ REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id],
+ MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING);
}
+ }
- /* opp will get new output. from new added mpcc */
- REG_SET(MUX[cfg->opp_id], 0, MPC_OUT_MUX, mpcc_id);
-
- } else if (position == tree_cfg->num_pipes) {
- /* idle dpp/mpcc is added to the bottom layer of tree */
-
- /* get instance of previous bottom mpcc, set to middle layer */
- int prev_bot_mpcc_id = tree_cfg->mpcc[tree_cfg->num_pipes - 1];
-
- REG_SET(MPCC_BOT_SEL[prev_bot_mpcc_id], 0,
- MPCC_BOT_SEL, mpcc_id);
- REG_UPDATE(MPCC_CONTROL[prev_bot_mpcc_id],
- MPCC_MODE, MODE_BLEND);
-
- /* mpcc_id become new bottom mpcc*/
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
+ /* update the blending configuration */
+ new_mpcc->blnd_cfg = *blnd_cfg;
+ mpc->funcs->update_blending(mpc, &new_mpcc->blnd_cfg, mpcc_id);
- } else {
- /* idle dpp/mpcc is added to middle of tree */
- int above_mpcc_id = tree_cfg->mpcc[position - 1];
- int below_mpcc_id = tree_cfg->mpcc[position];
-
- /* mpcc above new mpcc_id has new bottom mux*/
- REG_SET(MPCC_BOT_SEL[above_mpcc_id], 0,
- MPCC_BOT_SEL, mpcc_id);
- REG_UPDATE(MPCC_CONTROL[above_mpcc_id],
- MPCC_MODE, MODE_BLEND);
-
- /* mpcc_id bottom mux is from below mpcc*/
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, below_mpcc_id);
- mpcc_mode = MODE_BLEND;
+ /* update the stereo mix settings, if provided */
+ if (sm_cfg != NULL) {
+ new_mpcc->sm_cfg = *sm_cfg;
+ mpc1_update_stereo_mix(mpc, sm_cfg, mpcc_id);
}
- REG_SET_4(MPCC_CONTROL[mpcc_id], 0xffffffff,
- MPCC_MODE, mpcc_mode,
- MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
- MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha,
- MPCC_BLND_ACTIVE_OVERLAP_ONLY, false);
+ /* mark this mpcc as in use */
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
- /* update mpc_tree_cfg with new mpcc */
- for (z_idx = tree_cfg->num_pipes; z_idx > position; z_idx--) {
- tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx - 1];
- tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx - 1];
- }
- tree_cfg->dpp[position] = cfg->dpp_id;
- tree_cfg->mpcc[position] = mpcc_id;
- tree_cfg->num_pipes++;
+ return new_mpcc;
}
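
A hedged usage sketch of the new insert path, mirroring how update_mpcc() drives it: look up any MPCC already bound to the DPP, detach it, then insert the plane. Passing tree->opp_list as insert_above_mpcc makes the new plane the topmost layer; passing NULL (as update_mpcc() does) inserts it at the bottom. blnd_cfg population is elided:

/* Sketch only; assumes mpc, tree, blnd_cfg, dpp_id and mpcc_id are set up
 * as in update_mpcc(). */
struct mpcc *mpcc = mpc->funcs->get_mpcc_for_dpp(tree, dpp_id);

if (mpcc != NULL)
	mpc->funcs->remove_mpcc(mpc, tree, mpcc);

mpcc = mpc->funcs->insert_plane(mpc, tree, &blnd_cfg, NULL,
				tree->opp_list, dpp_id, mpcc_id);
ASSERT(mpcc != NULL);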
-int mpc10_mpcc_add(struct mpc *mpc, struct mpcc_cfg *cfg)
+/*
+ * Remove a specified MPCC from the MPC tree.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be removed from.
+ * [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return: void
+ */
+void mpc1_remove_mpcc(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc_to_remove)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
-
- ASSERT(cfg->z_index < mpc10->num_mpcc);
-
- /* check in dpp already exists in mpc tree */
- for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
- if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
- break;
- if (z_idx == cfg->tree_cfg->num_pipes) {
- ASSERT(cfg->z_index <= cfg->tree_cfg->num_pipes);
- mpcc_id = mpc10_get_idle_mpcc_id(mpc10);
-
- /*
- * TODO: remove hack
- * Note: currently there is a bug in init_hw such that
- * on resume from hibernate, BIOS sets up MPCC0, and
- * we do mpcc_remove but the mpcc cannot go to idle
- * after remove. This cause us to pick mpcc1 here,
- * which causes a pstate hang for yet unknown reason.
- */
- mpcc_id = cfg->dpp_id;
- /* end hack*/
-
- ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
-
- if (mpc->ctx->dc->debug.sanity_checks)
- mpc10_assert_mpcc_idle_before_connect(mpc10, mpcc_id);
+ bool found = false;
+ int mpcc_id = mpcc_to_remove->mpcc_id;
+
+ if (tree->opp_list == mpcc_to_remove) {
+ found = true;
+ /* remove MPCC from top of tree */
+ if (mpcc_to_remove->mpcc_bot) {
+ /* set the next MPCC in list to be the top MPCC */
+ tree->opp_list = mpcc_to_remove->mpcc_bot;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, tree->opp_list->mpcc_id);
+ } else {
+			/* there are no other MPCCs in the list */
+ tree->opp_list = NULL;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, 0xf);
+ }
} else {
- ASSERT(cfg->z_index < cfg->tree_cfg->num_pipes);
- mpcc_id = cfg->tree_cfg->mpcc[z_idx];
- mpc10_mpcc_remove(mpc, cfg->tree_cfg, cfg->opp_id, cfg->dpp_id);
+		/* find the MPCC above mpcc_to_remove in the list */
+ struct mpcc *temp_mpcc = tree->opp_list;
+
+ while (temp_mpcc && temp_mpcc->mpcc_bot != mpcc_to_remove)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+
+ if (temp_mpcc && temp_mpcc->mpcc_bot == mpcc_to_remove) {
+ found = true;
+ temp_mpcc->mpcc_bot = mpcc_to_remove->mpcc_bot;
+ if (mpcc_to_remove->mpcc_bot) {
+ /* remove MPCC in middle of list */
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_to_remove->mpcc_bot->mpcc_id);
+ } else {
+ /* remove MPCC from bottom of list */
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id],
+ MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH);
+ }
+ }
}
- /* add dpp/mpcc pair to mpc_tree_cfg and update mpcc registers */
- mpc10_add_to_tree_cfg(mpc, cfg, mpcc_id);
+ if (found) {
+ /* turn off MPCC mux registers */
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
- /* set background color */
- mpc10_set_bg_color(mpc10, &cfg->black_color, mpcc_id);
-
- /* mark this mpcc as in use */
- mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+ /* mark this mpcc as not in use */
+ mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
+ mpcc_to_remove->dpp_id = 0xf;
+ mpcc_to_remove->mpcc_bot = NULL;
+ } else {
+ /* In case of resume from S3/S4, remove mpcc from bios left over */
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ }
+}
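
Because removal relinks the list around the victim, tearing down a whole tree reduces to removing the head until opp_list is empty. A hedged sketch (the helper name is illustrative, not from this patch):

/* Detach every blending layer from an OPP's tree, top down. */
static void mpc_tree_teardown(struct mpc *mpc, struct mpc_tree *tree)
{
	while (tree->opp_list != NULL)
		mpc->funcs->remove_mpcc(mpc, tree, tree->opp_list);
}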
- return mpcc_id;
+static void mpc1_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+{
+ mpcc->mpcc_id = mpcc_inst;
+ mpcc->dpp_id = 0xf;
+ mpcc->mpcc_bot = NULL;
+ mpcc->blnd_cfg.overlap_only = false;
+ mpcc->blnd_cfg.global_alpha = 0xff;
+ mpcc->blnd_cfg.global_gain = 0xff;
+ mpcc->sm_cfg.enable = false;
}
-void mpc10_update_blend_mode(
- struct mpc *mpc,
- struct mpcc_cfg *cfg)
+/*
+ * Reset the MPCC HW status by disconnecting all muxes.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ *
+ * Return: void
+ */
+void mpc1_mpc_init(struct mpc *mpc)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
- int alpha_blnd_mode = cfg->per_pixel_alpha ?
- BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+ int mpcc_id;
+ int opp_id;
- /* find z_idx for the dpp that requires blending mode update*/
- for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
- if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
- break;
+ mpc10->mpcc_in_use_mask = 0;
+ for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) {
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
- ASSERT(z_idx < cfg->tree_cfg->num_pipes);
- mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+ mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
+ }
- REG_UPDATE_2(MPCC_CONTROL[mpcc_id],
- MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
- MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha);
+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+ if (REG(MUX[opp_id]))
+ REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf);
+ }
}
-int mpc10_get_opp_id(struct mpc *mpc, int mpcc_id)
+void mpc1_init_mpcc_list_from_hw(
+ struct mpc *mpc,
+ struct mpc_tree *tree)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int opp_id = 0xF;
-
- REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
-
- return opp_id;
+ unsigned int opp_id;
+ unsigned int top_sel;
+ unsigned int bot_sel;
+ unsigned int out_mux;
+ struct mpcc *mpcc;
+ int mpcc_id;
+ int bot_mpcc_id;
+
+ REG_GET(MUX[tree->opp_id], MPC_OUT_MUX, &out_mux);
+
+ if (out_mux != 0xf) {
+ for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) {
+ REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
+ REG_GET(MPCC_BOT_SEL[mpcc_id], MPCC_BOT_SEL, &bot_sel);
+
+ if (bot_sel == mpcc_id)
+ bot_sel = 0xf;
+
+ if ((opp_id == tree->opp_id) && (top_sel != 0xf)) {
+ mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+ mpcc->dpp_id = top_sel;
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+
+ if (out_mux == mpcc_id)
+ tree->opp_list = mpcc;
+ if (bot_sel != 0xf && bot_sel < mpc10->num_mpcc) {
+ bot_mpcc_id = bot_sel;
+ REG_GET(MPCC_OPP_ID[bot_mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_TOP_SEL[bot_mpcc_id], MPCC_TOP_SEL, &top_sel);
+ if ((opp_id == tree->opp_id) && (top_sel != 0xf)) {
+ struct mpcc *mpcc_bottom = mpc1_get_mpcc(mpc, bot_mpcc_id);
+
+ mpcc->mpcc_bot = mpcc_bottom;
+ }
+ }
+ }
+ }
+ }
}
const struct mpc_funcs dcn10_mpc_funcs = {
- .add = mpc10_mpcc_add,
- .remove = mpc10_mpcc_remove,
- .wait_for_idle = mpc10_assert_idle_mpcc,
- .update_blend_mode = mpc10_update_blend_mode,
- .get_opp_id = mpc10_get_opp_id,
+ .insert_plane = mpc1_insert_plane,
+ .remove_mpcc = mpc1_remove_mpcc,
+ .mpc_init = mpc1_mpc_init,
+ .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
+ .wait_for_idle = mpc1_assert_idle_mpcc,
+ .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
+ .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
+ .update_blending = mpc1_update_blending,
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
@@ -360,6 +427,8 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
const struct dcn_mpc_mask *mpc_mask,
int num_mpcc)
{
+ int i;
+
mpc10->base.ctx = ctx;
mpc10->base.funcs = &dcn10_mpc_funcs;
@@ -370,5 +439,8 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
mpc10->mpcc_in_use_mask = 0;
mpc10->num_mpcc = num_mpcc;
+
+ for (i = 0; i < MAX_MPCC; i++)
+ mpc1_init_mpcc(&mpc10->base.mpcc_array[i], i);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index e85e1f342266..267a2995ef6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -30,9 +30,6 @@
#define TO_DCN10_MPC(mpc_base) \
container_of(mpc_base, struct dcn10_mpc, base)
-#define MAX_MPCC 6
-#define MAX_OPP 6
-
#define MPC_COMMON_REG_LIST_DCN1_0(inst) \
SRII(MPCC_TOP_SEL, MPCC, inst),\
SRII(MPCC_BOT_SEL, MPCC, inst),\
@@ -42,7 +39,8 @@
SRII(MPCC_BG_G_Y, MPCC, inst),\
SRII(MPCC_BG_R_CR, MPCC, inst),\
SRII(MPCC_BG_B_CB, MPCC, inst),\
- SRII(MPCC_BG_B_CB, MPCC, inst)
+ SRII(MPCC_BG_B_CB, MPCC, inst),\
+ SRII(MPCC_SM_CONTROL, MPCC, inst)
#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
SRII(MUX, MPC_OUT, inst)
@@ -56,6 +54,7 @@
uint32_t MPCC_BG_G_Y[MAX_MPCC]; \
uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
+ uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
uint32_t MUX[MAX_OPP];
#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
@@ -65,12 +64,20 @@
SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_BLND_MODE, mask_sh),\
SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_MULTIPLIED_MODE, mask_sh),\
SF(MPCC0_MPCC_CONTROL, MPCC_BLND_ACTIVE_OVERLAP_ONLY, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_ALPHA, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_GAIN, mask_sh),\
SF(MPCC0_MPCC_STATUS, MPCC_IDLE, mask_sh),\
SF(MPCC0_MPCC_STATUS, MPCC_BUSY, mask_sh),\
SF(MPCC0_MPCC_OPP_ID, MPCC_OPP_ID, mask_sh),\
SF(MPCC0_MPCC_BG_G_Y, MPCC_BG_G_Y, mask_sh),\
SF(MPCC0_MPCC_BG_R_CR, MPCC_BG_R_CR, mask_sh),\
SF(MPCC0_MPCC_BG_B_CB, MPCC_BG_B_CB, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_EN, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_MODE, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FRAME_ALT, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
#define MPC_REG_FIELD_LIST(type) \
@@ -80,12 +87,20 @@
type MPCC_ALPHA_BLND_MODE;\
type MPCC_ALPHA_MULTIPLIED_MODE;\
type MPCC_BLND_ACTIVE_OVERLAP_ONLY;\
+ type MPCC_GLOBAL_ALPHA;\
+ type MPCC_GLOBAL_GAIN;\
type MPCC_IDLE;\
type MPCC_BUSY;\
type MPCC_OPP_ID;\
type MPCC_BG_G_Y;\
type MPCC_BG_R_CR;\
type MPCC_BG_B_CB;\
+ type MPCC_SM_EN;\
+ type MPCC_SM_MODE;\
+ type MPCC_SM_FRAME_ALT;\
+ type MPCC_SM_FIELD_ALT;\
+ type MPCC_SM_FORCE_NEXT_FRAME_POL;\
+ type MPCC_SM_FORCE_NEXT_TOP_POL;\
type MPC_OUT_MUX;
struct dcn_mpc_registers {
@@ -117,23 +132,55 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpcc10,
const struct dcn_mpc_mask *mpc_mask,
int num_mpcc);
-int mpc10_mpcc_add(
- struct mpc *mpc,
- struct mpcc_cfg *cfg);
-
-void mpc10_mpcc_remove(
- struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int dpp_id);
-
-void mpc10_assert_idle_mpcc(
- struct mpc *mpc,
- int id);
-
-void mpc10_update_blend_mode(
- struct mpc *mpc,
- struct mpcc_cfg *cfg);
-int mpc10_get_opp_id(struct mpc *mpc, int mpcc_id);
+struct mpcc *mpc1_insert_plane(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+void mpc1_remove_mpcc(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc);
+
+void mpc1_mpc_init(
+ struct mpc *mpc);
+
+void mpc1_assert_idle_mpcc(
+ struct mpc *mpc,
+ int id);
+
+void mpc1_set_bg_color(
+ struct mpc *mpc,
+ struct tg_color *bg_color,
+ int id);
+
+void mpc1_update_stereo_mix(
+ struct mpc *mpc,
+ struct mpcc_sm_cfg *sm_cfg,
+ int mpcc_id);
+
+bool mpc1_is_mpcc_idle(
+ struct mpc *mpc,
+ int mpcc_id);
+
+void mpc1_assert_mpcc_idle_before_connect(
+ struct mpc *mpc,
+ int mpcc_id);
+
+void mpc1_init_mpcc_list_from_hw(
+ struct mpc *mpc,
+ struct mpc_tree *tree);
+
+struct mpcc *mpc1_get_mpcc(
+ struct mpc *mpc,
+ int mpcc_id);
+
+struct mpcc *mpc1_get_mpcc_for_dpp(
+ struct mpc_tree *tree,
+ int dpp_id);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 6d6f67b7d30e..f6ba0eef4489 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -296,13 +296,75 @@ void opp1_program_fmt(
return;
}
-void opp1_set_stereo_polarity(
- struct output_pixel_processor *opp,
- bool enable, bool rightEyePolarity)
+void opp1_program_stereo(
+ struct output_pixel_processor *opp,
+ bool enable,
+ const struct dc_crtc_timing *timing)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
- REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, enable);
+	uint32_t active_width = timing->h_addressable - timing->h_border_left - timing->h_border_right;
+ uint32_t space1_size = timing->v_total - timing->v_addressable;
+ /* TODO: confirm computation of space2_size */
+ uint32_t space2_size = timing->v_total - timing->v_addressable;
+
+ if (!enable) {
+ active_width = 0;
+ space1_size = 0;
+ space2_size = 0;
+ }
+
+ /* TODO: for which cases should FMT_STEREOSYNC_OVERRIDE be set? */
+ REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, 0);
+
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, active_width);
+
+	/* Program the OPPBUF_3D_VACT_SPACE1_SIZE and OPPBUF_3D_VACT_SPACE2_SIZE registers.
+	 * In 3D progressive (frame-packed) modes, the Vactive space occurs only between
+	 * the two frames, so only OPPBUF_3D_VACT_SPACE1_SIZE needs to be programmed.
+	 * In 3D frame-alternate modes, left and right frames (or top and bottom fields)
+	 * alternate, so OPPBUF_3D_VACT_SPACE2_SIZE is programmed instead.
+	 */
+ if (timing->timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE)
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, space2_size);
+ else
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
+
+ /* TODO: Is programming of OPPBUF_DUMMY_DATA_R/G/B needed? */
+ /*
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_DUMMY_DATA_R, data_r);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
+ OPPBUF_DUMMY_DATA_G, data_g);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
+ OPPBUF_DUMMY_DATA_B, _data_b);
+ */
+}
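
A hedged worked example of the space size arithmetic above, using standard CEA 1080p frame-packing timings (not taken from this patch): with v_total = 1125 and v_addressable = 1080, the vertical active space between the two eye frames is 45 lines.

/* Example values only. */
uint32_t v_total = 1125, v_addressable = 1080;
uint32_t space1_size = v_total - v_addressable;	/* 45 blank lines between frames */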
+
+void opp1_program_oppbuf(
+ struct output_pixel_processor *opp,
+ struct oppbuf_params *oppbuf)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ /* Program the oppbuf active width to be the frame width from mpc */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, oppbuf->active_width);
+
+	/* Specifies the number of segments in multi-segment mode (DP-MSO operation).
+	 * Per the register description: "In 1/2/4 segment mode, specifies the
+	 * horizontal active width in pixels of the display panel. In 4 segment
+	 * split left/right mode, specifies the horizontal 1/2 active width in
+	 * pixels of the display panel. Used to determine segment boundaries in
+	 * multi-segment mode. Used to determine the width of the vertical active
+	 * space in 3D frame packed modes. OPPBUF_ACTIVE_WIDTH must be integer
+	 * divisible by the total number of segments."
+	 */
+	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, oppbuf->mso_segmentation);
+
+	/* Per the register description: "Specifies the number of overlap pixels
+	 * (1-8 overlapping pixels supported), used in multi-segment mode
+	 * (DP-MSO operation)."
+	 */
+	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, oppbuf->mso_overlap_pixel_num);
+
+	/* Per the register description: "Specifies the number of times a pixel is
+	 * replicated (0-15 pixel replications supported). A value of 0 disables
+	 * replication. The total number of times a pixel is output is
+	 * OPPBUF_PIXEL_REPETITION + 1."
+	 */
+	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
}
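
The quoted register description requires OPPBUF_ACTIVE_WIDTH to be evenly divisible by the segment count in DP-MSO operation. A hedged validation sketch; the helper name and the assumption that mso_segmentation encodes 1/2/4 segments as a power-of-two field are illustrative, not from this patch:

/* Assumed encoding: segmentation field 0/1/2 -> 1/2/4 segments. */
static bool oppbuf_mso_width_valid(const struct oppbuf_params *oppbuf)
{
	uint32_t num_segments = 1u << oppbuf->mso_segmentation;

	return (oppbuf->active_width % num_segments) == 0;
}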
/*****************************************/
@@ -319,7 +381,7 @@ static struct opp_funcs dcn10_opp_funcs = {
.opp_set_dyn_expansion = opp1_set_dyn_expansion,
.opp_program_fmt = opp1_program_fmt,
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
- .opp_set_stereo_polarity = opp1_set_stereo_polarity,
+ .opp_program_stereo = opp1_program_stereo,
.opp_destroy = opp1_destroy
};
@@ -330,6 +392,7 @@ void dcn10_opp_construct(struct dcn10_opp *oppn10,
const struct dcn10_opp_shift *opp_shift,
const struct dcn10_opp_mask *opp_mask)
{
+
oppn10->base.ctx = ctx;
oppn10->base.inst = inst;
oppn10->base.funcs = &dcn10_opp_funcs;
@@ -338,4 +401,3 @@ void dcn10_opp_construct(struct dcn10_opp *oppn10,
oppn10->opp_shift = opp_shift;
oppn10->opp_mask = opp_mask;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index f3c298ec37fb..bc5058af6266 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -41,7 +41,10 @@
SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
SRI(FMT_CLAMP_CNTL, FMT, id), \
SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
- SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id)
+ SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id), \
+ SRI(OPPBUF_CONTROL, OPPBUF, id),\
+ SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \
+ SRI(OPPBUF_3D_PARAMETERS_1, OPPBUF, id)
#define OPP_REG_LIST_DCN10(id) \
OPP_REG_LIST_DCN(id)
@@ -54,7 +57,11 @@
uint32_t FMT_DITHER_RAND_B_SEED; \
uint32_t FMT_CLAMP_CNTL; \
uint32_t FMT_DYNAMIC_EXP_CNTL; \
- uint32_t FMT_MAP420_MEMORY_CONTROL;
+ uint32_t FMT_MAP420_MEMORY_CONTROL; \
+ uint32_t OPPBUF_CONTROL; \
+ uint32_t OPPBUF_CONTROL1; \
+ uint32_t OPPBUF_3D_PARAMETERS_0; \
+ uint32_t OPPBUF_3D_PARAMETERS_1
#define OPP_MASK_SH_LIST_DCN(mask_sh) \
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
@@ -78,10 +85,16 @@
OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh), \
OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh), \
OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh), \
- OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh)
+ OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, mask_sh)
#define OPP_MASK_SH_LIST_DCN10(mask_sh) \
- OPP_MASK_SH_LIST_DCN(mask_sh)
+ OPP_MASK_SH_LIST_DCN(mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, mask_sh)
#define OPP_DCN10_REG_FIELD_LIST(type) \
type FMT_TRUNCATE_EN; \
@@ -105,18 +118,25 @@
type FMT_DYNAMIC_EXP_EN; \
type FMT_DYNAMIC_EXP_MODE; \
type FMT_MAP420MEM_PWR_FORCE; \
- type FMT_STEREOSYNC_OVERRIDE;
+ type FMT_STEREOSYNC_OVERRIDE; \
+ type OPPBUF_ACTIVE_WIDTH;\
+ type OPPBUF_PIXEL_REPETITION;\
+ type OPPBUF_DISPLAY_SEGMENTATION;\
+ type OPPBUF_OVERLAP_PIXEL_NUM;\
+ type OPPBUF_NUM_SEGMENT_PADDED_PIXELS; \
+ type OPPBUF_3D_VACT_SPACE1_SIZE; \
+ type OPPBUF_3D_VACT_SPACE2_SIZE
struct dcn10_opp_registers {
- OPP_COMMON_REG_VARIABLE_LIST
+ OPP_COMMON_REG_VARIABLE_LIST;
};
struct dcn10_opp_shift {
- OPP_DCN10_REG_FIELD_LIST(uint8_t)
+ OPP_DCN10_REG_FIELD_LIST(uint8_t);
};
struct dcn10_opp_mask {
- OPP_DCN10_REG_FIELD_LIST(uint32_t)
+ OPP_DCN10_REG_FIELD_LIST(uint32_t);
};
struct dcn10_opp {
@@ -151,9 +171,10 @@ void opp1_program_bit_depth_reduction(
struct output_pixel_processor *opp,
const struct bit_depth_reduction_params *params);
-void opp1_set_stereo_polarity(
- struct output_pixel_processor *opp,
- bool enable, bool rightEyePolarity);
+void opp1_program_stereo(
+ struct output_pixel_processor *opp,
+ bool enable,
+ const struct dc_crtc_timing *timing);
void opp1_destroy(struct output_pixel_processor **opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 73ff78f9cae1..4bf64d1b2c60 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -23,19 +23,20 @@
*
*/
+
#include "reg_helper.h"
-#include "dcn10_timing_generator.h"
+#include "dcn10_optc.h"
#include "dc.h"
#define REG(reg)\
- tgn10->tg_regs->reg
+ optc1->tg_regs->reg
#define CTX \
- tgn10->base.ctx
+ optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
- tgn10->tg_shift->field_name, tgn10->tg_mask->field_name
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100
@@ -45,8 +46,8 @@
 * This is a workaround for a bug that has existed since R5xx and has not been
 * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
*/
-static void tgn10_apply_front_porch_workaround(
- struct timing_generator *tg,
+static void optc1_apply_front_porch_workaround(
+ struct timing_generator *optc,
struct dc_crtc_timing *timing)
{
if (timing->flags.INTERLACE == 1) {
@@ -58,30 +59,30 @@ static void tgn10_apply_front_porch_workaround(
}
}
-static void tgn10_program_global_sync(
- struct timing_generator *tg)
+void optc1_program_global_sync(
+ struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
- if (tg->dlg_otg_param.vstartup_start == 0) {
+ if (optc->dlg_otg_param.vstartup_start == 0) {
BREAK_TO_DEBUGGER();
return;
}
REG_SET(OTG_VSTARTUP_PARAM, 0,
- VSTARTUP_START, tg->dlg_otg_param.vstartup_start);
+ VSTARTUP_START, optc->dlg_otg_param.vstartup_start);
REG_SET_2(OTG_VUPDATE_PARAM, 0,
- VUPDATE_OFFSET, tg->dlg_otg_param.vupdate_offset,
- VUPDATE_WIDTH, tg->dlg_otg_param.vupdate_width);
+ VUPDATE_OFFSET, optc->dlg_otg_param.vupdate_offset,
+ VUPDATE_WIDTH, optc->dlg_otg_param.vupdate_width);
REG_SET(OTG_VREADY_PARAM, 0,
- VREADY_OFFSET, tg->dlg_otg_param.vready_offset);
+ VREADY_OFFSET, optc->dlg_otg_param.vready_offset);
}
-static void tgn10_disable_stereo(struct timing_generator *tg)
+static void optc1_disable_stereo(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_STEREO_CONTROL, 0,
OTG_STEREO_EN, 0);
@@ -90,11 +91,6 @@ static void tgn10_disable_stereo(struct timing_generator *tg)
OTG_3D_STRUCTURE_EN, 0,
OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
-
- REG_UPDATE(OPPBUF_CONTROL,
- OPPBUF_ACTIVE_WIDTH, 0);
- REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
- OPPBUF_3D_VACT_SPACE1_SIZE, 0);
}
/**
@@ -102,8 +98,8 @@ static void tgn10_disable_stereo(struct timing_generator *tg)
* Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
* Including SYNC. Call BIOS command table to program Timings.
*/
-static void tgn10_program_timing(
- struct timing_generator *tg,
+void optc1_program_timing(
+ struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
bool use_vbios)
{
@@ -121,10 +117,10 @@ static void tgn10_program_timing(
uint32_t h_div_2;
int32_t vertical_line_start;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
patched_crtc_timing = *dc_crtc_timing;
- tgn10_apply_front_porch_workaround(tg, &patched_crtc_timing);
+ optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
/* Load horizontal timing */
@@ -217,7 +213,7 @@ static void tgn10_program_timing(
	/* Use OTG_VERTICAL_INTERRUPT2 to replace the VUPDATE interrupt,
	 * program the reg for the interrupt position.
*/
- vertical_line_start = asic_blank_end - tg->dlg_otg_param.vstartup_start + 1;
+ vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
if (vertical_line_start < 0) {
ASSERT(0);
vertical_line_start = 0;
@@ -233,26 +229,25 @@ static void tgn10_program_timing(
OTG_V_SYNC_A_POL, v_sync_polarity);
v_init = asic_blank_start;
- if (tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
- tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- tg->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
+ if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ optc->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
start_point = 1;
if (patched_crtc_timing.flags.INTERLACE == 1)
field_num = 1;
}
v_fp2 = 0;
- if (tg->dlg_otg_param.vstartup_start > asic_blank_end)
- v_fp2 = tg->dlg_otg_param.vstartup_start > asic_blank_end;
+ if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
+		v_fp2 = optc->dlg_otg_param.vstartup_start - asic_blank_end;
/* Interlace */
if (patched_crtc_timing.flags.INTERLACE == 1) {
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 1);
v_init = v_init / 2;
- if ((tg->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
+ if ((optc->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
v_fp2 = v_fp2 / 2;
- }
- else
+ } else
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 0);
@@ -270,13 +265,13 @@ static void tgn10_program_timing(
OTG_START_POINT_CNTL, start_point,
OTG_FIELD_NUMBER_CNTL, field_num);
- tgn10_program_global_sync(tg);
+ optc1_program_global_sync(optc);
/* TODO
* patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
* program_horz_count_by_2
* for DVI 30bpp mode, 0 otherwise
- * program_horz_count_by_2(tg, &patched_crtc_timing);
+ * program_horz_count_by_2(optc, &patched_crtc_timing);
*/
/* Enable stereo - only when we need to pack 3D frame. Other types
@@ -290,9 +285,9 @@ static void tgn10_program_timing(
}
-static void tgn10_set_blank_data_double_buffer(struct timing_generator *tg, bool enable)
+static void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t blank_data_double_buffer_enable = enable ? 1 : 0;
@@ -304,9 +299,9 @@ static void tgn10_set_blank_data_double_buffer(struct timing_generator *tg, bool
* unblank_crtc
* Call ASIC Control Object to UnBlank CRTC.
*/
-static void tgn10_unblank_crtc(struct timing_generator *tg)
+static void optc1_unblank_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t vertical_interrupt_enable = 0;
REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
@@ -316,7 +311,7 @@ static void tgn10_unblank_crtc(struct timing_generator *tg)
* this check will be removed.
*/
if (vertical_interrupt_enable)
- tgn10_set_blank_data_double_buffer(tg, true);
+ optc1_set_blank_data_double_buffer(optc, true);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 0,
@@ -328,36 +323,29 @@ static void tgn10_unblank_crtc(struct timing_generator *tg)
* Call ASIC Control Object to Blank CRTC.
*/
-static void tgn10_blank_crtc(struct timing_generator *tg)
+static void optc1_blank_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 1,
OTG_BLANK_DE_MODE, 0);
- /* todo: why are we waiting for BLANK_DATA_EN? shouldn't we be waiting
- * for status?
- */
- REG_WAIT(OTG_BLANK_CONTROL,
- OTG_BLANK_DATA_EN, 1,
- 1, 100000);
-
- tgn10_set_blank_data_double_buffer(tg, false);
+ optc1_set_blank_data_double_buffer(optc, false);
}
-static void tgn10_set_blank(struct timing_generator *tg,
+void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking)
{
if (enable_blanking)
- tgn10_blank_crtc(tg);
+ optc1_blank_crtc(optc);
else
- tgn10_unblank_crtc(tg);
+ optc1_unblank_crtc(optc);
}
-static bool tgn10_is_blanked(struct timing_generator *tg)
+bool optc1_is_blanked(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t blank_en;
uint32_t blank_state;
@@ -368,9 +356,9 @@ static bool tgn10_is_blanked(struct timing_generator *tg)
return blank_en && blank_state;
}
-static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
+void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (enable) {
REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
@@ -403,19 +391,19 @@ static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
* Enable CRTC
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
-static bool tgn10_enable_crtc(struct timing_generator *tg)
+static bool optc1_enable_crtc(struct timing_generator *optc)
{
/* TODO FPGA wait for answer
* OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
* OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
*/
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
	/* opp instance for OTG. For DCN1.0, ODM is removed.
	 * OPP and OPTC should have a 1:1 mapping
*/
REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
- OPTC_SRC_SEL, tg->inst);
+ OPTC_SRC_SEL, optc->inst);
/* VTG enable first is for HW workaround */
REG_UPDATE(CONTROL,
@@ -430,9 +418,9 @@ static bool tgn10_enable_crtc(struct timing_generator *tg)
}
/* disable_crtc - call ASIC Control Object to disable Timing generator. */
-static bool tgn10_disable_crtc(struct timing_generator *tg)
+bool optc1_disable_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* disable otg request until end of the first line
* in the vertical blank region
@@ -453,11 +441,11 @@ static bool tgn10_disable_crtc(struct timing_generator *tg)
}
-static void tgn10_program_blank_color(
- struct timing_generator *tg,
+void optc1_program_blank_color(
+ struct timing_generator *optc,
const struct tg_color *black_color)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET_3(OTG_BLACK_COLOR, 0,
OTG_BLACK_COLOR_B_CB, black_color->color_b_cb,
@@ -465,15 +453,15 @@ static void tgn10_program_blank_color(
OTG_BLACK_COLOR_R_CR, black_color->color_r_cr);
}
-static bool tgn10_validate_timing(
- struct timing_generator *tg,
+bool optc1_validate_timing(
+ struct timing_generator *optc,
const struct dc_crtc_timing *timing)
{
uint32_t interlace_factor;
uint32_t v_blank;
uint32_t h_blank;
uint32_t min_v_blank;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
ASSERT(timing != NULL);
@@ -503,19 +491,19 @@ static bool tgn10_validate_timing(
* needs more than 8192 horizontal and
* more than 8192 vertical total pixels)
*/
- if (timing->h_total > tgn10->max_h_total ||
- timing->v_total > tgn10->max_v_total)
+ if (timing->h_total > optc1->max_h_total ||
+ timing->v_total > optc1->max_v_total)
return false;
- if (h_blank < tgn10->min_h_blank)
+ if (h_blank < optc1->min_h_blank)
return false;
- if (timing->h_sync_width < tgn10->min_h_sync_width ||
- timing->v_sync_width < tgn10->min_v_sync_width)
+ if (timing->h_sync_width < optc1->min_h_sync_width ||
+ timing->v_sync_width < optc1->min_v_sync_width)
return false;
- min_v_blank = timing->flags.INTERLACE?tgn10->min_v_blank_interlace:tgn10->min_v_blank;
+ min_v_blank = timing->flags.INTERLACE?optc1->min_v_blank_interlace:optc1->min_v_blank;
if (v_blank < min_v_blank)
return false;
@@ -532,15 +520,15 @@ static bool tgn10_validate_timing(
* holds the counter of frames.
*
* @param
- * struct timing_generator *tg - [in] timing generator which controls the
+ * struct timing_generator *optc - [in] timing generator which controls the
* desired CRTC
*
* @return
* Counter of frames, which should equal to number of vblanks.
*/
-static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
+uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t frame_count;
REG_GET(OTG_STATUS_FRAME_COUNT,
@@ -549,34 +537,34 @@ static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
return frame_count;
}
-static void tgn10_lock(struct timing_generator *tg)
+void optc1_lock(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_GLOBAL_CONTROL0, 0,
- OTG_MASTER_UPDATE_LOCK_SEL, tg->inst);
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
/* Should be fast, status does not update on maximus */
- if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
}
-static void tgn10_unlock(struct timing_generator *tg)
+void optc1_unlock(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 0);
}
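
optc1_lock()/optc1_unlock() bracket double-buffered register programming: writes made while OTG_MASTER_UPDATE_LOCK is held latch together once the lock is released. A hedged usage sketch mirroring the dcn10_apply_ctx_for_surface() flow shown earlier:

/* Program the pipe tree atomically under the OTG master update lock. */
tg->funcs->lock(tg);
program_all_pipe_in_tree(dc, top_pipe_to_program, context);
tg->funcs->unlock(tg);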
-static void tgn10_get_position(struct timing_generator *tg,
+void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET_2(OTG_STATUS_POSITION,
OTG_HORZ_COUNT, &position->horizontal_count,
@@ -586,12 +574,12 @@ static void tgn10_get_position(struct timing_generator *tg,
OTG_VERT_COUNT_NOM, &position->nominal_vcount);
}
-static bool tgn10_is_counter_moving(struct timing_generator *tg)
+bool optc1_is_counter_moving(struct timing_generator *optc)
{
struct crtc_position position1, position2;
- tg->funcs->get_position(tg, &position1);
- tg->funcs->get_position(tg, &position2);
+ optc->funcs->get_position(optc, &position1);
+ optc->funcs->get_position(optc, &position2);
if (position1.horizontal_count == position2.horizontal_count &&
position1.vertical_count == position2.vertical_count)
@@ -600,10 +588,10 @@ static bool tgn10_is_counter_moving(struct timing_generator *tg)
return true;
}
-static bool tgn10_did_triggered_reset_occur(
- struct timing_generator *tg)
+bool optc1_did_triggered_reset_occur(
+ struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t occurred_force, occurred_vsync;
REG_GET(OTG_FORCE_COUNT_NOW_CNTL,
@@ -615,9 +603,9 @@ static bool tgn10_did_triggered_reset_occur(
return occurred_vsync != 0 || occurred_force != 0;
}
-static void tgn10_disable_reset_trigger(struct timing_generator *tg)
+void optc1_disable_reset_trigger(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_WRITE(OTG_TRIGA_CNTL, 0);
@@ -628,9 +616,9 @@ static void tgn10_disable_reset_trigger(struct timing_generator *tg)
OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, 1);
}
-static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_tg_inst)
+void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t falling_edge;
REG_GET(OTG_V_SYNC_A_CNTL,
@@ -662,12 +650,12 @@ static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_t
OTG_FORCE_COUNT_NOW_MODE, 2);
}
-void tgn10_enable_crtc_reset(
- struct timing_generator *tg,
+void optc1_enable_crtc_reset(
+ struct timing_generator *optc,
int source_tg_inst,
struct crtc_trigger_info *crtc_tp)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t falling_edge = 0;
uint32_t rising_edge = 0;
@@ -707,10 +695,10 @@ void tgn10_enable_crtc_reset(
}
}
-static void tgn10_wait_for_state(struct timing_generator *tg,
+void optc1_wait_for_state(struct timing_generator *optc,
enum crtc_state state)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
switch (state) {
case CRTC_STATE_VBLANK:
@@ -730,8 +718,8 @@ static void tgn10_wait_for_state(struct timing_generator *tg,
}
}
-static void tgn10_set_early_control(
- struct timing_generator *tg,
+void optc1_set_early_control(
+ struct timing_generator *optc,
uint32_t early_cntl)
{
 /* asic design change: this control is no longer needed
@@ -740,11 +728,11 @@ static void tgn10_set_early_control(
}
-static void tgn10_set_static_screen_control(
- struct timing_generator *tg,
+void optc1_set_static_screen_control(
+ struct timing_generator *optc,
uint32_t value)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
 /* Bit 8 is no longer applicable in RV for the PSR case;
 * set bit 8 to 0 if given
@@ -769,11 +757,11 @@ static void tgn10_set_static_screen_control(
*
*****************************************************************************
*/
-static void tgn10_set_drr(
- struct timing_generator *tg,
+void optc1_set_drr(
+ struct timing_generator *optc,
const struct drr_params *params)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (params != NULL &&
params->vertical_total_max > 0 &&
@@ -806,15 +794,15 @@ static void tgn10_set_drr(
}
}
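A hedged usage sketch of set_drr(); the v_total values below are hypothetical and would normally be derived from the stream's minimum and maximum refresh timings:

static void enable_48_to_60hz_window(struct timing_generator *optc)
{
	struct drr_params params = {
		.vertical_total_min = 1125,	/* hypothetical v_total at the 60 Hz ceiling */
		.vertical_total_max = 1406,	/* hypothetical v_total at the 48 Hz floor */
	};

	optc->funcs->set_drr(optc, &params);
}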
-static void tgn10_set_test_pattern(
- struct timing_generator *tg,
+static void optc1_set_test_pattern(
+ struct timing_generator *optc,
 /* TODO: replace 'controller_dp_test_pattern' with 'test_pattern_mode',
 * because this is not DP-specific (the DP-specific handling probably
 * belongs in the DP encoder) */
enum controller_dp_test_pattern test_pattern,
enum dc_color_depth color_depth)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
enum test_pattern_color_format bit_depth;
enum test_pattern_dyn_range dyn_range;
enum test_pattern_mode mode;
@@ -1065,35 +1053,30 @@ static void tgn10_set_test_pattern(
}
}
-static void tgn10_get_crtc_scanoutpos(
- struct timing_generator *tg,
+void optc1_get_crtc_scanoutpos(
+ struct timing_generator *optc,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
uint32_t *h_position,
uint32_t *v_position)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
struct crtc_position position;
REG_GET_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, v_blank_start,
OTG_V_BLANK_END, v_blank_end);
- tgn10_get_position(tg, &position);
+ optc1_get_position(optc, &position);
*h_position = position.horizontal_count;
*v_position = position.vertical_count;
}
-
-
-static void tgn10_enable_stereo(struct timing_generator *tg,
+static void optc1_enable_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
-
- uint32_t active_width = timing->h_addressable;
- uint32_t space1_size = timing->v_total - timing->v_addressable;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (flags) {
uint32_t stereo_en;
@@ -1121,29 +1104,23 @@ static void tgn10_enable_stereo(struct timing_generator *tg,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
}
-
- REG_UPDATE(OPPBUF_CONTROL,
- OPPBUF_ACTIVE_WIDTH, active_width);
-
- REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
- OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
}
-static void tgn10_program_stereo(struct timing_generator *tg,
+void optc1_program_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
if (flags->PROGRAM_STEREO)
- tgn10_enable_stereo(tg, timing, flags);
+ optc1_enable_stereo(optc, timing, flags);
else
- tgn10_disable_stereo(tg);
+ optc1_disable_stereo(optc);
}
-static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
+bool optc1_is_stereo_left_eye(struct timing_generator *optc)
{
bool ret = false;
uint32_t left_eye = 0;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OTG_STEREO_STATUS,
OTG_STEREO_CURRENT_EYE, &left_eye);
@@ -1155,7 +1132,7 @@ static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
return ret;
}
-void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s)
{
REG_GET(OTG_CONTROL,
@@ -1199,17 +1176,22 @@ void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
}
-static void tgn10_tg_init(struct timing_generator *tg)
+static void optc1_clear_optc_underflow(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
- tgn10_set_blank_data_double_buffer(tg, true);
REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
}
-static bool tgn10_is_tg_enabled(struct timing_generator *tg)
+static void optc1_tg_init(struct timing_generator *optc)
+{
+ optc1_set_blank_data_double_buffer(optc, true);
+ optc1_clear_optc_underflow(optc);
+}
+
+static bool optc1_is_tg_enabled(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t otg_enabled = 0;
REG_GET(OTG_CONTROL, OTG_MASTER_EN, &otg_enabled);
@@ -1217,50 +1199,65 @@ static bool tgn10_is_tg_enabled(struct timing_generator *tg)
return (otg_enabled != 0);
}
+
+static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t underflow_occurred = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS,
+ &underflow_occurred);
+
+ return (underflow_occurred == 1);
+}
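These two hooks pair with the false_optc_underflow workaround flag added to dce_hwseq_wa later in this patch; a hedged sketch of how a hw-sequencer call site might combine them (the real caller lives outside this excerpt):

static void apply_false_underflow_wa(struct dce_hwseq *hws,
		struct timing_generator *tg)
{
	if (hws->wa.false_optc_underflow &&
			tg->funcs->is_optc_underflow_occurred(tg))
		tg->funcs->clear_optc_underflow(tg);
}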
+
static const struct timing_generator_funcs dcn10_tg_funcs = {
- .validate_timing = tgn10_validate_timing,
- .program_timing = tgn10_program_timing,
- .program_global_sync = tgn10_program_global_sync,
- .enable_crtc = tgn10_enable_crtc,
- .disable_crtc = tgn10_disable_crtc,
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc1_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
 /* used by enable_timing_synchronization. Not needed for FPGA */
- .is_counter_moving = tgn10_is_counter_moving,
- .get_position = tgn10_get_position,
- .get_frame_count = tgn10_get_vblank_counter,
- .get_scanoutpos = tgn10_get_crtc_scanoutpos,
- .set_early_control = tgn10_set_early_control,
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .set_early_control = optc1_set_early_control,
 /* used by enable_timing_synchronization. Not needed for FPGA */
- .wait_for_state = tgn10_wait_for_state,
- .set_blank = tgn10_set_blank,
- .is_blanked = tgn10_is_blanked,
- .set_blank_color = tgn10_program_blank_color,
- .did_triggered_reset_occur = tgn10_did_triggered_reset_occur,
- .enable_reset_trigger = tgn10_enable_reset_trigger,
- .enable_crtc_reset = tgn10_enable_crtc_reset,
- .disable_reset_trigger = tgn10_disable_reset_trigger,
- .lock = tgn10_lock,
- .unlock = tgn10_unlock,
- .enable_optc_clock = tgn10_enable_optc_clock,
- .set_drr = tgn10_set_drr,
- .set_static_screen_control = tgn10_set_static_screen_control,
- .set_test_pattern = tgn10_set_test_pattern,
- .program_stereo = tgn10_program_stereo,
- .is_stereo_left_eye = tgn10_is_stereo_left_eye,
- .set_blank_data_double_buffer = tgn10_set_blank_data_double_buffer,
- .tg_init = tgn10_tg_init,
- .is_tg_enabled = tgn10_is_tg_enabled,
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank = optc1_set_blank,
+ .is_blanked = optc1_is_blanked,
+ .set_blank_color = optc1_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc1_lock,
+ .unlock = optc1_unlock,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc1_set_drr,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .set_test_pattern = optc1_set_test_pattern,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
+ .tg_init = optc1_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
};
-void dcn10_timing_generator_init(struct dcn10_timing_generator *tgn10)
+void dcn10_timing_generator_init(struct optc *optc1)
{
- tgn10->base.funcs = &dcn10_tg_funcs;
+ optc1->base.funcs = &dcn10_tg_funcs;
- tgn10->max_h_total = tgn10->tg_mask->OTG_H_TOTAL + 1;
- tgn10->max_v_total = tgn10->tg_mask->OTG_V_TOTAL + 1;
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
- tgn10->min_h_blank = 32;
- tgn10->min_v_blank = 3;
- tgn10->min_v_blank_interlace = 5;
- tgn10->min_h_sync_width = 8;
- tgn10->min_v_sync_width = 1;
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 8;
+ optc1->min_v_sync_width = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index bb1cbfdc3554..a3c7c2012f05 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -29,7 +29,7 @@
#include "timing_generator.h"
#define DCN10TG_FROM_TG(tg)\
- container_of(tg, struct dcn10_timing_generator, base)
+ container_of(tg, struct optc, base)
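DCN10TG_FROM_TG() is the usual container_of() downcast: given a pointer to the embedded timing_generator base, it recovers the wrapping struct optc. An illustration (hypothetical function):

static void example(struct timing_generator *tg)
{
	struct optc *optc1 = DCN10TG_FROM_TG(tg);

	/* optc1 now exposes the register maps and limits that the
	 * abstract timing_generator interface hides. */
	(void)optc1->max_h_total;
}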
#define TG_COMMON_REG_LIST_DCN(inst) \
SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
@@ -70,8 +70,6 @@
SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
- SRI(OPPBUF_CONTROL, OPPBUF, inst),\
- SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, inst),\
SRI(CONTROL, VTG, inst),\
SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
@@ -84,7 +82,7 @@
SRI(OTG_TEST_PATTERN_COLOR, OTG, inst)
-struct dcn_tg_registers {
+struct dcn_optc_registers {
uint32_t OTG_VERT_SYNC_CONTROL;
uint32_t OTG_MASTER_UPDATE_MODE;
uint32_t OTG_GSL_CONTROL;
@@ -129,9 +127,11 @@ struct dcn_tg_registers {
uint32_t OPTC_INPUT_CLOCK_CONTROL;
uint32_t OPTC_DATA_SOURCE_SELECT;
uint32_t OPTC_INPUT_GLOBAL_CONTROL;
- uint32_t OPPBUF_CONTROL;
- uint32_t OPPBUF_3D_PARAMETERS_0;
uint32_t CONTROL;
+ uint32_t OTG_GSL_WINDOW_X;
+ uint32_t OTG_GSL_WINDOW_Y;
+ uint32_t OTG_VUPDATE_KEEPOUT;
+ uint32_t OTG_DSC_START_POSITION;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -211,8 +211,6 @@ struct dcn_tg_registers {
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
- SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
- SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
@@ -332,8 +330,6 @@ struct dcn_tg_registers {
type OPTC_SEG0_SRC_SEL;\
type OPTC_UNDERFLOW_OCCURRED_STATUS;\
type OPTC_UNDERFLOW_CLEAR;\
- type OPPBUF_ACTIVE_WIDTH;\
- type OPPBUF_3D_VACT_SPACE1_SIZE;\
type VTG0_ENABLE;\
type VTG0_FP2;\
type VTG0_VCOUNT_INIT;\
@@ -346,22 +342,35 @@ struct dcn_tg_registers {
type OTG_GSL2_EN;\
type OTG_GSL_MASTER_EN;\
type OTG_GSL_FORCE_DELAY;\
- type OTG_GSL_CHECK_ALL_FIELDS;
+ type OTG_GSL_CHECK_ALL_FIELDS;\
+ type OTG_GSL_WINDOW_START_X;\
+ type OTG_GSL_WINDOW_END_X;\
+ type OTG_GSL_WINDOW_START_Y;\
+ type OTG_GSL_WINDOW_END_Y;\
+ type OTG_RANGE_TIMING_DBUF_UPDATE_MODE;\
+ type OTG_GSL_MASTER_MODE;\
+ type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
+ type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
+ type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
+ type OTG_DSC_START_POSITION_X;\
+ type OTG_DSC_START_POSITION_LINE_NUM;\
+ type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
-struct dcn_tg_shift {
+
+struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
};
-struct dcn_tg_mask {
+struct dcn_optc_mask {
TG_REG_FIELD_LIST(uint32_t)
};
-struct dcn10_timing_generator {
+struct optc {
struct timing_generator base;
- const struct dcn_tg_registers *tg_regs;
- const struct dcn_tg_shift *tg_shift;
- const struct dcn_tg_mask *tg_mask;
+ const struct dcn_optc_registers *tg_regs;
+ const struct dcn_optc_shift *tg_shift;
+ const struct dcn_optc_mask *tg_mask;
enum controller_id controller_id;
@@ -376,7 +385,7 @@ struct dcn10_timing_generator {
uint32_t min_v_blank_interlace;
};
-void dcn10_timing_generator_init(struct dcn10_timing_generator *tg);
+void dcn10_timing_generator_init(struct optc *optc);
struct dcn_otg_state {
uint32_t v_blank_start;
@@ -397,7 +406,77 @@ struct dcn_otg_state {
uint32_t otg_enabled;
};
-void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);
+bool optc1_validate_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *timing);
+
+void optc1_program_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ bool use_vbios);
+
+void optc1_program_global_sync(
+ struct timing_generator *optc);
+
+bool optc1_disable_crtc(struct timing_generator *optc);
+
+bool optc1_is_counter_moving(struct timing_generator *optc);
+
+void optc1_get_position(struct timing_generator *optc,
+ struct crtc_position *position);
+
+uint32_t optc1_get_vblank_counter(struct timing_generator *optc);
+
+void optc1_get_crtc_scanoutpos(
+ struct timing_generator *optc,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+void optc1_set_early_control(
+ struct timing_generator *optc,
+ uint32_t early_cntl);
+
+void optc1_wait_for_state(struct timing_generator *optc,
+ enum crtc_state state);
+
+void optc1_set_blank(struct timing_generator *optc,
+ bool enable_blanking);
+
+bool optc1_is_blanked(struct timing_generator *optc);
+
+void optc1_program_blank_color(
+ struct timing_generator *optc,
+ const struct tg_color *black_color);
+
+bool optc1_did_triggered_reset_occur(
+ struct timing_generator *optc);
+
+void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst);
+
+void optc1_disable_reset_trigger(struct timing_generator *optc);
+
+void optc1_lock(struct timing_generator *optc);
+
+void optc1_unlock(struct timing_generator *optc);
+
+void optc1_enable_optc_clock(struct timing_generator *optc, bool enable);
+
+void optc1_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params);
+
+void optc1_set_static_screen_control(
+ struct timing_generator *optc,
+ uint32_t value);
+
+void optc1_program_stereo(struct timing_generator *optc,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
+
+bool optc1_is_stereo_left_eye(struct timing_generator *optc);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 10cce51d31d2..44825e2c9ebb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -34,7 +34,7 @@
#include "dcn10/dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
#include "dcn10/dcn10_dpp.h"
-#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10_optc.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_opp.h"
@@ -348,18 +348,18 @@ static const struct dcn_mpc_mask mpc_mask = {
#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
-static const struct dcn_tg_registers tg_regs[] = {
+static const struct dcn_optc_registers tg_regs[] = {
tg_regs(0),
tg_regs(1),
tg_regs(2),
tg_regs(3),
};
-static const struct dcn_tg_shift tg_shift = {
+static const struct dcn_optc_shift tg_shift = {
TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};
-static const struct dcn_tg_mask tg_mask = {
+static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
@@ -553,8 +553,8 @@ static struct timing_generator *dcn10_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
- struct dcn10_timing_generator *tgn10 =
- kzalloc(sizeof(struct dcn10_timing_generator), GFP_KERNEL);
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -678,6 +678,7 @@ static struct dce_hwseq *dcn10_hwseq_create(
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN10_253 = true;
+ hws->wa.false_optc_underflow = true;
}
return hws;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 4c31fa54af39..c109b2c34c8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,35 +35,6 @@ static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum
soc->writeback_latency_us = 12.0;
soc->ideal_dram_bw_after_urgent_percent = 80.0;
soc->max_request_size_bytes = 256;
-
- soc->vmin.dcfclk_mhz = 300.0;
- soc->vmin.dispclk_mhz = 608.0;
- soc->vmin.dppclk_mhz = 435.0;
- soc->vmin.dram_bw_per_chan_gbps = 12.8;
- soc->vmin.phyclk_mhz = 540.0;
- soc->vmin.socclk_mhz = 208.0;
-
- soc->vmid.dcfclk_mhz = 600.0;
- soc->vmid.dispclk_mhz = 661.0;
- soc->vmid.dppclk_mhz = 661.0;
- soc->vmid.dram_bw_per_chan_gbps = 12.8;
- soc->vmid.phyclk_mhz = 540.0;
- soc->vmid.socclk_mhz = 208.0;
-
- soc->vnom.dcfclk_mhz = 600.0;
- soc->vnom.dispclk_mhz = 661.0;
- soc->vnom.dppclk_mhz = 661.0;
- soc->vnom.dram_bw_per_chan_gbps = 38.4;
- soc->vnom.phyclk_mhz = 810;
- soc->vnom.socclk_mhz = 208.0;
-
- soc->vmax.dcfclk_mhz = 600.0;
- soc->vmax.dispclk_mhz = 1086.0;
- soc->vmax.dppclk_mhz = 661.0;
- soc->vmax.dram_bw_per_chan_gbps = 38.4;
- soc->vmax.phyclk_mhz = 810.0;
- soc->vmax.socclk_mhz = 208.0;
-
soc->downspread_percent = 0.5;
soc->dram_page_open_time_ns = 50.0;
soc->dram_rw_turnaround_time_ns = 17.5;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 2d9d6298f0d3..aeebd8bee628 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -79,10 +79,6 @@ struct _vcs_dpi_soc_bounding_box_st {
double writeback_latency_us;
double ideal_dram_bw_after_urgent_percent;
unsigned int max_request_size_bytes;
- struct _vcs_dpi_voltage_scaling_st vmin;
- struct _vcs_dpi_voltage_scaling_st vmid;
- struct _vcs_dpi_voltage_scaling_st vnom;
- struct _vcs_dpi_voltage_scaling_st vmax;
double downspread_percent;
double dram_page_open_time_ns;
double dram_rw_turnaround_time_ns;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 1f337ecfeab0..260e113fcc02 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -28,6 +28,15 @@
#include "dml_inline_defs.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
static const unsigned int NumberOfStates = DC__VOLTAGE_STATES;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
index 8ba962df42e6..325dd2b757d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
@@ -27,6 +27,15 @@
#include "display_mode_vba.h"
#include "display_rq_dlg_calc.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 1e4b1e383401..c2037daa8e66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -28,6 +28,15 @@
#include "dml_inline_defs.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
index bc7d8c707221..324239c77958 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
@@ -27,6 +27,16 @@
#include "dc_features.h"
#include "dml_inline_defs.h"
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
{
to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d680b565af6f..d6971054ec07 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -212,7 +212,6 @@ struct pipe_ctx {
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
#endif
- struct dwbc *dwbc;
};
struct resource_context {
@@ -241,6 +240,7 @@ struct dce_bw_output {
struct dcn_bw_clocks {
int dispclk_khz;
+ int dppclk_khz;
bool dppclk_div;
int dcfclk_khz;
int dcfclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 1e231f6de732..132d18d4b293 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -349,10 +349,10 @@ struct dcn_bw_internal_vars {
float dst_x_after_scaler;
float dst_y_after_scaler;
float time_calc;
- float v_update_offset[number_of_planes_minus_one + 1];
+ float v_update_offset[number_of_planes_minus_one + 1][2];
float total_repeater_delay;
- float v_update_width[number_of_planes_minus_one + 1];
- float v_ready_offset[number_of_planes_minus_one + 1];
+ float v_update_width[number_of_planes_minus_one + 1][2];
+ float v_ready_offset[number_of_planes_minus_one + 1][2];
float time_setup;
float extra_latency;
float maximum_vstartup;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index 48217ecfabd4..a83a48494613 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -50,9 +50,9 @@ struct abm_funcs {
bool (*set_backlight_level)(struct abm *abm,
unsigned int backlight_level,
unsigned int frame_ramp,
- unsigned int controller_id);
+ unsigned int controller_id,
+ bool use_smooth_brightness);
unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
- bool (*is_dmcu_initialized)(struct abm *abm);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index b59712b41b81..ce206355461b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -63,6 +63,7 @@ struct dmcu_funcs {
unsigned int wait_loop_number);
void (*get_psr_wait_loop)(struct dmcu *dmcu,
unsigned int *psr_wait_loop_number);
+ bool (*is_dmcu_initialized)(struct dmcu *dmcu);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index ccb4896975c2..25edbde6163e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -68,7 +68,7 @@ struct dpp_funcs {
void (*dpp_set_csc_adjustment)(
struct dpp *dpp,
- const struct out_csc_color_matrix *tbl_entry);
+ const uint16_t *regval);
void (*dpp_power_on_regamma_lut)(
struct dpp *dpp,
@@ -122,7 +122,7 @@ struct dpp_funcs {
void (*set_cursor_attributes)(
struct dpp *dpp_base,
- const struct dc_cursor_attributes *attr);
+ enum dc_cursor_color_format color_format);
void (*set_cursor_position)(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 49b12f602e79..b7c7e70022e4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -28,6 +28,20 @@
#include "mem_input.h"
+
+enum cursor_pitch {
+ CURSOR_PITCH_64_PIXELS = 0,
+ CURSOR_PITCH_128_PIXELS,
+ CURSOR_PITCH_256_PIXELS
+};
+
+enum cursor_lines_per_chunk {
+ CURSOR_LINE_PER_CHUNK_2 = 1,
+ CURSOR_LINE_PER_CHUNK_4,
+ CURSOR_LINE_PER_CHUNK_8,
+ CURSOR_LINE_PER_CHUNK_16
+};
+
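The cursor_pitch values encode a pitch in pixels rather than storing it directly; a hedged helper sketch (hypothetical, assuming only 64/128/256-pixel pitches are legal):

static enum cursor_pitch hw_pitch_from_pixels(unsigned int pitch_px)
{
	switch (pitch_px) {
	case 64:
		return CURSOR_PITCH_64_PIXELS;
	case 128:
		return CURSOR_PITCH_128_PIXELS;
	default:
		return CURSOR_PITCH_256_PIXELS;
	}
}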
struct hubp {
struct hubp_funcs *funcs;
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index ddc56700109b..e3f0b4056318 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -126,36 +126,12 @@ struct default_adjustment {
bool force_hw_default;
};
-struct out_csc_color_matrix {
- enum dc_color_space color_space;
- uint16_t regval[12];
-};
-struct output_csc_matrix {
+struct out_csc_color_matrix {
enum dc_color_space color_space;
uint16_t regval[12];
};
-static const struct output_csc_matrix output_csc_matrix[] = {
- { COLOR_SPACE_SRGB,
- { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- { COLOR_SPACE_SRGB_LIMITED,
- { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} },
- { COLOR_SPACE_YCBCR601,
- { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45,
- 0xF6B7, 0xE04, 0x1004} },
- { COLOR_SPACE_YCBCR709,
- { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
- 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
-
- /* TODO: correct values below */
- { COLOR_SPACE_YCBCR601_LIMITED,
- { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
- 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
- { COLOR_SPACE_YCBCR709_LIMITED,
- { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
- 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
-};
enum opp_regamma {
OPP_REGAMMA_BYPASS = 0,
@@ -178,4 +154,41 @@ struct dc_bias_and_scale {
uint16_t bias_blue;
};
+enum test_pattern_dyn_range {
+ TEST_PATTERN_DYN_RANGE_VESA = 0,
+ TEST_PATTERN_DYN_RANGE_CEA
+};
+
+enum test_pattern_mode {
+ TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
+ TEST_PATTERN_MODE_VERTICALBARS,
+ TEST_PATTERN_MODE_HORIZONTALBARS,
+ TEST_PATTERN_MODE_SINGLERAMP_RGB,
+ TEST_PATTERN_MODE_DUALRAMP_RGB
+};
+
+enum test_pattern_color_format {
+ TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
+ TEST_PATTERN_COLOR_FORMAT_BPC_8,
+ TEST_PATTERN_COLOR_FORMAT_BPC_10,
+ TEST_PATTERN_COLOR_FORMAT_BPC_12
+};
+
+enum controller_dp_test_pattern {
+ CONTROLLER_DP_TEST_PATTERN_D102 = 0,
+ CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
+ CONTROLLER_DP_TEST_PATTERN_PRBS7,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
+};
+
#endif /* __DAL_HW_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 72ea33526a5c..23a8d5e53a89 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -26,7 +26,10 @@
#define __DC_MPCC_H__
#include "dc_hw_types.h"
-#include "opp.h"
+#include "hw_shared.h"
+
+#define MAX_MPCC 6
+#define MAX_OPP 6
enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_DISABLE = 0,
@@ -34,45 +37,151 @@ enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_COEF_B
};
-struct mpcc_cfg {
- int dpp_id;
- int opp_id;
- struct mpc_tree_cfg *tree_cfg;
- unsigned int z_index;
- struct tg_color black_color;
- bool per_pixel_alpha;
- bool pre_multiplied_alpha;
+enum mpcc_blend_mode {
+ MPCC_BLEND_MODE_BYPASS,
+ MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH,
+ MPCC_BLEND_MODE_TOP_LAYER_ONLY,
+ MPCC_BLEND_MODE_TOP_BOT_BLENDING
+};
+
+enum mpcc_alpha_blend_mode {
+ MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA,
+ MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN,
+ MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA
+};
+
+/*
+ * MPCC blending configuration
+ */
+struct mpcc_blnd_cfg {
+ struct tg_color black_color; /* background color */
+ enum mpcc_alpha_blend_mode alpha_mode; /* alpha blend mode */
+ bool pre_multiplied_alpha; /* alpha pre-multiplied mode flag */
+ int global_gain;
+ int global_alpha;
+ bool overlap_only;
+};
+
+struct mpcc_sm_cfg {
+ bool enable;
+ /* 0 - single plane, 2 - row subsampling, 4 - column subsampling, 6 - checkerboard subsampling */
+ int sm_mode;
+ /* 0 - disable frame alternate, 1 - enable frame alternate */
+ bool frame_alt;
+ /* 0 - disable field alternate, 1 - enable field alternate */
+ bool field_alt;
+ /* 0 - no force, 2 - force frame polarity from top, 3 - force frame polarity from bottom */
+ int force_next_frame_polarity;
+ /* 0 - no force, 2 - force field polarity from top, 3 - force field polarity from bottom */
+ int force_next_field_polarity;
+};
+
+/*
+ * MPCC connection and blending configuration for a single MPCC instance.
+ * This struct is used as a node in an MPC tree.
+ */
+struct mpcc {
+ int mpcc_id; /* MPCC physical instance */
+ int dpp_id; /* DPP input to this MPCC */
+ struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */
+ struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */
+ struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */
+};
+
+/*
+ * MPC tree represents all MPCC connections for a pipe.
+ */
+struct mpc_tree {
+ int opp_id; /* The OPP instance that owns this MPC tree */
+ struct mpcc *opp_list; /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */
};
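An MPC tree is effectively a singly linked list from the OPP endpoint down through the mpcc_bot pointers. A hedged traversal sketch (hypothetical helper, not part of this patch):

static struct mpcc *find_bottom_layer(struct mpc_tree *tree)
{
	struct mpcc *mpcc = tree->opp_list;

	while (mpcc != NULL && mpcc->mpcc_bot != NULL)
		mpcc = mpcc->mpcc_bot;

	return mpcc;	/* NULL when the tree has no layers */
}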
struct mpc {
const struct mpc_funcs *funcs;
struct dc_context *ctx;
+
+ struct mpcc mpcc_array[MAX_MPCC];
};
struct mpc_funcs {
- int (*add)(struct mpc *mpc, struct mpcc_cfg *cfg);
+ /*
+ * Insert a DPP into the MPC tree at the specified blending position.
+ * Only used for planes that are part of the blending chain for OPP output
+ * (see the usage sketch after this struct).
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that the plane will be added to.
+ * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ * stereo mix must be disabled for the very bottom layer of the tree config.
+ * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
+ * [in] dpp_id - DPP instance for the plane to be added.
+ * [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return: struct mpcc* - MPCC that was added.
+ */
+ struct mpcc* (*insert_plane)(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
- void (*remove)(struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int mpcc_inst);
+ /*
+ * Remove a specified MPCC from the MPC tree.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that the plane will be removed from.
+ * [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return: void
+ */
+ void (*remove_mpcc)(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc);
- void (*wait_for_idle)(struct mpc *mpc, int id);
+ /*
+ * Reset the MPCC HW status by disconnecting all muxes.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ *
+ * Return: void
+ */
+ void (*mpc_init)(struct mpc *mpc);
- void (*update_blend_mode)(struct mpc *mpc, struct mpcc_cfg *cfg);
+ /*
+ * Update the blending configuration for a specified MPCC.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in] blnd_cfg - MPCC blending configuration.
+ * [in] mpcc_id - The MPCC physical instance.
+ *
+ * Return: void
+ */
+ void (*update_blending)(
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ int mpcc_id);
- int (*get_opp_id)(struct mpc *mpc, int mpcc_id);
+ struct mpcc* (*get_mpcc_for_dpp)(
+ struct mpc_tree *tree,
+ int dpp_id);
+
+ void (*wait_for_idle)(struct mpc *mpc, int id);
- void (*set_output_csc)(struct mpc *mpc,
- int opp_id,
- const struct out_csc_color_matrix *tbl_entry,
- enum mpc_output_csc_mode ocsc_mode);
+ void (*assert_mpcc_idle_before_connect)(struct mpc *mpc, int mpcc_id);
- void (*set_ocsc_default)(struct mpc *mpc,
- int opp_id,
- enum dc_color_space color_space,
- enum mpc_output_csc_mode ocsc_mode);
+ void (*init_mpcc_list_from_hw)(
+ struct mpc *mpc,
+ struct mpc_tree *tree);
};
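A hedged sketch of driving the new tree API end to end; dpp_id and mpcc_id are hypothetical resource assignments that a real caller would take from the pipe context:

static void blend_one_plane(struct mpc *mpc, struct mpc_tree *tree,
		struct mpcc_blnd_cfg *blnd_cfg, int dpp_id, int mpcc_id)
{
	/* Insert above the current top layer; NULL sm_cfg = no stereo mix. */
	struct mpcc *new_mpcc = mpc->funcs->insert_plane(mpc, tree,
			blnd_cfg, NULL, tree->opp_list, dpp_id, mpcc_id);

	/* ... scanout happens with the plane blended in ... */

	mpc->funcs->remove_mpcc(mpc, tree, new_mpcc);
}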
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 579d1059a3d4..ab8fb77f1ae5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -29,6 +29,7 @@
#include "hw_shared.h"
#include "dc_hw_types.h"
#include "transform.h"
+#include "mpc.h"
struct fixed31_32;
@@ -204,7 +205,7 @@ struct output_pixel_processor {
struct dc_context *ctx;
uint32_t inst;
struct pwl_params regamma_params;
- struct mpc_tree_cfg mpc_tree;
+ struct mpc_tree mpc_tree_params;
bool mpcc_disconnect_pending[MAX_PIPES];
const struct opp_funcs *funcs;
};
@@ -248,6 +249,21 @@ enum ovl_csc_adjust_item {
OVERLAY_COLOR_TEMPERATURE
};
+enum oppbuf_display_segmentation {
+ OPPBUF_DISPLAY_SEGMENTATION_1_SEGMENT = 0,
+ OPPBUF_DISPLAY_SEGMENTATION_2_SEGMENT = 1,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT = 2,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT_SPLIT_LEFT = 3,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT_SPLIT_RIGHT = 4
+};
+
+struct oppbuf_params {
+ uint32_t active_width;
+ enum oppbuf_display_segmentation mso_segmentation;
+ uint32_t mso_overlap_pixel_num;
+ uint32_t pixel_repetition;
+};
+
struct opp_funcs {
@@ -276,26 +292,11 @@ struct opp_funcs {
void (*opp_destroy)(struct output_pixel_processor **opp);
- void (*opp_set_stereo_polarity)(
- struct output_pixel_processor *opp,
- bool enable,
- bool rightEyePolarity);
-
- void (*opp_set_test_pattern)(
- struct output_pixel_processor *opp,
- bool enable);
-
- void (*opp_dpg_blank_enable)(
- struct output_pixel_processor *opp,
- bool enable,
- const struct tg_color *color,
- int width,
- int height);
-
- void (*opp_convert_pti)(
+ void (*opp_program_stereo)(
struct output_pixel_processor *opp,
bool enable,
- bool polarity);
+ const struct dc_crtc_timing *timing);
+
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 860259913d78..ec312f1a3e55 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -26,6 +26,8 @@
#ifndef __DAL_TIMING_GENERATOR_TYPES_H__
#define __DAL_TIMING_GENERATOR_TYPES_H__
+#include "hw_shared.h"
+
struct dc_bios;
/* Contains CRTC vertical/horizontal pixel counters */
@@ -40,6 +42,19 @@ struct dcp_gsl_params {
int gsl_master;
};
+struct gsl_params {
+ int gsl0_en;
+ int gsl1_en;
+ int gsl2_en;
+ int gsl_master_en;
+ int gsl_master_mode;
+ int master_update_lock_gsl_en;
+ int gsl_window_start_x;
+ int gsl_window_end_x;
+ int gsl_window_start_y;
+ int gsl_window_end_y;
+};
+
/* define the structure of Dynamic Refresh Mode */
struct drr_params {
uint32_t vertical_total_min;
@@ -50,43 +65,6 @@ struct drr_params {
#define LEFT_EYE_3D_PRIMARY_SURFACE 1
#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
-enum test_pattern_dyn_range {
- TEST_PATTERN_DYN_RANGE_VESA = 0,
- TEST_PATTERN_DYN_RANGE_CEA
-};
-
-enum test_pattern_mode {
- TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
- TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
- TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
- TEST_PATTERN_MODE_VERTICALBARS,
- TEST_PATTERN_MODE_HORIZONTALBARS,
- TEST_PATTERN_MODE_SINGLERAMP_RGB,
- TEST_PATTERN_MODE_DUALRAMP_RGB
-};
-
-enum test_pattern_color_format {
- TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
- TEST_PATTERN_COLOR_FORMAT_BPC_8,
- TEST_PATTERN_COLOR_FORMAT_BPC_10,
- TEST_PATTERN_COLOR_FORMAT_BPC_12
-};
-
-enum controller_dp_test_pattern {
- CONTROLLER_DP_TEST_PATTERN_D102 = 0,
- CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
- CONTROLLER_DP_TEST_PATTERN_PRBS7,
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
- CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
- CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
-};
-
enum crtc_state {
CRTC_STATE_VBLANK = 0,
CRTC_STATE_VACTIVE
@@ -100,6 +78,12 @@ struct _dlg_otg_param {
enum signal_type signal;
};
+struct vupdate_keepout_params {
+ int start_offset;
+ int end_offset;
+ int enable;
+};
+
struct crtc_stereo_flags {
uint8_t PROGRAM_STEREO : 1;
uint8_t PROGRAM_POLARITY : 1;
@@ -187,6 +171,8 @@ struct timing_generator_funcs {
void (*tg_init)(struct timing_generator *tg);
bool (*is_tg_enabled)(struct timing_generator *tg);
+ bool (*is_optc_underflow_occurred)(struct timing_generator *tg);
+ void (*clear_optc_underflow)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 5dc4ecf618ff..4c0aa56f7bae 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -28,6 +28,7 @@
#include "dc_types.h"
#include "clock_source.h"
#include "inc/hw/timing_generator.h"
+#include "inc/hw/opp.h"
#include "inc/hw/link_encoder.h"
#include "core_status.h"
@@ -40,6 +41,7 @@ enum pipe_gating_control {
struct dce_hwseq_wa {
bool blnd_crtc_trigger;
bool DEGVIDCN10_253;
+ bool false_optc_underflow;
};
struct hwseq_wa_state {
@@ -137,10 +139,6 @@ struct hw_sequencer_funcs {
void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
- void (*enable_plane)(struct dc *dc,
- struct pipe_ctx *pipe,
- struct dc_state *context);
-
void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
@@ -198,6 +196,7 @@ struct hw_sequencer_funcs {
void (*edp_backlight_control)(
struct dc_link *link,
bool enable);
+ void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
};
@@ -209,4 +208,8 @@ void color_space_to_black_color(
bool hwss_wait_for_blank_complete(
struct timing_generator *tg);
+const uint16_t *find_color_matrix(
+ enum dc_color_space color_space,
+ uint32_t *array_size);
+
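find_color_matrix() replaces the static output_csc_matrix table removed from hw_shared.h above; together with the dpp_set_csc_adjustment() signature change it lets callers pass raw register values. A hedged sketch (hypothetical caller):

static void program_output_csc(struct dpp *dpp, enum dc_color_space cs)
{
	uint32_t array_size = 0;
	const uint16_t *regval = find_color_matrix(cs, &array_size);

	if (regval != NULL)
		dpp->funcs->dpp_set_csc_adjustment(dpp, regval);
}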
#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index a87c0329541f..1fcbc99e63b5 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -26,8 +26,6 @@
#ifndef _OS_TYPES_H_
#define _OS_TYPES_H_
-#if defined __KERNEL__
-
#include <asm/byteorder.h>
#include <linux/types.h>
#include <drm/drmP.h>
@@ -46,14 +44,12 @@
#undef WRITE
#undef FRAME_SIZE
-#define dm_output_to_console(fmt, ...) DRM_INFO(fmt, ##__VA_ARGS__)
+#define dm_output_to_console(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#define dm_debug(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
-
-#define dm_vlog(fmt, args) vprintk(fmt, args)
-
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include <asm/fpu/api.h>
#endif
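asm/fpu/api.h is pulled in here because the DCN bandwidth code does floating-point math in kernel context, which must be bracketed by the FPU helpers. A hedged sketch (dcn_bw_update() is a hypothetical stand-in for the float-heavy calculation):

	/* Inside a hypothetical bandwidth-validation path: */
	kernel_fpu_begin();		/* save FPU/SIMD state; float math is now legal */
	dcn_bw_update(dc, context);	/* hypothetical float-heavy calculation */
	kernel_fpu_end();		/* restore FPU/SIMD state */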
/*
@@ -89,8 +85,4 @@
BREAK_TO_DEBUGGER(); \
} while (0)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include <asm/fpu/api.h>
-#endif
-
#endif /* _OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 3248f699daf2..4badaedbaadd 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -463,4 +463,11 @@ uint32_t dal_fixed31_32_u2d19(
uint32_t dal_fixed31_32_u0d19(
struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_clamp_u0d14(
+ struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_clamp_u0d10(
+ struct fixed31_32 arg);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 03a7a9ca95ea..c4197432eb7c 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -233,10 +233,6 @@ static inline struct graphics_object_id dal_graphics_object_id_init(
return result;
}
-bool dal_graphics_object_id_is_equal(
- struct graphics_object_id id1,
- struct graphics_object_id id2);
-
/* Based on internal data members memory layout */
static inline uint32_t dal_graphics_object_id_to_uint(
struct graphics_object_id id)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
index 663d3af35baf..5bf84c6d0ec3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
@@ -436,7 +436,6 @@
#define mmTA_CNTL_DEFAULT 0x8004d850
#define mmTA_CNTL_AUX_DEFAULT 0x00000000
#define mmTA_RESERVED_010C_DEFAULT 0x00000000
-#define mmTA_GRAD_ADJ_DEFAULT 0x40000040
#define mmTA_STATUS_DEFAULT 0x00000000
#define mmTA_SCRATCH_DEFAULT 0x00000000
@@ -1700,7 +1699,6 @@
#define mmDB_STENCIL_WRITE_BASE_DEFAULT 0x00000000
#define mmDB_STENCIL_WRITE_BASE_HI_DEFAULT 0x00000000
#define mmDB_DFSM_CONTROL_DEFAULT 0x00000000
-#define mmDB_RENDER_FILTER_DEFAULT 0x00000000
#define mmDB_Z_INFO2_DEFAULT 0x00000000
#define mmDB_STENCIL_INFO2_DEFAULT 0x00000000
#define mmTA_BC_BASE_ADDR_DEFAULT 0x00000000
@@ -1806,8 +1804,6 @@
#define mmPA_SC_RIGHT_VERT_GRID_DEFAULT 0x00000000
#define mmPA_SC_LEFT_VERT_GRID_DEFAULT 0x00000000
#define mmPA_SC_HORIZ_GRID_DEFAULT 0x00000000
-#define mmPA_SC_FOV_WINDOW_LR_DEFAULT 0x00000000
-#define mmPA_SC_FOV_WINDOW_TB_DEFAULT 0x00000000
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_DEFAULT 0x00000000
#define mmCB_BLEND_RED_DEFAULT 0x00000000
#define mmCB_BLEND_GREEN_DEFAULT 0x00000000
@@ -2072,7 +2068,6 @@
#define mmVGT_EVENT_INITIATOR_DEFAULT 0x00000000
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_DEFAULT 0x00000000
#define mmVGT_DRAW_PAYLOAD_CNTL_DEFAULT 0x00000000
-#define mmVGT_INDEX_PAYLOAD_CNTL_DEFAULT 0x00000000
#define mmVGT_INSTANCE_STEP_RATE_0_DEFAULT 0x00000000
#define mmVGT_INSTANCE_STEP_RATE_1_DEFAULT 0x00000000
#define mmVGT_ESGS_RING_ITEMSIZE_DEFAULT 0x00000000
@@ -2490,7 +2485,6 @@
#define mmWD_INDEX_BUF_BASE_DEFAULT 0x00000000
#define mmWD_INDEX_BUF_BASE_HI_DEFAULT 0x00000000
#define mmIA_MULTI_VGT_PARAM_DEFAULT 0x006000ff
-#define mmVGT_OBJECT_ID_DEFAULT 0x00000000
#define mmVGT_INSTANCE_BASE_ID_DEFAULT 0x00000000
#define mmPA_SU_LINE_STIPPLE_VALUE_DEFAULT 0x00000000
#define mmPA_SC_LINE_STIPPLE_STATE_DEFAULT 0x00000000
@@ -2534,7 +2528,6 @@
#define mmSQC_WRITEBACK_DEFAULT 0x00000000
#define mmTA_CS_BC_BASE_ADDR_DEFAULT 0x00000000
#define mmTA_CS_BC_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmTA_GRAD_ADJ_UCONFIG_DEFAULT 0x40000040
#define mmDB_OCCLUSION_COUNT0_LOW_DEFAULT 0x00000000
#define mmDB_OCCLUSION_COUNT0_HI_DEFAULT 0x00000000
#define mmDB_OCCLUSION_COUNT1_LOW_DEFAULT 0x00000000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index e6d6171aa8b9..4ce090db7ef7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -841,8 +841,6 @@
#define mmTA_CNTL_AUX_BASE_IDX 0
#define mmTA_RESERVED_010C 0x0543
#define mmTA_RESERVED_010C_BASE_IDX 0
-#define mmTA_GRAD_ADJ 0x0544
-#define mmTA_GRAD_ADJ_BASE_IDX 0
#define mmTA_STATUS 0x0548
#define mmTA_STATUS_BASE_IDX 0
#define mmTA_SCRATCH 0x0564
@@ -3330,8 +3328,6 @@
#define mmDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
#define mmDB_DFSM_CONTROL 0x0018
#define mmDB_DFSM_CONTROL_BASE_IDX 1
-#define mmDB_RENDER_FILTER 0x0019
-#define mmDB_RENDER_FILTER_BASE_IDX 1
#define mmDB_Z_INFO2 0x001a
#define mmDB_Z_INFO2_BASE_IDX 1
#define mmDB_STENCIL_INFO2 0x001b
@@ -3542,10 +3538,6 @@
#define mmPA_SC_LEFT_VERT_GRID_BASE_IDX 1
#define mmPA_SC_HORIZ_GRID 0x00ea
#define mmPA_SC_HORIZ_GRID_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_LR 0x00eb
-#define mmPA_SC_FOV_WINDOW_LR_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_TB 0x00ec
-#define mmPA_SC_FOV_WINDOW_TB_BASE_IDX 1
#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
#define mmCB_BLEND_RED 0x0105
@@ -4074,8 +4066,6 @@
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_BASE_IDX 1
#define mmVGT_DRAW_PAYLOAD_CNTL 0x02a6
#define mmVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
-#define mmVGT_INDEX_PAYLOAD_CNTL 0x02a7
-#define mmVGT_INDEX_PAYLOAD_CNTL_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_0 0x02a8
#define mmVGT_INSTANCE_STEP_RATE_0_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_1 0x02a9
@@ -4908,8 +4898,6 @@
#define mmWD_INDEX_BUF_BASE_HI_BASE_IDX 1
#define mmIA_MULTI_VGT_PARAM 0x2258
#define mmIA_MULTI_VGT_PARAM_BASE_IDX 1
-#define mmVGT_OBJECT_ID 0x2259
-#define mmVGT_OBJECT_ID_BASE_IDX 1
#define mmVGT_INSTANCE_BASE_ID 0x225a
#define mmVGT_INSTANCE_BASE_ID_BASE_IDX 1
#define mmPA_SU_LINE_STIPPLE_VALUE 0x2280
@@ -4996,8 +4984,6 @@
#define mmTA_CS_BC_BASE_ADDR_BASE_IDX 1
#define mmTA_CS_BC_BASE_ADDR_HI 0x2381
#define mmTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
-#define mmTA_GRAD_ADJ_UCONFIG 0x2382
-#define mmTA_GRAD_ADJ_UCONFIG_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_LOW 0x23c0
#define mmDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_HI 0x23c1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index 5c5e9b445432..2e1214be67a2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -4576,15 +4576,6 @@
//TA_RESERVED_010C
#define TA_RESERVED_010C__Unused__SHIFT 0x0
#define TA_RESERVED_010C__Unused_MASK 0xFFFFFFFFL
-//TA_GRAD_ADJ
-#define TA_GRAD_ADJ__GRAD_ADJ_0__SHIFT 0x0
-#define TA_GRAD_ADJ__GRAD_ADJ_1__SHIFT 0x8
-#define TA_GRAD_ADJ__GRAD_ADJ_2__SHIFT 0x10
-#define TA_GRAD_ADJ__GRAD_ADJ_3__SHIFT 0x18
-#define TA_GRAD_ADJ__GRAD_ADJ_0_MASK 0x000000FFL
-#define TA_GRAD_ADJ__GRAD_ADJ_1_MASK 0x0000FF00L
-#define TA_GRAD_ADJ__GRAD_ADJ_2_MASK 0x00FF0000L
-#define TA_GRAD_ADJ__GRAD_ADJ_3_MASK 0xFF000000L
//TA_STATUS
#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
@@ -14459,9 +14450,6 @@
#define DB_DFSM_CONTROL__PUNCHOUT_MODE_MASK 0x00000003L
#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP_MASK 0x00000004L
#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW_MASK 0x00000008L
-//DB_RENDER_FILTER
-#define DB_RENDER_FILTER__PS_INVOKE_MASK__SHIFT 0x0
-#define DB_RENDER_FILTER__PS_INVOKE_MASK_MASK 0x0000FFFFL
//DB_Z_INFO2
#define DB_Z_INFO2__EPITCH__SHIFT 0x0
#define DB_Z_INFO2__EPITCH_MASK 0x0000FFFFL
@@ -14959,11 +14947,9 @@
#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE__SHIFT 0x1
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE__SHIFT 0x5
-#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING__SHIFT 0x8
#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK 0x00000006L
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE_MASK 0x00000060L
-#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING_MASK 0x00000100L
//CP_PERFMON_CNTX_CNTL
#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
@@ -15003,20 +14989,6 @@
#define PA_SC_HORIZ_GRID__TOP_HALF_MASK 0x0000FF00L
#define PA_SC_HORIZ_GRID__BOT_HALF_MASK 0x00FF0000L
#define PA_SC_HORIZ_GRID__BOT_QTR_MASK 0xFF000000L
-//PA_SC_FOV_WINDOW_LR
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_LEFT__SHIFT 0x0
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_RIGHT__SHIFT 0x8
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_LEFT__SHIFT 0x10
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_RIGHT__SHIFT 0x18
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_LEFT_MASK 0x000000FFL
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_RIGHT_MASK 0x0000FF00L
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_LEFT_MASK 0x00FF0000L
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_RIGHT_MASK 0xFF000000L
-//PA_SC_FOV_WINDOW_TB
-#define PA_SC_FOV_WINDOW_TB__FOV_TOP__SHIFT 0x0
-#define PA_SC_FOV_WINDOW_TB__FOV_BOT__SHIFT 0x8
-#define PA_SC_FOV_WINDOW_TB__FOV_TOP_MASK 0x000000FFL
-#define PA_SC_FOV_WINDOW_TB__FOV_BOT_MASK 0x0000FF00L
//VGT_MULTI_PRIM_IB_RESET_INDX
#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
@@ -17010,13 +16982,11 @@
#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
-#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE__SHIFT 0x5
#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
-#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE_MASK 0x00000020L
//PA_CL_OBJPRIM_ID_CNTL
#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL__SHIFT 0x0
#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID__SHIFT 0x1
@@ -17345,9 +17315,6 @@
#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID_MASK 0x00000004L
#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN_MASK 0x00000008L
-//VGT_INDEX_PAYLOAD_CNTL
-#define VGT_INDEX_PAYLOAD_CNTL__COMPOUND_INDEX_EN__SHIFT 0x0
-#define VGT_INDEX_PAYLOAD_CNTL__COMPOUND_INDEX_EN_MASK 0x00000001L
//VGT_INSTANCE_STEP_RATE_0
#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xFFFFFFFFL
@@ -19849,9 +19816,6 @@
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC_MASK 0x00200000L
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV_MASK 0x00400000L
#define IA_MULTI_VGT_PARAM__HW_USE_ONLY_MASK 0x00800000L
-//VGT_OBJECT_ID
-#define VGT_OBJECT_ID__REG_OBJ_ID__SHIFT 0x0
-#define VGT_OBJECT_ID__REG_OBJ_ID_MASK 0xFFFFFFFFL
//VGT_INSTANCE_BASE_ID
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
@@ -20067,15 +20031,6 @@
//TA_CS_BC_BASE_ADDR_HI
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
-//TA_GRAD_ADJ_UCONFIG
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_0__SHIFT 0x0
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_1__SHIFT 0x8
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_2__SHIFT 0x10
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_3__SHIFT 0x18
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_0_MASK 0x000000FFL
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_1_MASK 0x0000FF00L
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_2_MASK 0x00FF0000L
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_3_MASK 0xFF000000L
//DB_OCCLUSION_COUNT0_LOW
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h
index db7ef5ede0e5..030e0020902b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h
@@ -815,8 +815,6 @@
#define mmTA_CNTL_AUX_BASE_IDX 0
#define mmTA_RESERVED_010C 0x0543
#define mmTA_RESERVED_010C_BASE_IDX 0
-#define mmTA_GRAD_ADJ 0x0544
-#define mmTA_GRAD_ADJ_BASE_IDX 0
#define mmTA_STATUS 0x0548
#define mmTA_STATUS_BASE_IDX 0
#define mmTA_SCRATCH 0x0564
@@ -3617,8 +3615,6 @@
#define mmDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
#define mmDB_DFSM_CONTROL 0x0018
#define mmDB_DFSM_CONTROL_BASE_IDX 1
-#define mmDB_RENDER_FILTER 0x0019
-#define mmDB_RENDER_FILTER_BASE_IDX 1
#define mmDB_Z_INFO2 0x001a
#define mmDB_Z_INFO2_BASE_IDX 1
#define mmDB_STENCIL_INFO2 0x001b
@@ -3829,10 +3825,6 @@
#define mmPA_SC_LEFT_VERT_GRID_BASE_IDX 1
#define mmPA_SC_HORIZ_GRID 0x00ea
#define mmPA_SC_HORIZ_GRID_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_LR 0x00eb
-#define mmPA_SC_FOV_WINDOW_LR_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_TB 0x00ec
-#define mmPA_SC_FOV_WINDOW_TB_BASE_IDX 1
#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
#define mmCB_BLEND_RED 0x0105
@@ -4361,8 +4353,6 @@
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_BASE_IDX 1
#define mmVGT_DRAW_PAYLOAD_CNTL 0x02a6
#define mmVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
-#define mmVGT_INDEX_PAYLOAD_CNTL 0x02a7
-#define mmVGT_INDEX_PAYLOAD_CNTL_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_0 0x02a8
#define mmVGT_INSTANCE_STEP_RATE_0_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_1 0x02a9
@@ -5195,8 +5185,6 @@
#define mmWD_INDEX_BUF_BASE_HI_BASE_IDX 1
#define mmIA_MULTI_VGT_PARAM 0x2258
#define mmIA_MULTI_VGT_PARAM_BASE_IDX 1
-#define mmVGT_OBJECT_ID 0x2259
-#define mmVGT_OBJECT_ID_BASE_IDX 1
#define mmVGT_INSTANCE_BASE_ID 0x225a
#define mmVGT_INSTANCE_BASE_ID_BASE_IDX 1
#define mmPA_SU_LINE_STIPPLE_VALUE 0x2280
@@ -5283,8 +5271,6 @@
#define mmTA_CS_BC_BASE_ADDR_BASE_IDX 1
#define mmTA_CS_BC_BASE_ADDR_HI 0x2381
#define mmTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
-#define mmTA_GRAD_ADJ_UCONFIG 0x2382
-#define mmTA_GRAD_ADJ_UCONFIG_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_LOW 0x23c0
#define mmDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_HI 0x23c1
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 9d3bdada79d5..fa9d1615a2cc 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -389,20 +389,12 @@ static int pp_dpm_force_performance_level(void *handle,
if (level == hwmgr->dpm_level)
return 0;
- if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
- pr_info("%s was not implemented.\n", __func__);
- return 0;
- }
-
mutex_lock(&pp_handle->pp_lock);
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
- ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
- if (!ret)
- hwmgr->dpm_level = hwmgr->request_dpm_level;
-
mutex_unlock(&pp_handle->pp_lock);
+
return 0;
}
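
Note: with this hunk the force-performance-level entry point no longer programs the hardware directly; it only latches the request and kicks the readjust task, which applies it from psm_adjust_power_state_dynamic() (see the pp_psm.c hunk further down). A minimal sketch of the resulting flow, locking and error paths elided:

    /* pp_dpm_force_performance_level() now just records the request: */
    hwmgr->request_dpm_level = level;
    hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);

    /* ...and psm_adjust_power_state_dynamic() later latches it: */
    if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
            hwmgr->dpm_level = hwmgr->request_dpm_level;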
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index ad1f6b57884b..b314d09d41af 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -728,9 +728,6 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
if (clock < stable_pstate_sclk)
clock = stable_pstate_sclk;
- } else {
- if (clock < hwmgr->gfx_arbiter.sclk)
- clock = hwmgr->gfx_arbiter.sclk;
}
if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
@@ -1085,14 +1082,8 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
uint32_t num_of_active_displays = 0;
struct cgs_display_info info = {0};
- cz_ps->evclk = hwmgr->vce_arbiter.evclk;
- cz_ps->ecclk = hwmgr->vce_arbiter.ecclk;
-
cz_ps->need_dfs_bypass = true;
- cz_hwmgr->video_start = (hwmgr->uvd_arbiter.vclk != 0 || hwmgr->uvd_arbiter.dclk != 0 ||
- hwmgr->vce_arbiter.evclk != 0 || hwmgr->vce_arbiter.ecclk != 0);
-
cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
@@ -1105,9 +1096,6 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
- if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
|| (num_of_active_displays >= 3);
@@ -1339,22 +1327,13 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
- /*Program HardMin based on the vce_arbiter.ecclk */
- if (hwmgr->vce_arbiter.ecclk == 0) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin, 0);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
- smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkSoftMin, 1);
- } else {
- cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
- }
}
return 0;
}
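
With the VCE arbiter removed, the else branch no longer has an ecclk request to honor; it always releases the ECLK hard minimum and keeps the soft minimum off level 0. The post-patch branch, reformatted for clarity (same calls as above):

    } else {
            /* no VCE arbiter any more: always release the ECLK hard floor */
            smum_send_msg_to_smc_with_parameter(hwmgr,
                            PPSMC_MSG_SetEclkHardMin, 0);
            /* keep ECLK soft min at DPM 1; at DPM 0 VCE could hang when
             * SCLK switches from DPM 0 to 6/7 */
            smum_send_msg_to_smc_with_parameter(hwmgr,
                            PPSMC_MSG_SetEclkSoftMin, 1);
    }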
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 623cff90233d..2b0c53fe4c8d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -112,26 +112,29 @@ int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->force_dpm_level != NULL) {
+ if (hwmgr->hwmgr_func->force_dpm_level != NULL)
ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
- if (ret)
- return ret;
-
- if (hwmgr->hwmgr_func->set_power_profile_state) {
- if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE)
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- &hwmgr->gfx_power_profile);
- else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE)
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- &hwmgr->compute_power_profile);
- }
- }
return ret;
}
+int phm_reset_power_profile_state(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+
+ if (hwmgr->hwmgr_func->set_power_profile_state) {
+ if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE)
+ ret = hwmgr->hwmgr_func->set_power_profile_state(
+ hwmgr,
+ &hwmgr->gfx_power_profile);
+ else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE)
+ ret = hwmgr->hwmgr_func->set_power_profile_state(
+ hwmgr,
+ &hwmgr->compute_power_profile);
+ }
+ return ret;
+}
+
int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *adjusted_ps,
const struct pp_power_state *current_ps)
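
phm_force_dpm_levels() is reduced to the bare backend call; re-applying the current power profile is split out into phm_reset_power_profile_state() so the two steps can be sequenced from the power-state readjust path. The intended pairing, as the pp_psm.c hunk below wires up:

    if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
            hwmgr->dpm_level = hwmgr->request_dpm_level;
    phm_reset_power_profile_state(hwmgr);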
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index ce59e0e67cb2..0229f774f7a9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -149,6 +149,7 @@ int hwmgr_early_init(struct pp_instance *handle)
hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);
hwmgr->fan_ctrl_is_in_default_mode = true;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ffa44bbb218e..95ab772e0c3e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -244,6 +244,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
}
phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+ if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
+ hwmgr->dpm_level = hwmgr->request_dpm_level;
+
+ phm_reset_power_profile_state(hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 3e0b267c74a8..569073e3a5a1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -159,7 +159,6 @@ static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
struct PP_Clocks clocks = {0};
struct pp_display_clock_request clock_req;
@@ -170,39 +169,6 @@ static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
"Attempt to set DCF Clock Failed!", return -EINVAL);
- if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
- ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
- rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
- rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinVcn,
- (rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
- }
-
- if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
- ((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetHardMinSocclkByFreq,
- hwmgr->gfx_arbiter.sclk_hard_min / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq);
- }
-
- if ((hwmgr->gfx_arbiter.gfxclk != 0) &&
- (rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinVideoGfxclkFreq,
- hwmgr->gfx_arbiter.gfxclk / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq);
- }
-
- if ((hwmgr->gfx_arbiter.fclk != 0) &&
- (rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinVideoFclkFreq,
- hwmgr->gfx_arbiter.fclk / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq);
- }
-
return 0;
}
@@ -518,17 +484,161 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
+ if (hwmgr->smu_version < 0x1E3700) {
+ pr_info("smu firmware version too old, can not set dpm level\n");
+ return 0;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ RAVEN_UMD_PSTATE_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ RAVEN_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ RAVEN_UMD_PSTATE_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ RAVEN_UMD_PSTATE_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ RAVEN_UMD_PSTATE_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ RAVEN_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ RAVEN_UMD_PSTATE_MIN_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ RAVEN_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
return 0;
}
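
Every non-default case above issues the same shape of request: a hard minimum to raise the floor and a soft maximum to cap the ceiling of one clock domain. A hypothetical helper capturing the repeated pattern (not part of the patch):

    /* pin one clock domain to a fixed frequency (hypothetical helper) */
    static void rv_pin_clock(struct pp_hwmgr *hwmgr,
                             uint16_t msg_hard_min, uint16_t msg_soft_max,
                             uint32_t freq_mhz)
    {
            smum_send_msg_to_smc_with_parameter(hwmgr, msg_hard_min, freq_mhz);
            smum_send_msg_to_smc_with_parameter(hwmgr, msg_soft_max, freq_mhz);
    }

e.g. rv_pin_clock(hwmgr, PPSMC_MSG_SetHardMinGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, RAVEN_UMD_PSTATE_MIN_GFXCLK) would replace the two-message pairs in the LOW and PROFILE_MIN_SCLK cases.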
static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
- return 0;
+ struct rv_hwmgr *data;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ data = (struct rv_hwmgr *)(hwmgr->backend);
+
+ if (low)
+ return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+ else
+ return data->clock_vol_info.vdd_dep_on_fclk->entries[
+ data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}
static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
- return 0;
+ struct rv_hwmgr *data;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ data = (struct rv_hwmgr *)(hwmgr->backend);
+
+ if (low)
+ return data->gfx_min_freq_limit;
+ else
+ return data->gfx_max_freq_limit;
}
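
One caveat worth flagging: both getters are declared uint32_t, so the -EINVAL returned on a NULL hwmgr reaches callers as a very large clock value (0xFFFFFFEA) rather than an error. A stricter form of the guard, purely illustrative:

    if (hwmgr == NULL || hwmgr->backend == NULL)
            return 0;       /* hypothetical: report "unknown" instead of -EINVAL */

The low/high mclk values come from the first and last entries of the fclk voltage-dependency table, so they are only meaningful once clock_vol_info has been populated by backend init.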
static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index 9dc503055394..c3bc311dc59f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -304,4 +304,19 @@ struct pp_hwmgr;
int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
+/* UMD PState Raven Msg Parameters in MHz */
+#define RAVEN_UMD_PSTATE_GFXCLK 700
+#define RAVEN_UMD_PSTATE_SOCCLK 626
+#define RAVEN_UMD_PSTATE_FCLK 933
+#define RAVEN_UMD_PSTATE_VCE 0x03C00320
+
+#define RAVEN_UMD_PSTATE_PEAK_GFXCLK 1100
+#define RAVEN_UMD_PSTATE_PEAK_SOCCLK 757
+#define RAVEN_UMD_PSTATE_PEAK_FCLK 1200
+
+#define RAVEN_UMD_PSTATE_MIN_GFXCLK 200
+#define RAVEN_UMD_PSTATE_MIN_FCLK 400
+#define RAVEN_UMD_PSTATE_MIN_SOCCLK 200
+#define RAVEN_UMD_PSTATE_MIN_VCE 0x0190012C
+
#endif
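
The VCN parameters are not plain MHz values like the others; judging by the (clk << 16) | clk packing used with PPSMC_MSG_SetSoftMinVcn in the removed rv_set_clock_limit() code, they appear to carry two packed 16-bit MHz fields. Decoded under that assumption (not stated in the patch):

    /* assumed layout: two packed 16-bit MHz fields */
    #define RAVEN_VCN_CLK_HI(v)     (((v) >> 16) & 0xFFFF)  /* 0x03C00320 -> 960 */
    #define RAVEN_VCN_CLK_LO(v)     ((v) & 0xFFFF)          /* 0x03C00320 -> 800 */

i.e. RAVEN_UMD_PSTATE_VCE would decode to 960/800 MHz and RAVEN_UMD_PSTATE_MIN_VCE to 400/300 MHz.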
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 8edb0c4c3876..40adc855c416 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2722,9 +2722,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
}
- smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
cgs_get_active_displays_info(hwmgr->device, &info);
minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
@@ -2754,38 +2751,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.memoryClock = stable_pstate_mclk;
}
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- smu7_ps->performance_levels[1].engine_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- smu7_ps->performance_levels[1].memory_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
disable_mclk_switching_for_frame_lock = phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 07d256d136ad..2d55dabc77d4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -426,9 +426,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_VR0HOT].supported = true;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- vega10_read_arg_from_smc(hwmgr, &(data->smu_version));
+ vega10_read_arg_from_smc(hwmgr, &(hwmgr->smu_version));
/* ACG firmware has major version 5 */
- if ((data->smu_version & 0xff000000) == 0x5000000)
+ if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
if (data->registry_data.didt_support)
@@ -2879,8 +2879,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"DPM is already running right , skipping re-enablement!",
return 0);
- if ((data->smu_version == 0x001c2c00) ||
- (data->smu_version == 0x001c2d00)) {
+ if ((hwmgr->smu_version == 0x001c2c00) ||
+ (hwmgr->smu_version == 0x001c2d00)) {
tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3124,9 +3124,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
}
- vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
cgs_get_active_displays_info(hwmgr->device, &info);
/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
@@ -3165,38 +3162,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.memoryClock = stable_pstate_mclk;
}
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- vega10_ps->performance_levels[1].gfx_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- vega10_ps->performance_levels[1].mem_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
disable_mclk_switching_for_frame_lock = phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
@@ -3819,10 +3784,7 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
uint32_t low_sclk_interrupt_threshold = 0;
if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
- (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 8f7358cc3327..e8507ff8dbb3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -387,7 +387,6 @@ struct vega10_hwmgr {
struct vega10_smc_state_table smc_state_table;
uint32_t config_telemetry;
- uint32_t smu_version;
uint32_t acg_loop_state;
uint32_t mem_channels;
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 57a0467b7267..5716b937a6ad 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -437,5 +437,6 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
+extern int phm_reset_power_profile_state(struct pp_hwmgr *hwmgr);
#endif /* _HARDWARE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 004a40e88bde..565fe0832f41 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -105,36 +105,6 @@ struct phm_set_power_state_input {
const struct pp_hw_power_state *pnew_state;
};
-struct phm_acp_arbiter {
- uint32_t acpclk;
-};
-
-struct phm_uvd_arbiter {
- uint32_t vclk;
- uint32_t dclk;
- uint32_t vclk_ceiling;
- uint32_t dclk_ceiling;
- uint32_t vclk_soft_min;
- uint32_t dclk_soft_min;
-};
-
-struct phm_vce_arbiter {
- uint32_t evclk;
- uint32_t ecclk;
-};
-
-struct phm_gfx_arbiter {
- uint32_t sclk;
- uint32_t sclk_hard_min;
- uint32_t mclk;
- uint32_t sclk_over_drive;
- uint32_t mclk_over_drive;
- uint32_t sclk_threshold;
- uint32_t num_cus;
- uint32_t gfxclk;
- uint32_t fclk;
-};
-
struct phm_clock_array {
uint32_t count;
uint32_t values[1];
@@ -722,6 +692,7 @@ enum PP_TABLE_VERSION {
struct pp_hwmgr {
uint32_t chip_family;
uint32_t chip_id;
+ uint32_t smu_version;
uint32_t pp_table_version;
void *device;
@@ -737,10 +708,6 @@ struct pp_hwmgr {
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
enum amd_dpm_forced_level request_dpm_level;
- struct phm_gfx_arbiter gfx_arbiter;
- struct phm_acp_arbiter acp_arbiter;
- struct phm_uvd_arbiter uvd_arbiter;
- struct phm_vce_arbiter vce_arbiter;
uint32_t usec_timeout;
void *pptable;
struct phm_platform_descriptor platform_descriptor;
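
With the acp/uvd/vce/gfx arbiter structs gone, minimum clocks now flow in through display_config and explicit SMC requests rather than through shared arbiter fields, and the firmware version becomes a first-class pp_hwmgr member. Each smumgr is expected to populate it during firmware load, e.g. (as the smu7 hunk below does):

    hwmgr->smu_version = info.version;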
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 2b3497135bbd..f15f4df9d0a9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -75,7 +75,12 @@
#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
#define PPSMC_MSG_SoftReset 0x2E
-#define PPSMC_Message_Count 0x2F
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x30
+#define PPSMC_MSG_SetHardMinGfxClk 0x31
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33
+#define PPSMC_MSG_SetSoftMaxVcn 0x34
+#define PPSMC_Message_Count 0x35
typedef uint16_t PPSMC_Result;
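
Note the new message IDs start at 0x30, leaving 0x2F unassigned (presumably reserved on the firmware side), and PPSMC_Message_Count moves to 0x35 accordingly. A hypothetical compile-time check of that invariant:

    /* hypothetical sanity check: the count sits right after the
     * highest message ID */
    BUILD_BUG_ON(PPSMC_MSG_SetSoftMaxVcn + 1 != PPSMC_Message_Count);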
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index c36f00ef46f3..0b4a55660de4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2218,10 +2218,7 @@ static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
@@ -2319,6 +2316,7 @@ static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
hwmgr->is_kicker = info.is_kicker;
+ hwmgr->smu_version = info.version;
byte_count = info.image_size;
src = (uint8_t *)info.kptr;
start_addr = info.ucode_start_address;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index f572beff197f..085d81c8b332 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -2385,10 +2385,7 @@ static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index d62078681cae..125312691f75 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -204,7 +204,7 @@ static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr)
pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n");
return -EINVAL;
}
-
+ hwmgr->smu_version = info.version;
/* wait for smc boot up */
PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
@@ -2202,10 +2202,7 @@ static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index bd6be7793ca7..cdb47657b567 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2369,10 +2369,7 @@ static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
index b98ade676d12..2d662b44af54 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
@@ -305,6 +305,14 @@ static int rv_smu_fini(struct pp_hwmgr *hwmgr)
static int rv_start_smu(struct pp_hwmgr *hwmgr)
{
+ struct cgs_firmware_info info = {0};
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ rv_read_arg_from_smc(hwmgr, &hwmgr->smu_version);
+ info.version = hwmgr->smu_version >> 8;
+
+ cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
if (rv_verify_smc_interface(hwmgr))
return -EINVAL;
if (rv_smc_enable_sdma(hwmgr))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 7f5359a97ef2..cb95e882b98f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -535,7 +535,7 @@ int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
hwmgr->is_kicker = info.is_kicker;
-
+ hwmgr->smu_version = info.version;
result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 81b8790c0d22..79e5c05571bc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -2654,10 +2654,7 @@ static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
deleted file mode 100644
index b590fcc2786a..000000000000
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _GPU_SCHEDULER_H_
-#define _GPU_SCHEDULER_H_
-
-#include <linux/kfifo.h>
-#include <linux/dma-fence.h>
-#include "spsc_queue.h"
-
-struct amd_gpu_scheduler;
-struct amd_sched_rq;
-
-enum amd_sched_priority {
- AMD_SCHED_PRIORITY_MIN,
- AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
- AMD_SCHED_PRIORITY_NORMAL,
- AMD_SCHED_PRIORITY_HIGH_SW,
- AMD_SCHED_PRIORITY_HIGH_HW,
- AMD_SCHED_PRIORITY_KERNEL,
- AMD_SCHED_PRIORITY_MAX,
- AMD_SCHED_PRIORITY_INVALID = -1,
- AMD_SCHED_PRIORITY_UNSET = -2
-};
-
-
-/**
- * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
- * job queues to corresponding hardware ring based on scheduling
- * policy.
-*/
-struct amd_sched_entity {
- struct list_head list;
- struct amd_sched_rq *rq;
- spinlock_t rq_lock;
- struct amd_gpu_scheduler *sched;
-
- spinlock_t queue_lock;
- struct spsc_queue job_queue;
-
- atomic_t fence_seq;
- uint64_t fence_context;
-
- struct dma_fence *dependency;
- struct dma_fence_cb cb;
- atomic_t *guilty; /* points to ctx's guilty */
-};
-
-/**
- * Run queue is a set of entities scheduling command submissions for
- * one specific ring. It implements the scheduling policy that selects
- * the next entity to emit commands from.
-*/
-struct amd_sched_rq {
- spinlock_t lock;
- struct list_head entities;
- struct amd_sched_entity *current_entity;
-};
-
-struct amd_sched_fence {
- struct dma_fence scheduled;
- struct dma_fence finished;
- struct dma_fence_cb cb;
- struct dma_fence *parent;
- struct amd_gpu_scheduler *sched;
- spinlock_t lock;
- void *owner;
-};
-
-struct amd_sched_job {
- struct spsc_node queue_node;
- struct amd_gpu_scheduler *sched;
- struct amd_sched_fence *s_fence;
- struct dma_fence_cb finish_cb;
- struct work_struct finish_work;
- struct list_head node;
- struct delayed_work work_tdr;
- uint64_t id;
- atomic_t karma;
- enum amd_sched_priority s_priority;
-};
-
-extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
-extern const struct dma_fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
-{
- if (f->ops == &amd_sched_fence_ops_scheduled)
- return container_of(f, struct amd_sched_fence, scheduled);
-
- if (f->ops == &amd_sched_fence_ops_finished)
- return container_of(f, struct amd_sched_fence, finished);
-
- return NULL;
-}
-
-static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold)
-{
- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
-}
-
-/**
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side
-*/
-struct amd_sched_backend_ops {
- struct dma_fence *(*dependency)(struct amd_sched_job *sched_job,
- struct amd_sched_entity *s_entity);
- struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
- void (*timedout_job)(struct amd_sched_job *sched_job);
- void (*free_job)(struct amd_sched_job *sched_job);
-};
-
-/**
- * One scheduler is implemented for each hardware ring
-*/
-struct amd_gpu_scheduler {
- const struct amd_sched_backend_ops *ops;
- uint32_t hw_submission_limit;
- long timeout;
- const char *name;
- struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX];
- wait_queue_head_t wake_up_worker;
- wait_queue_head_t job_scheduled;
- atomic_t hw_rq_count;
- atomic64_t job_id_count;
- struct task_struct *thread;
- struct list_head ring_mirror_list;
- spinlock_t job_list_lock;
- int hang_limit;
-};
-
-int amd_sched_init(struct amd_gpu_scheduler *sched,
- const struct amd_sched_backend_ops *ops,
- uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name);
-void amd_sched_fini(struct amd_gpu_scheduler *sched);
-
-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- struct amd_sched_rq *rq,
- uint32_t jobs, atomic_t* guilty);
-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity);
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
- struct amd_sched_entity *entity);
-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
- struct amd_sched_rq *rq);
-
-int amd_sched_fence_slab_init(void);
-void amd_sched_fence_slab_fini(void);
-
-struct amd_sched_fence *amd_sched_fence_create(
- struct amd_sched_entity *s_entity, void *owner);
-void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_finished(struct amd_sched_fence *fence);
-int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void *owner);
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *job);
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
-bool amd_sched_dependency_optimized(struct dma_fence* fence,
- struct amd_sched_entity *entity);
-void amd_sched_job_kickout(struct amd_sched_job *s_job);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 949bf6b3feab..6b6fb2080ac3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1226,7 +1226,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
+ ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
ttm_bo_mem_put(bo, &tmp_reg);
return ret;
@@ -1255,7 +1255,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
+ ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
if (ret)
goto out;
@@ -1380,8 +1380,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
/* Fallback to software copy. */
ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, ctx->interruptible,
- ctx->no_wait_gpu, new_reg);
+ ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index d866f329e7d8..78ce118d9157 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -357,8 +357,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
qxl_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, ctx->interruptible, ctx->no_wait_gpu,
- new_mem);
+ return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
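
These TTM call sites track an interface change: ttm_bo_move_ttm() and ttm_bo_move_memcpy() now take a struct ttm_operation_ctx instead of separate interruptible/no_wait_gpu booleans. A caller that previously forwarded the two flags builds the context once (sketch, assuming the field names of this era's ttm_operation_ctx):

    struct ttm_operation_ctx ctx = {
            .interruptible = intr,
            .no_wait_gpu   = no_wait_gpu,
    };
    ret = ttm_bo_move_ttm(bo, &ctx, new_reg);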
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a6511918f632..d3045a371a55 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1627,8 +1627,6 @@ static const u32 godavari_golden_registers[] =
static void cik_init_golden_registers(struct radeon_device *rdev)
{
- /* Some of the registers might be dependent on GRBM_GFX_INDEX */
- mutex_lock(&rdev->grbm_idx_mutex);
switch (rdev->family) {
case CHIP_BONAIRE:
radeon_program_register_sequence(rdev,
@@ -1703,7 +1701,6 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
default:
break;
}
- mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -3120,7 +3117,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -3132,7 +3128,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
mask = 1;
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
@@ -3143,7 +3138,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
rdev->config.cik.backend_enable_mask = enabled_rbs;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
@@ -3171,7 +3165,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
WREG32(PA_SC_RASTER_CONFIG, data);
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -3391,12 +3384,6 @@ static void cik_gpu_init(struct radeon_device *rdev)
/* set HW defaults for 3D engine */
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
- mutex_lock(&rdev->grbm_idx_mutex);
- /*
- * making sure that the following register writes will be broadcasted
- * to all the shaders
- */
- cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(SX_DEBUG_1, 0x20);
WREG32(TA_CNTL_AUX, 0x00010000);
@@ -3452,7 +3439,6 @@ static void cik_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
- mutex_unlock(&rdev->grbm_idx_mutex);
udelay(50);
}
@@ -4432,11 +4418,12 @@ static int cik_mec_init(struct radeon_device *rdev)
/*
* KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
* CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
- * Nonetheless, we assign only 1 pipe because all other pipes will
- * be handled by KFD
*/
- rdev->mec.num_mec = 1;
- rdev->mec.num_pipe = 1;
+ if (rdev->family == CHIP_KAVERI)
+ rdev->mec.num_mec = 2;
+ else
+ rdev->mec.num_mec = 1;
+ rdev->mec.num_pipe = 4;
rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
if (rdev->mec.hpd_eop_obj == NULL) {
@@ -4579,8 +4566,11 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* init the pipes */
mutex_lock(&rdev->srbm_mutex);
- for (i = 0; i < rdev->mec.num_pipe; ++i) {
- cik_srbm_select(rdev, 0, i, 0, 0);
+ for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); ++i) {
+ int me = (i < 4) ? 1 : 2;
+ int pipe = (i < 4) ? i : (i - 4);
+
+ cik_srbm_select(rdev, me, pipe, 0, 0);
eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2) ;
/* write the EOP addr */
@@ -4597,6 +4587,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
+ cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
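
The compute ring init now walks every pipe on every enabled MEC instead of a single pipe, deriving the (me, pipe) pair from the flat index. A hypothetical helper making the mapping explicit:

    /* MEC1 owns flat pipes 0-3, MEC2 (Kaveri only) owns 4-7 */
    static inline void cik_mec_flat_to_me_pipe(int i, int *me, int *pipe)
    {
            *me   = (i < 4) ? 1 : 2;
            *pipe = (i < 4) ? i : i - 4;
    }

Note the added cik_srbm_select(rdev, 0, 0, 0, 0) restores the default SRBM selection before srbm_mutex is dropped.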
@@ -5830,7 +5821,6 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
u32 i, j, k;
u32 mask;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -5842,7 +5832,6 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
for (k = 0; k < rdev->usec_timeout; k++) {
@@ -5977,12 +5966,10 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(RLC_LB_PARAMS, 0x00600408);
WREG32(RLC_LB_CNTL, 0x80000004);
- mutex_unlock(&rdev->grbm_idx_mutex);
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
@@ -6049,13 +6036,11 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
WREG32(RLC_SERDES_WR_CTRL, tmp2);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6098,13 +6083,11 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
WREG32(RLC_SERDES_WR_CTRL, data);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6148,13 +6131,11 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
WREG32(RLC_SERDES_WR_CTRL, data);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
}
@@ -6583,12 +6564,10 @@ static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
u32 mask = 0, tmp, tmp1;
int i;
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, se, sh);
tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
tmp &= 0xffff0000;
@@ -7074,7 +7053,8 @@ static int cik_irq_init(struct radeon_device *rdev)
int cik_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl;
- u32 cp_m1p0;
+ u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
+ u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -7107,6 +7087,13 @@ int cik_irq_set(struct radeon_device *rdev)
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -7121,6 +7108,33 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
+ case 1:
+ cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ default:
+ DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
+ break;
+ }
+ } else if (ring->me == 2) {
+ switch (ring->pipe) {
+ case 0:
+ cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 1:
+ cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m2p3 |= TIME_STAMP_INT_ENABLE;
+ break;
default:
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
break;
@@ -7137,6 +7151,33 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
+ case 1:
+ cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m1p3 |= TIME_STAMP_INT_ENABLE;
+ break;
+ default:
+ DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
+ break;
+ }
+ } else if (ring->me == 2) {
+ switch (ring->pipe) {
+ case 0:
+ cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 1:
+ cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m2p3 |= TIME_STAMP_INT_ENABLE;
+ break;
default:
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
break;
@@ -7217,6 +7258,13 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
+ WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
+ WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
+ WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
+ WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
+ WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
+ WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
+ WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
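
Interrupt control is now programmed and written back for all eight ME1/ME2 pipes. A table-driven sketch of the same register programming (register names as used above):

    /* hypothetical table form: cp_pipe_int_cntl[me - 1][pipe] */
    static const u32 cp_pipe_int_cntl[2][4] = {
            { CP_ME1_PIPE0_INT_CNTL, CP_ME1_PIPE1_INT_CNTL,
              CP_ME1_PIPE2_INT_CNTL, CP_ME1_PIPE3_INT_CNTL },
            { CP_ME2_PIPE0_INT_CNTL, CP_ME2_PIPE1_INT_CNTL,
              CP_ME2_PIPE2_INT_CNTL, CP_ME2_PIPE3_INT_CNTL },
    };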
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index 4e883fdc59d8..318377df09ef 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -147,8 +147,6 @@
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
-#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
-
#define SQ_IND_INDEX 0x8DE0
#define SQ_CMD 0x8DEC
#define SQ_IND_DATA 0x8DE4
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a8e546569858..d34887873dea 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -731,10 +731,6 @@ struct radeon_doorbell {
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
-void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
/*
* IRQS.
@@ -2442,8 +2438,6 @@ struct radeon_device {
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
- /* GRBM index mutex. Protects concurrents access to GRBM index */
- struct mutex grbm_idx_mutex;
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ffc10cadcf34..8d3e3d2e0090 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -392,37 +392,6 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
__clear_bit(doorbell, rdev->doorbell.used);
}
-/**
- * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup KFD
- *
- * @rdev: radeon_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for radeon.
- *
- * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
- * takes doorbells required for its own rings and reports the setup to KFD.
- * Radeon reserved doorbells are at the start of the doorbell aperture.
- */
-void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
-{
- /* The first num_doorbells are used by radeon.
- * KFD takes whatever's left in the aperture. */
- if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = rdev->doorbell.base;
- *aperture_size = rdev->doorbell.size;
- *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
-}
-
/*
* radeon_wb_*()
* Writeback is the method by which the GPU updates special pages
@@ -1341,7 +1310,6 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
- mutex_init(&rdev->grbm_idx_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 98e30d71d9e0..557fd7915973 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -347,7 +347,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -380,7 +380,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -445,8 +445,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, ctx->interruptible,
- ctx->no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
new file mode 100644
index 000000000000..bd0377c0d2ee
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -0,0 +1,26 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+ccflags-y := -Iinclude/drm
+gpu-sched-y := gpu_scheduler.o sched_fence.o
+
+obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index dcb987e6d94a..2c18996d59c5 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -19,37 +19,36 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- *
*/
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
-#include "gpu_scheduler.h"
-
-#include "spsc_queue.h"
+#include <drm/gpu_scheduler.h>
+#include <drm/spsc_queue.h>
#define CREATE_TRACE_POINTS
-#include "gpu_sched_trace.h"
+#include <drm/gpu_scheduler_trace.h>
-#define to_amd_sched_job(sched_job) \
- container_of((sched_job), struct amd_sched_job, queue_node)
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/* Initialize a given run queue struct */
-static void amd_sched_rq_init(struct amd_sched_rq *rq)
+static void drm_sched_rq_init(struct drm_sched_rq *rq)
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->current_entity = NULL;
}
-static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
- struct amd_sched_entity *entity)
+static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
{
if (!list_empty(&entity->list))
return;
@@ -58,8 +57,8 @@ static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
spin_unlock(&rq->lock);
}
-static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
- struct amd_sched_entity *entity)
+static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
{
if (list_empty(&entity->list))
return;
@@ -77,17 +76,17 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
*
* Try to find a ready entity, returns NULL if none found.
*/
-static struct amd_sched_entity *
-amd_sched_rq_select_entity(struct amd_sched_rq *rq)
+static struct drm_sched_entity *
+drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
- struct amd_sched_entity *entity;
+ struct drm_sched_entity *entity;
spin_lock(&rq->lock);
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- if (amd_sched_entity_is_ready(entity)) {
+ if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
return entity;
@@ -97,7 +96,7 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
list_for_each_entry(entity, &rq->entities, list) {
- if (amd_sched_entity_is_ready(entity)) {
+ if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
return entity;
@@ -116,22 +115,22 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
* Init a context entity used by scheduler when submit to HW ring.
*
* @sched The pointer to the scheduler
- * @entity The pointer to a valid amd_sched_entity
+ * @entity The pointer to a valid drm_sched_entity
* @rq The run queue this entity belongs
* @kernel If this is an entity for the kernel
* @jobs The max number of jobs in the job queue
*
* return 0 if succeed. negative error code on failure
*/
-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- struct amd_sched_rq *rq,
+int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
uint32_t jobs, atomic_t *guilty)
{
if (!(sched && entity && rq))
return -EINVAL;
- memset(entity, 0, sizeof(struct amd_sched_entity));
+ memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = rq;
entity->sched = sched;
@@ -146,6 +145,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return 0;
}
+EXPORT_SYMBOL(drm_sched_entity_init);
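
Exporting the entry points is what lets the scheduler live as the standalone gpu-sched module built by the new Makefile above, with drivers linking against it instead of compiling the code in. A driver-side sketch of the renamed API (the run-queue/priority names are assumed from the new <drm/gpu_scheduler.h> header, which is not part of this hunk):

    struct drm_sched_entity entity;
    int r;

    r = drm_sched_entity_init(&sched, &entity,
                              &sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
                              num_jobs, NULL);
    if (r)
            return r;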
/**
* Query if entity is initialized
@@ -155,8 +155,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
*
* return true if entity is initialized, false otherwise
*/
-static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
{
return entity->sched == sched &&
entity->rq != NULL;
@@ -169,7 +169,7 @@ static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
*
* Return true if entity don't has any unscheduled jobs.
*/
-static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb();
if (spsc_queue_peek(&entity->job_queue) == NULL)
@@ -185,7 +185,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
*
* Return true if entity could provide a job.
*/
-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
if (spsc_queue_peek(&entity->job_queue) == NULL)
return false;
@@ -204,12 +204,12 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
*
* Cleanup and free the allocated resources.
*/
-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
{
int r;
- if (!amd_sched_entity_is_initialized(sched, entity))
+ if (!drm_sched_entity_is_initialized(sched, entity))
return;
/**
* The client will not queue more IBs during this fini, consume existing
@@ -219,10 +219,10 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
r = -ERESTARTSYS;
else
r = wait_event_killable(sched->job_scheduled,
- amd_sched_entity_is_idle(entity));
- amd_sched_entity_set_rq(entity, NULL);
+ drm_sched_entity_is_idle(entity));
+ drm_sched_entity_set_rq(entity, NULL);
if (r) {
- struct amd_sched_job *job;
+ struct drm_sched_job *job;
/* Park the kernel thread for a moment to make sure it isn't processing
* our entity.
@@ -236,37 +236,38 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
entity->dependency = NULL;
}
- while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) {
- struct amd_sched_fence *s_fence = job->s_fence;
- amd_sched_fence_scheduled(s_fence);
+ while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = job->s_fence;
+ drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
- amd_sched_fence_finished(s_fence);
+ drm_sched_fence_finished(s_fence);
WARN_ON(s_fence->parent);
dma_fence_put(&s_fence->finished);
sched->ops->free_job(job);
}
}
}
+EXPORT_SYMBOL(drm_sched_entity_fini);
-static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_entity *entity =
- container_of(cb, struct amd_sched_entity, cb);
+ struct drm_sched_entity *entity =
+ container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
- amd_sched_wakeup(entity->sched);
+ drm_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_entity *entity =
- container_of(cb, struct amd_sched_entity, cb);
+ struct drm_sched_entity *entity =
+ container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
}
-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
- struct amd_sched_rq *rq)
+void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq)
{
if (entity->rq == rq)
return;
@@ -274,37 +275,39 @@ void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
spin_lock(&entity->rq_lock);
if (entity->rq)
- amd_sched_rq_remove_entity(entity->rq, entity);
+ drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
if (rq)
- amd_sched_rq_add_entity(rq, entity);
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
}
+EXPORT_SYMBOL(drm_sched_entity_set_rq);
-bool amd_sched_dependency_optimized(struct dma_fence* fence,
- struct amd_sched_entity *entity)
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
- struct amd_sched_fence *s_fence;
+ struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
return false;
if (fence->context == entity->fence_context)
return true;
- s_fence = to_amd_sched_fence(fence);
+ s_fence = to_drm_sched_fence(fence);
if (s_fence && s_fence->sched == sched)
return true;
return false;
}
+EXPORT_SYMBOL(drm_sched_dependency_optimized);
-static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->sched;
struct dma_fence * fence = entity->dependency;
- struct amd_sched_fence *s_fence;
+ struct drm_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourselves */
@@ -312,7 +315,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
return false;
}
- s_fence = to_amd_sched_fence(fence);
+ s_fence = to_drm_sched_fence(fence);
if (s_fence && s_fence->sched == sched) {
/*
@@ -323,7 +326,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
dma_fence_put(entity->dependency);
entity->dependency = fence;
if (!dma_fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ drm_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
@@ -332,25 +335,25 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
}
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ drm_sched_entity_wakeup))
return true;
dma_fence_put(entity->dependency);
return false;
}
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+static struct drm_sched_job *
+drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
- struct amd_sched_job *sched_job = to_amd_sched_job(
+ struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_sched_job *sched_job = to_drm_sched_job(
spsc_queue_peek(&entity->job_queue));
if (!sched_job)
return NULL;
while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
- if (amd_sched_entity_add_dependency_cb(entity))
+ if (drm_sched_entity_add_dependency_cb(entity))
return NULL;
/* skip jobs from an entity that is marked guilty */
@@ -368,13 +371,13 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
*
* The job is appended to the entity's queue; the scheduler is woken up
* if it is the entity's first pending job.
*/
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
- struct amd_sched_entity *entity)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = sched_job->sched;
+ struct drm_gpu_scheduler *sched = sched_job->sched;
bool first = false;
- trace_amd_sched_job(sched_job, entity);
+ trace_drm_sched_job(sched_job, entity);
spin_lock(&entity->queue_lock);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
@@ -385,25 +388,26 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
if (first) {
/* Add the entity to the run queue */
spin_lock(&entity->rq_lock);
- amd_sched_rq_add_entity(entity->rq, entity);
+ drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
- amd_sched_wakeup(sched);
+ drm_sched_wakeup(sched);
}
}
+EXPORT_SYMBOL(drm_sched_entity_push_job);
/* job_finish is called after the hw fence is signaled
*/
-static void amd_sched_job_finish(struct work_struct *work)
+static void drm_sched_job_finish(struct work_struct *work)
{
- struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+ struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
finish_work);
- struct amd_gpu_scheduler *sched = s_job->sched;
+ struct drm_gpu_scheduler *sched = s_job->sched;
/* remove job from ring_mirror_list */
spin_lock(&sched->job_list_lock);
list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- struct amd_sched_job *next;
+ struct drm_sched_job *next;
spin_unlock(&sched->job_list_lock);
cancel_delayed_work_sync(&s_job->work_tdr);
@@ -411,7 +415,7 @@ static void amd_sched_job_finish(struct work_struct *work)
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node);
+ struct drm_sched_job, node);
if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
@@ -421,42 +425,42 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct dma_fence *f,
+static void drm_sched_job_finish_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
{
- struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
schedule_work(&job->finish_work);
}
-static void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
- struct amd_gpu_scheduler *sched = s_job->sched;
+ struct drm_gpu_scheduler *sched = s_job->sched;
dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
- amd_sched_job_finish_cb);
+ drm_sched_job_finish_cb);
spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node) == s_job)
+ struct drm_sched_job, node) == s_job)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
spin_unlock(&sched->job_list_lock);
}
-static void amd_sched_job_timedout(struct work_struct *work)
+static void drm_sched_job_timedout(struct work_struct *work)
{
- struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+ struct drm_sched_job *job = container_of(work, struct drm_sched_job,
work_tdr.work);
job->sched->ops->timedout_job(job);
}
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
- struct amd_sched_job *s_job;
- struct amd_sched_entity *entity, *tmp;
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *entity, *tmp;
int i;
spin_lock(&sched->job_list_lock);
@@ -471,14 +475,14 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo
}
spin_unlock(&sched->job_list_lock);
- if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) {
+ if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
/* don't increase @bad's karma if it's from the KERNEL RQ,
* because a GPU hang can corrupt kernel jobs (like VM updates),
* yet kernel jobs are always considered good.
*/
- for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++ ) {
- struct amd_sched_rq *rq = &sched->sched_rq[i];
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++ ) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
@@ -495,30 +499,22 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo
}
}
}
+EXPORT_SYMBOL(drm_sched_hw_job_reset);
-void amd_sched_job_kickout(struct amd_sched_job *s_job)
-{
- struct amd_gpu_scheduler *sched = s_job->sched;
-
- spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
- spin_unlock(&sched->job_list_lock);
-}
-
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
- struct amd_sched_job *s_job, *tmp;
+ struct drm_sched_job *s_job, *tmp;
bool found_guilty = false;
int r;
spin_lock(&sched->job_list_lock);
s_job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node);
+ struct drm_sched_job, node);
if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
- struct amd_sched_fence *s_fence = s_job->s_fence;
+ struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *fence;
uint64_t guilty_context;
@@ -536,45 +532,47 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ drm_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &s_fence->cb);
+ drm_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
} else {
- amd_sched_process_job(NULL, &s_fence->cb);
+ drm_sched_process_job(NULL, &s_fence->cb);
}
spin_lock(&sched->job_list_lock);
}
spin_unlock(&sched->job_list_lock);
}
+EXPORT_SYMBOL(drm_sched_job_recovery);
/* init a sched_job with basic fields */
-int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
void *owner)
{
job->sched = sched;
job->s_priority = entity->rq - sched->sched_rq;
- job->s_fence = amd_sched_fence_create(entity, owner);
+ job->s_fence = drm_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_WORK(&job->finish_work, amd_sched_job_finish);
+ INIT_WORK(&job->finish_work, drm_sched_job_finish);
INIT_LIST_HEAD(&job->node);
- INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
+ INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
return 0;
}
+EXPORT_SYMBOL(drm_sched_job_init);
/**
* Return true if we can push more jobs to the hw.
*/
-static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
return atomic_read(&sched->hw_rq_count) <
sched->hw_submission_limit;
@@ -583,27 +581,27 @@ static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
/**
* Wake up the scheduler when it is ready
*/
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
- if (amd_sched_ready(sched))
+ if (drm_sched_ready(sched))
wake_up_interruptible(&sched->wake_up_worker);
}
/**
* Select next entity to process
*/
-static struct amd_sched_entity *
-amd_sched_select_entity(struct amd_gpu_scheduler *sched)
+static struct drm_sched_entity *
+drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
- struct amd_sched_entity *entity;
+ struct drm_sched_entity *entity;
int i;
- if (!amd_sched_ready(sched))
+ if (!drm_sched_ready(sched))
return NULL;
/* Kernel run queue has higher priority than normal run queue */
- for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
- entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
if (entity)
break;
}
@@ -611,22 +609,22 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_fence *s_fence =
- container_of(cb, struct amd_sched_fence, cb);
- struct amd_gpu_scheduler *sched = s_fence->sched;
+ struct drm_sched_fence *s_fence =
+ container_of(cb, struct drm_sched_fence, cb);
+ struct drm_gpu_scheduler *sched = s_fence->sched;
dma_fence_get(&s_fence->finished);
atomic_dec(&sched->hw_rq_count);
- amd_sched_fence_finished(s_fence);
+ drm_sched_fence_finished(s_fence);
- trace_amd_sched_process_job(s_fence);
+ trace_drm_sched_process_job(s_fence);
dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
-static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
if (kthread_should_park()) {
kthread_parkme();
@@ -636,52 +634,52 @@ static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
return false;
}
-static int amd_sched_main(void *param)
+static int drm_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
- struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+ struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
int r;
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
- struct amd_sched_entity *entity = NULL;
- struct amd_sched_fence *s_fence;
- struct amd_sched_job *sched_job;
+ struct drm_sched_entity *entity = NULL;
+ struct drm_sched_fence *s_fence;
+ struct drm_sched_job *sched_job;
struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
- (!amd_sched_blocked(sched) &&
- (entity = amd_sched_select_entity(sched))) ||
+ (!drm_sched_blocked(sched) &&
+ (entity = drm_sched_select_entity(sched))) ||
kthread_should_stop());
if (!entity)
continue;
- sched_job = amd_sched_entity_pop_job(entity);
+ sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job)
continue;
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- amd_sched_job_begin(sched_job);
+ drm_sched_job_begin(sched_job);
fence = sched->ops->run_job(sched_job);
- amd_sched_fence_scheduled(s_fence);
+ drm_sched_fence_scheduled(s_fence);
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ drm_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &s_fence->cb);
+ drm_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
} else {
- amd_sched_process_job(NULL, &s_fence->cb);
+ drm_sched_process_job(NULL, &s_fence->cb);
}
wake_up(&sched->job_scheduled);
@@ -699,8 +697,8 @@ static int amd_sched_main(void *param)
*
* Return 0 on success, otherwise error code.
*/
-int amd_sched_init(struct amd_gpu_scheduler *sched,
- const struct amd_sched_backend_ops *ops,
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
unsigned hw_submission,
unsigned hang_limit,
long timeout,
@@ -712,8 +710,8 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
sched->name = name;
sched->timeout = timeout;
sched->hang_limit = hang_limit;
- for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
- amd_sched_rq_init(&sched->sched_rq[i]);
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+ drm_sched_rq_init(&sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
@@ -723,7 +721,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
- sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+ sched->thread = kthread_run(drm_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
DRM_ERROR("Failed to create scheduler for %s.\n", name);
return PTR_ERR(sched->thread);
@@ -731,14 +729,16 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
return 0;
}
+EXPORT_SYMBOL(drm_sched_init);
/**
* Destroy a gpu scheduler
*
* @sched The pointer to the scheduler
*/
-void amd_sched_fini(struct amd_gpu_scheduler *sched)
+void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
}
+EXPORT_SYMBOL(drm_sched_fini);
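
With drm_sched_entity_init(), drm_sched_job_init() and drm_sched_entity_push_job() now exported above, a driver's submission path can compose them directly. The following is a minimal sketch of that flow, assuming a hypothetical my_job wrapper; it is not part of this patch, and error handling is abbreviated.

    #include <drm/gpu_scheduler.h>

    struct my_job {
    	struct drm_sched_job base;	/* must embed drm_sched_job */
    	/* driver-private payload (command buffers, ring, ...) */
    };

    static int my_submit_job(struct drm_gpu_scheduler *sched,
    			     struct drm_sched_entity *entity,
    			     struct my_job *job, void *owner)
    {
    	int r;

    	/* Creates the scheduled/finished fences and arms the TDR worker. */
    	r = drm_sched_job_init(&job->base, sched, entity, owner);
    	if (r)
    		return r;

    	/* Queues the job on the entity; wakes the scheduler thread if
    	 * this was the entity's first pending job. */
    	drm_sched_entity_push_job(&job->base, entity);
    	return 0;
    }

drm_sched_init() and drm_sched_entity_init() remain one-time calls at ring and context creation, as the amdgpu conversion elsewhere in this series shows.
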
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 33f54d0a5c4f..69aab086b913 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -19,20 +19,20 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- *
*/
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
static struct kmem_cache *sched_fence_slab;
-int amd_sched_fence_slab_init(void)
+static int __init drm_sched_fence_slab_init(void)
{
sched_fence_slab = kmem_cache_create(
- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+ "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!sched_fence_slab)
return -ENOMEM;
@@ -40,36 +40,13 @@ int amd_sched_fence_slab_init(void)
return 0;
}
-void amd_sched_fence_slab_fini(void)
+static void __exit drm_sched_fence_slab_fini(void)
{
rcu_barrier();
kmem_cache_destroy(sched_fence_slab);
}
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
- void *owner)
-{
- struct amd_sched_fence *fence = NULL;
- unsigned seq;
-
- fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
- if (fence == NULL)
- return NULL;
-
- fence->owner = owner;
- fence->sched = entity->sched;
- spin_lock_init(&fence->lock);
-
- seq = atomic_inc_return(&entity->fence_seq);
- dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
-
- return fence;
-}
-
-void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
int ret = dma_fence_signal(&fence->scheduled);
@@ -81,7 +58,7 @@ void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
"was already signaled\n");
}
-void amd_sched_fence_finished(struct amd_sched_fence *fence)
+void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
int ret = dma_fence_signal(&fence->finished);
@@ -93,18 +70,18 @@ void amd_sched_fence_finished(struct amd_sched_fence *fence)
"was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
+static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
- return "amd_sched";
+ return "drm_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
+static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
+static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -116,10 +93,10 @@ static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
*
* Free up the fence memory after the RCU grace period.
*/
-static void amd_sched_fence_free(struct rcu_head *rcu)
+static void drm_sched_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
@@ -133,11 +110,11 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct dma_fence *f)
+static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
- call_rcu(&fence->finished.rcu, amd_sched_fence_free);
+ call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}
/**
@@ -147,27 +124,68 @@ static void amd_sched_fence_release_scheduled(struct dma_fence *f)
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct dma_fence *f)
+static void drm_sched_fence_release_finished(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(&fence->scheduled);
}
-const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
- .get_driver_name = amd_sched_fence_get_driver_name,
- .get_timeline_name = amd_sched_fence_get_timeline_name,
- .enable_signaling = amd_sched_fence_enable_signaling,
+const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+ .get_driver_name = drm_sched_fence_get_driver_name,
+ .get_timeline_name = drm_sched_fence_get_timeline_name,
+ .enable_signaling = drm_sched_fence_enable_signaling,
.signaled = NULL,
.wait = dma_fence_default_wait,
- .release = amd_sched_fence_release_scheduled,
+ .release = drm_sched_fence_release_scheduled,
};
-const struct dma_fence_ops amd_sched_fence_ops_finished = {
- .get_driver_name = amd_sched_fence_get_driver_name,
- .get_timeline_name = amd_sched_fence_get_timeline_name,
- .enable_signaling = amd_sched_fence_enable_signaling,
+const struct dma_fence_ops drm_sched_fence_ops_finished = {
+ .get_driver_name = drm_sched_fence_get_driver_name,
+ .get_timeline_name = drm_sched_fence_get_timeline_name,
+ .enable_signaling = drm_sched_fence_enable_signaling,
.signaled = NULL,
.wait = dma_fence_default_wait,
- .release = amd_sched_fence_release_finished,
+ .release = drm_sched_fence_release_finished,
};
+
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
+{
+ if (f->ops == &drm_sched_fence_ops_scheduled)
+ return container_of(f, struct drm_sched_fence, scheduled);
+
+ if (f->ops == &drm_sched_fence_ops_finished)
+ return container_of(f, struct drm_sched_fence, finished);
+
+ return NULL;
+}
+EXPORT_SYMBOL(to_drm_sched_fence);
+
+struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
+ void *owner)
+{
+ struct drm_sched_fence *fence = NULL;
+ unsigned seq;
+
+ fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+
+ fence->owner = owner;
+ fence->sched = entity->sched;
+ spin_lock_init(&fence->lock);
+
+ seq = atomic_inc_return(&entity->fence_seq);
+ dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
+
+ return fence;
+}
+
+module_init(drm_sched_fence_slab_init);
+module_exit(drm_sched_fence_slab_fini);
+
+MODULE_DESCRIPTION("DRM GPU scheduler");
+MODULE_LICENSE("GPL and additional rights");
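
to_drm_sched_fence() is likewise exported above, so a driver can apply the same shortcut the scheduler uses when deciding whether a dependency needs an explicit wait. A hedged sketch mirroring drm_sched_dependency_optimized(); the helper name is hypothetical:

    static bool my_fence_needs_sync(struct drm_gpu_scheduler *sched,
    				    struct dma_fence *fence)
    {
    	struct drm_sched_fence *s_fence;

    	if (!fence || dma_fence_is_signaled(fence))
    		return false;	/* nothing to wait for */

    	s_fence = to_drm_sched_fence(fence);
    	if (s_fence && s_fence->sched == sched)
    		return false;	/* same scheduler: ordered by submission */

    	return true;		/* foreign fence: needs an explicit wait */
    }
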
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 97c3da6d5f17..60bb5c12b568 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -42,10 +42,6 @@
#include <linux/atomic.h>
#include <linux/reservation.h>
-#define TTM_ASSERT_LOCKED(param)
-#define TTM_DEBUG(fmt, arg...)
-#define TTM_BO_HASH_ORDER 13
-
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
@@ -165,7 +161,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -216,7 +212,7 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
@@ -233,7 +229,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
int ret = 0;
uint32_t page_flags = 0;
- TTM_ASSERT_LOCKED(&bo->mutex);
+ reservation_object_assert_held(bo->resv);
bo->ttm = NULL;
if (bdev->need_dma32)
@@ -324,13 +320,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, ctx->interruptible,
- ctx->no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, ctx, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, ctx, mem);
else
- ret = ttm_bo_move_memcpy(bo, ctx->interruptible,
- ctx->no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, ctx, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -588,12 +582,17 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
ddestroy);
kref_get(&bo->list_kref);
list_move_tail(&bo->ddestroy, &removed);
- spin_unlock(&glob->lru_lock);
- reservation_object_lock(bo->resv, NULL);
+ if (remove_all || bo->resv != &bo->ttm_resv) {
+ spin_unlock(&glob->lru_lock);
+ reservation_object_lock(bo->resv, NULL);
- spin_lock(&glob->lru_lock);
- ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+ spin_lock(&glob->lru_lock);
+ ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+
+ } else if (reservation_object_trylock(bo->resv)) {
+ ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+ }
kref_put(&bo->list_kref, ttm_bo_release_list);
spin_lock(&glob->lru_lock);
@@ -662,7 +661,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int ret = 0;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -709,7 +708,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
- struct reservation_object *resv,
uint32_t mem_type,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx)
@@ -724,8 +722,9 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
- if (bo->resv == resv) {
- if (list_empty(&bo->ddestroy))
+ if (bo->resv == ctx->resv) {
+ if (!ctx->allow_reserved_eviction &&
+ list_empty(&bo->ddestroy))
continue;
} else {
locked = reservation_object_trylock(bo->resv);
@@ -737,6 +736,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
place)) {
if (locked)
reservation_object_unlock(bo->resv);
+ locked = false;
continue;
}
break;
@@ -836,7 +836,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
+ ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -1018,7 +1018,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
int ret = 0;
struct ttm_mem_reg mem;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1088,7 +1088,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
int ret;
uint32_t new_flags;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
/*
* Check whether we need to move buffer.
*/
@@ -1182,7 +1182,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->sg = sg;
if (resv) {
bo->resv = resv;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
} else {
bo->resv = &bo->ttm_resv;
}
@@ -1204,7 +1204,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* since otherwise lockdep will be angered in radeon.
*/
if (!resv) {
- locked = ww_mutex_trylock(&bo->resv->lock);
+ locked = reservation_object_trylock(bo->resv);
WARN_ON(!locked);
}
@@ -1333,8 +1333,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, NULL, mem_type,
- NULL, &ctx);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
@@ -1541,12 +1540,12 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
cancel_delayed_work_sync(&bdev->wq);
if (ttm_bo_delayed_delete(bdev, true))
- TTM_DEBUG("Delayed destroy list was clean\n");
+ pr_debug("Delayed destroy list was clean\n");
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&bdev->man[0].lru[0]))
- TTM_DEBUG("Swap list %d was clean\n", i);
+ pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
drm_vma_offset_manager_destroy(&bdev->vma_manager);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index e7a519f1849b..6e353df4e4bc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -53,7 +53,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
@@ -329,7 +329,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -345,7 +345,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
unsigned long add = 0;
int dir;
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret)
return ret;
@@ -485,7 +485,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->acc_size = 0;
fbo->resv = &fbo->ttm_resv;
reservation_object_init(fbo->resv);
- ret = ww_mutex_trylock(&fbo->resv->lock);
+ ret = reservation_object_trylock(fbo->resv);
WARN_ON(!ret);
*new_obj = fbo;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 44343a2bf55c..b5ba6441489f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
freed += (nr_free_pool - shrink_pages) << pool->order;
if (freed >= sc->nr_to_scan)
break;
+ shrink_pages <<= pool->order;
}
mutex_unlock(&lock);
return freed;
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int r = 0;
unsigned i, j, cpages;
unsigned npages = 1 << order;
- unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+ unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
new file mode 100644
index 000000000000..dfd54fb94e10
--- /dev/null
+++ b/include/drm/gpu_scheduler.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _DRM_GPU_SCHEDULER_H_
+#define _DRM_GPU_SCHEDULER_H_
+
+#include <drm/spsc_queue.h>
+#include <linux/dma-fence.h>
+
+struct drm_gpu_scheduler;
+struct drm_sched_rq;
+
+enum drm_sched_priority {
+ DRM_SCHED_PRIORITY_MIN,
+ DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_HIGH_SW,
+ DRM_SCHED_PRIORITY_HIGH_HW,
+ DRM_SCHED_PRIORITY_KERNEL,
+ DRM_SCHED_PRIORITY_MAX,
+ DRM_SCHED_PRIORITY_INVALID = -1,
+ DRM_SCHED_PRIORITY_UNSET = -2
+};
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to the corresponding hardware ring, based on the
+ * scheduling policy.
+ */
+struct drm_sched_entity {
+ struct list_head list;
+ struct drm_sched_rq *rq;
+ spinlock_t rq_lock;
+ struct drm_gpu_scheduler *sched;
+
+ spinlock_t queue_lock;
+ struct spsc_queue job_queue;
+
+ atomic_t fence_seq;
+ uint64_t fence_context;
+
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
+ atomic_t *guilty; /* points to ctx's guilty */
+};
+
+/**
+ * A run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+ */
+struct drm_sched_rq {
+ spinlock_t lock;
+ struct list_head entities;
+ struct drm_sched_entity *current_entity;
+};
+
+struct drm_sched_fence {
+ struct dma_fence scheduled;
+ struct dma_fence finished;
+ struct dma_fence_cb cb;
+ struct dma_fence *parent;
+ struct drm_gpu_scheduler *sched;
+ spinlock_t lock;
+ void *owner;
+};
+
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
+
+struct drm_sched_job {
+ struct spsc_node queue_node;
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_fence *s_fence;
+ struct dma_fence_cb finish_cb;
+ struct work_struct finish_work;
+ struct list_head node;
+ struct delayed_work work_tdr;
+ uint64_t id;
+ atomic_t karma;
+ enum drm_sched_priority s_priority;
+};
+
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+ int threshold)
+{
+ return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+}
+
+/**
+ * Define the backend operations called by the scheduler;
+ * these functions should be implemented by the driver.
+ */
+struct drm_sched_backend_ops {
+ struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity);
+ struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
+ void (*timedout_job)(struct drm_sched_job *sched_job);
+ void (*free_job)(struct drm_sched_job *sched_job);
+};
+
+/**
+ * One scheduler is implemented for each hardware ring.
+ */
+struct drm_gpu_scheduler {
+ const struct drm_sched_backend_ops *ops;
+ uint32_t hw_submission_limit;
+ long timeout;
+ const char *name;
+ struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
+ wait_queue_head_t wake_up_worker;
+ wait_queue_head_t job_scheduled;
+ atomic_t hw_rq_count;
+ atomic64_t job_id_count;
+ struct task_struct *thread;
+ struct list_head ring_mirror_list;
+ spinlock_t job_list_lock;
+ int hang_limit;
+};
+
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
+ uint32_t hw_submission, unsigned hang_limit, long timeout,
+ const char *name);
+void drm_sched_fini(struct drm_gpu_scheduler *sched);
+
+int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+ uint32_t jobs, atomic_t *guilty);
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity);
+void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq);
+
+struct drm_sched_fence *drm_sched_fence_create(
+ struct drm_sched_entity *s_entity, void *owner);
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
+void drm_sched_fence_finished(struct drm_sched_fence *fence);
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ void *owner);
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job);
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+#endif
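
The four drm_sched_backend_ops callbacks are the entire driver-facing contract of this header. Below is a hedged sketch of a driver filling them in; the my_job container and its hw_fence plumbing are assumptions, not part of this patch, and the timeout handler shows one plausible use of drm_sched_invalidate_job() with sched->hang_limit under the karma scheme above.

    #include <drm/drmP.h>
    #include <drm/gpu_scheduler.h>
    #include <linux/slab.h>

    struct my_job {
    	struct drm_sched_job base;
    	struct dma_fence *hw_fence;	/* fence from the actual HW submit */
    };

    static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
    				           struct drm_sched_entity *s_entity)
    {
    	/* Return the next fence this job still has to wait on,
    	 * or NULL once the job is ready to run. */
    	return NULL;
    }

    static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
    {
    	struct my_job *job = container_of(sched_job, struct my_job, base);

    	/* Submit to the hardware ring here; the scheduler attaches its
    	 * callback to the returned fence and signals s_fence->finished
    	 * when it completes. */
    	return dma_fence_get(job->hw_fence);
    }

    static void my_timedout_job(struct drm_sched_job *sched_job)
    {
    	struct drm_gpu_scheduler *sched = sched_job->sched;

    	/* Bump the job's karma; flag it once it exceeds the hang limit. */
    	if (drm_sched_invalidate_job(sched_job, sched->hang_limit))
    		DRM_ERROR("job %llu exceeded the hang limit\n",
    			  (unsigned long long)sched_job->id);

    	/* A complete driver would reset the ring and then use
    	 * drm_sched_hw_job_reset() / drm_sched_job_recovery(). */
    }

    static void my_free_job(struct drm_sched_job *sched_job)
    {
    	kfree(container_of(sched_job, struct my_job, base));
    }

    static const struct drm_sched_backend_ops my_sched_ops = {
    	.dependency	= my_dependency,
    	.run_job	= my_run_job,
    	.timedout_job	= my_timedout_job,
    	.free_job	= my_free_job,
    };

Registration is then one drm_sched_init(&sched, &my_sched_ops, hw_submission, hang_limit, timeout, name) per hardware ring, paired with drm_sched_fini() at teardown.
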
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/include/drm/gpu_scheduler_trace.h
index b42a78922505..0789e8d0a0e1 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/include/drm/gpu_scheduler_trace.h
@@ -31,14 +31,14 @@
#include <drm/drmP.h>
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM gpu_sched
-#define TRACE_INCLUDE_FILE gpu_sched_trace
+#define TRACE_SYSTEM gpu_scheduler
+#define TRACE_INCLUDE_FILE gpu_scheduler_trace
-TRACE_EVENT(amd_sched_job,
- TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity),
+TRACE_EVENT(drm_sched_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity),
TP_STRUCT__entry(
- __field(struct amd_sched_entity *, entity)
+ __field(struct drm_sched_entity *, entity)
__field(struct dma_fence *, fence)
__field(const char *, name)
__field(uint64_t, id)
@@ -61,8 +61,8 @@ TRACE_EVENT(amd_sched_job,
__entry->job_count, __entry->hw_job_count)
);
-TRACE_EVENT(amd_sched_process_job,
- TP_PROTO(struct amd_sched_fence *fence),
+TRACE_EVENT(drm_sched_process_job,
+ TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
__field(struct dma_fence *, fence)
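
Because TRACE_SYSTEM names the directory under tracefs, this rename also moves the events for existing tooling: what used to appear under events/gpu_sched/ as amd_sched_job and amd_sched_process_job is now found under events/gpu_scheduler/ as drm_sched_job and drm_sched_process_job.
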
diff --git a/drivers/gpu/drm/amd/scheduler/spsc_queue.h b/include/drm/spsc_queue.h
index 5902f35ce759..125f096c88cb 100644
--- a/drivers/gpu/drm/amd/scheduler/spsc_queue.h
+++ b/include/drm/spsc_queue.h
@@ -21,10 +21,11 @@
*
*/
-#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_
-#define AMD_SCHEDULER_SPSC_QUEUE_H_
+#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
+#define DRM_SCHEDULER_SPSC_QUEUE_H_
#include <linux/atomic.h>
+#include <linux/preempt.h>
/** SPSC lockless queue */
@@ -118,4 +119,4 @@ static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
-#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */
+#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */
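
The queue itself is unchanged by the move; the scheduler uses it with exactly one producer (the submitting context) and one consumer (the scheduler thread). A hedged sketch of that pattern follows; spsc_queue_init() and the my_node type are assumptions, since the diff does not show the header body in full:

    #include <drm/spsc_queue.h>

    struct my_node {
    	struct spsc_node entry;		/* must embed spsc_node */
    	int payload;
    };

    static void my_spsc_example(struct spsc_queue *q, struct my_node *n)
    {
    	struct spsc_node *head;

    	spsc_queue_init(q);

    	/* push returns true when the queue was empty, which is how
    	 * drm_sched_entity_push_job() decides to wake the scheduler */
    	if (spsc_queue_push(q, &n->entry)) {
    		/* first entry: the consumer may need a wakeup here */
    	}

    	head = spsc_queue_pop(q);
    	if (head) {
    		struct my_node *popped =
    			container_of(head, struct my_node, entry);
    		popped->payload++;	/* consume */
    	}
    }
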
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 368eb02b54a9..c1263308145a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -263,6 +263,8 @@ struct ttm_bo_kmap_obj {
*
* @interruptible: Sleep interruptible if sleeping.
* @no_wait_gpu: Return immediately if the GPU is busy.
+ * @allow_reserved_eviction: Allow eviction of reserved BOs.
+ * @resv: Reservation object to allow reserved evictions with.
*
* Context for TTM operations like changing buffer placement or general memory
* allocation.
@@ -270,6 +272,8 @@ struct ttm_bo_kmap_obj {
struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
+ bool allow_reserved_eviction;
+ struct reservation_object *resv;
uint64_t bytes_moved;
};
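
The two new fields let a caller opt in to evicting buffers that are reserved against its own reservation object, which the reworked ttm_mem_evict_first() above now honors. A hedged sketch of a caller filling the extended context, assuming the ctx-based ttm_bo_validate() signature this series converts to:

    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_placement.h>

    static int my_validate(struct ttm_buffer_object *bo,
    		           struct ttm_placement *placement)
    {
    	struct ttm_operation_ctx ctx = {
    		.interruptible = true,
    		.no_wait_gpu = false,
    		/* new in this patch: allow evicting BOs that share
    		 * our reservation object */
    		.allow_reserved_eviction = true,
    		.resv = bo->resv,
    	};

    	return ttm_bo_validate(bo, placement, &ctx);
    }
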
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6996d884c508..5115718ca607 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -976,7 +976,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
*/
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem);
/**
@@ -998,7 +998,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*/
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem);
/**