64 files changed, 456 insertions, 215 deletions
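Editor's note: the amdgpu_amdkfd.c hunk directly below adds an early return when adev->kfd.client.dev is already set, and the amdgpu_drv.c hunk pairs it with a late amdgpu_amdkfd_drm_client_create() call from the delayed-reset worker, so the KFD DRM client is created exactly once no matter which path runs first. A minimal standalone sketch of this idempotent-init guard (hypothetical names, plain C, not the driver code itself):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for adev->kfd: 'client_dev' plays the role of
 * adev->kfd.client.dev, which is non-NULL once drm_client_init() ran. */
struct kfd_state {
        bool init_complete;
        void *client_dev;
};

static int kfd_drm_client_create(struct kfd_state *kfd)
{
        /* Bail out when there is nothing to do (KFD never initialized) or
         * when the work is already done (client_dev set by an earlier call):
         * the function can then be invoked both at probe time and from the
         * delayed-reset path without double-initializing the client. */
        if (!kfd->init_complete || kfd->client_dev)
                return 0;

        kfd->client_dev = &kfd->init_complete; /* pretend allocation */
        printf("client created\n");
        return 0;
}

int main(void)
{
        struct kfd_state kfd = { .init_complete = true, .client_dev = NULL };

        kfd_drm_client_create(&kfd); /* creates the client */
        kfd_drm_client_create(&kfd); /* second call is a harmless no-op */
        return 0;
}

The guard turns the second call into a no-op rather than an error, which is what lets the deferred path call it unconditionally.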
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f5f2945711be..35dd6effa9a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -146,7 +146,7 @@ int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev) { int ret; - if (!adev->kfd.init_complete) + if (!adev->kfd.init_complete || adev->kfd.client.dev) return 0; ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 14dc9d2d8d53..df58a6a1a67e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2869,14 +2869,16 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * mutex_lock(&process_info->lock); - drm_exec_init(&exec, 0, 0); + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); drm_exec_retry_on_contention(&exec); - if (unlikely(ret)) + if (unlikely(ret)) { + pr_err("Locking VM PD failed, ret: %d\n", ret); goto ttm_reserve_fail; + } } /* Reserve all BOs and page tables/directory. Add all BOs from @@ -2889,8 +2891,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * gobj = &mem->bo->tbo.base; ret = drm_exec_prepare_obj(&exec, gobj, 1); drm_exec_retry_on_contention(&exec); - if (unlikely(ret)) + if (unlikely(ret)) { + pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret); goto ttm_reserve_fail; + } } } @@ -2950,8 +2954,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * * validations above would invalidate DMABuf imports again. */ ret = process_validate_vms(process_info, &exec.ticket); - if (ret) + if (ret) { + pr_debug("Validating VMs failed, ret: %d\n", ret); goto validate_map_fail; + } /* Update mappings not managed by KFD */ list_for_each_entry(peer_vm, &process_info->vm_list_head, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1e9454e6e4cb..5dc24c971b41 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4040,10 +4040,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, * early on during init and before calling to RREG32. 
*/ adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); - if (!adev->reset_domain) { - r = -ENOMEM; - goto unmap_memory; - } + if (!adev->reset_domain) + return -ENOMEM; /* detect hw virtualization here */ amdgpu_detect_virtualization(adev); @@ -4053,7 +4051,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_device_get_job_timeout_settings(adev); if (r) { dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); - goto unmap_memory; + return r; } amdgpu_device_set_mcbp(adev); @@ -4061,12 +4059,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* early init functions */ r = amdgpu_device_ip_early_init(adev); if (r) - goto unmap_memory; + return r; /* Get rid of things like offb */ r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); if (r) - goto unmap_memory; + return r; /* Enable TMZ based on IP_VERSION */ amdgpu_gmc_tmz_set(adev); @@ -4076,7 +4074,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (adev->gmc.xgmi.supported) { r = adev->gfxhub.funcs->get_xgmi_info(adev); if (r) - goto unmap_memory; + return r; } /* enable PCIE atomic ops */ @@ -4345,8 +4343,6 @@ release_ras_con: failed: amdgpu_vf_error_trans_all(adev); -unmap_memory: - iounmap(adev->rmmio); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 15b188aaf681..80b9642f2bc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2479,8 +2479,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) } for (i = 0; i < mgpu_info.num_dgpu; i++) { adev = mgpu_info.gpu_ins[i].adev; - if (!adev->kfd.init_complete) + if (!adev->kfd.init_complete) { + kgd2kfd_init_zone_device(adev); amdgpu_amdkfd_device_init(adev); + amdgpu_amdkfd_drm_client_create(adev); + } amdgpu_ttm_set_buffer_funcs_status(adev, true); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f8b48fd93108..55d5508987ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -687,7 +687,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) - DRM_ERROR("KCQ enable failed\n"); + DRM_ERROR("KGQ enable failed\n"); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index 55b65fc04b65..431ec72655ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = { */ int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr) { + int r; + if (bo->kfd_bo) - return mmu_interval_notifier_insert(&bo->notifier, current->mm, + r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, amdgpu_bo_size(bo), &amdgpu_hmm_hsa_ops); - return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, - amdgpu_bo_size(bo), - &amdgpu_hmm_gfx_ops); + else + r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, + amdgpu_bo_size(bo), + &amdgpu_hmm_gfx_ops); + if (r) + /* + * Make sure amdgpu_hmm_unregister() doesn't call + * mmu_interval_notifier_remove() when the notifier isn't properly + * initialized. 
+ */ + bo->notifier.mm = NULL; + + return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 3c2b1413058b..94b310fdb719 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1830,6 +1830,10 @@ static int psp_hdcp_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; + /* bypass hdcp initialization if dmu is harvested */ + if (!amdgpu_device_has_display_hardware(psp->adev)) + return 0; + if (!psp->hdcp_context.context.bin_desc.size_bytes || !psp->hdcp_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); @@ -1862,6 +1866,9 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) if (amdgpu_sriov_vf(psp->adev)) return 0; + if (!psp->hdcp_context.context.initialized) + return 0; + return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); } @@ -1897,6 +1904,10 @@ static int psp_dtm_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; + /* bypass dtm initialization if dmu is harvested */ + if (!amdgpu_device_has_display_hardware(psp->adev)) + return 0; + if (!psp->dtm_context.context.bin_desc.size_bytes || !psp->dtm_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); @@ -1929,6 +1940,9 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) if (amdgpu_sriov_vf(psp->adev)) return 0; + if (!psp->dtm_context.context.initialized) + return 0; + return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); } @@ -2063,6 +2077,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; + /* bypass securedisplay initialization if dmu is harvested */ + if (!amdgpu_device_has_display_hardware(psp->adev)) + return 0; + if (!psp->securedisplay_context.context.bin_desc.size_bytes || !psp->securedisplay_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8722beba494e..fc418e670fda 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -864,6 +864,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, gtt->ttm.dma_address, flags); } + gtt->bound = true; } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index b2535023764f..9c514a606a2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -60,6 +60,7 @@ #define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" #define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin" #define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin" +#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin" #define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); @@ -85,6 +86,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_3); MODULE_FIRMWARE(FIRMWARE_VCN4_0_4); MODULE_FIRMWARE(FIRMWARE_VCN4_0_5); MODULE_FIRMWARE(FIRMWARE_VCN4_0_6); +MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1); MODULE_FIRMWARE(FIRMWARE_VCN5_0_0); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -93,14 +95,22 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev) { char ucode_prefix[30]; char fw_name[40]; - int r; + int r, i; - amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, 
sizeof(ucode_prefix)); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name); - if (r) - amdgpu_ucode_release(&adev->vcn.fw); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix)); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) && + i == 1) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i); + } + r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name); + if (r) { + amdgpu_ucode_release(&adev->vcn.fw[i]); + return r; + } + } return r; } @@ -141,7 +151,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) } } - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data; adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); /* Bit 20-23, it is encode major and non-zero for new naming convention. @@ -256,9 +266,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_enc_rings; ++i) amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); + + amdgpu_ucode_release(&adev->vcn.fw[j]); } - amdgpu_ucode_release(&adev->vcn.fw); mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); mutex_destroy(&adev->vcn.vcn_pg_lock); @@ -354,11 +365,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) const struct common_firmware_header *hdr; unsigned int offset; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); if (drm_dev_enter(adev_to_drm(adev), &idx)) { - memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, + memcpy_toio(adev->vcn.inst[i].cpu_addr, + adev->vcn.fw[i]->data + offset, le32_to_cpu(hdr->ucode_size_bytes)); drm_dev_exit(idx); } @@ -1043,11 +1055,11 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev) if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { if (adev->vcn.harvest_config & (1 << i)) continue; + + hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data; /* currently only support 2 FW instances */ if (i >= 2) { dev_info(adev->dev, "More then 2 VCN FW instances!\n"); @@ -1055,7 +1067,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev) } idx = AMDGPU_UCODE_ID_VCN + i; adev->firmware.ucode[idx].ucode_id = idx; - adev->firmware.ucode[idx].fw = adev->vcn.fw; + adev->firmware.ucode[idx].fw = adev->vcn.fw[i]; adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 1985f71b4373..a418393d89ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -306,7 +306,7 @@ struct amdgpu_vcn_ras { struct amdgpu_vcn { unsigned fw_version; struct delayed_work idle_work; - const struct firmware *fw; /* VCN firmware */ + const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */ unsigned num_enc_rings; enum amd_powergating_state cur_state; bool indirect_sram;
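Editor's note: the amdgpu_vcn.c/amdgpu_vcn.h hunks above turn the single adev->vcn.fw pointer into a per-instance array and request firmware once per VCN instance, with VCN 4.0.6 instance 1 pulling the new vcn_4_0_6_1.bin image. A standalone sketch of the per-instance naming rule (hypothetical helper, not the driver function):

#include <stdio.h>

/* Hypothetical reduction of amdgpu_vcn_early_init()'s naming logic:
 * every instance normally shares one image, but VCN 4.0.6 instance 1
 * asks for a "_1"-suffixed file, mirroring FIRMWARE_VCN4_0_6_1. */
static void vcn_fw_name(char *buf, size_t len, const char *prefix,
                        int ip_is_4_0_6, int inst)
{
        if (ip_is_4_0_6 && inst == 1)
                snprintf(buf, len, "amdgpu/%s_%d.bin", prefix, inst);
        else
                snprintf(buf, len, "amdgpu/%s.bin", prefix);
}

int main(void)
{
        char name[40];
        int i;

        for (i = 0; i < 2; i++) {
                vcn_fw_name(name, sizeof(name), "vcn_4_0_6", 1, i);
                printf("instance %d -> %s\n", i, name);
        }
        /* prints amdgpu/vcn_4_0_6.bin, then amdgpu/vcn_4_0_6_1.bin */
        return 0;
}

Everything downstream in the diff (header parsing, memcpy_toio, ucode table setup) then indexes the array with the instance number instead of sharing one image.

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 70c5cc80ecdc..7a65a2b128ec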
100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -575,9 +575,6 @@ static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring, { unsigned int ret; - if (ring->adev->vpe.collaborate_mode) - return ~0; - amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0)); amdgpu_ring_write(ring, lower_32_bits(addr)); amdgpu_ring_write(ring, upper_32_bits(addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 904b9ff5ead2..f90905ef32c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3657,6 +3657,9 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev) static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) + return; + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): soc15_program_register_sequence(adev, @@ -4982,7 +4985,8 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) u32 tmp; int i; - WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); + if (!amdgpu_sriov_vf(adev)) + WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); gfx_v10_0_setup_rb(adev); gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info); @@ -7163,7 +7167,7 @@ static int gfx_v10_0_hw_init(void *handle) if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) gfx_v10_3_program_pbb_mode(adev); - if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0) && !amdgpu_sriov_vf(adev)) gfx_v10_3_set_power_brake_sequence(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index cd0e8a321e46..17509f32f61a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -155,6 +155,9 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; + if (amdgpu_sriov_vf(adev)) + return; + /* Program the AGP BAR */ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c index 16fe428c0722..7aed96fa10a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c @@ -418,6 +418,12 @@ static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev, tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c index b3961968c10c..238ea40c2450 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c @@ -99,16 +99,15 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev, switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(3, 3, 0): case IP_VERSION(3, 3, 1): - mmhub_cid = mmhub_client_ids_v3_3[cid][rw]; + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ? + mmhub_client_ids_v3_3[cid][rw] : + cid == 0x140 ? 
"UMSCH" : NULL; break; default: mmhub_cid = NULL; break; } - if (!mmhub_cid && cid == 0x140) - mmhub_cid = "UMSCH"; - dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", mmhub_cid ? mmhub_cid : "unknown", cid); dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index eaa4f5f49949..103dc9c7325f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -431,16 +431,11 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 doorbell_offset, doorbell; u32 rb_cntl, ib_cntl; - int i, unset = 0; + int i; for_each_inst(i, inst_mask) { sdma[i] = &adev->sdma.instance[i].ring; - if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) { - amdgpu_ttm_set_buffer_funcs_status(adev, false); - unset = 1; - } - rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0); WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl); @@ -487,20 +482,10 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev, static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev, uint32_t inst_mask) { - struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 rb_cntl, ib_cntl; int i; - bool unset = false; for_each_inst(i, inst_mask) { - sdma[i] = &adev->sdma.instance[i].page; - - if ((adev->mman.buffer_funcs_ring == sdma[i]) && - (!unset)) { - amdgpu_ttm_set_buffer_funcs_status(adev, false); - unset = true; - } - rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 0); @@ -950,13 +935,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev, r = amdgpu_ring_test_helper(page); if (r) return r; - - if (adev->mman.buffer_funcs_ring == page) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } - - if (adev->mman.buffer_funcs_ring == ring) - amdgpu_ttm_set_buffer_funcs_status(adev, true); } return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 25ba27151ac0..aaceecd558cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -304,7 +304,7 @@ static int vcn_v1_0_resume(void *handle) */ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev) { - uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4); uint32_t offset; /* cache window 0: fw */ @@ -371,7 +371,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev) static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) { - uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4); uint32_t offset; /* cache window 0: fw */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 18794394c5a0..e357d8cf0c01 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -330,7 +330,7 @@ static int vcn_v2_0_resume(void *handle) */ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) { - uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4); uint32_t offset; if (amdgpu_sriov_vf(adev)) @@ -386,7 +386,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) static void vcn_v2_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev, bool indirect) { - uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4); uint32_t offset; /* cache window 0: fw */ @@ -1878,7 +1878,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) init_table += header->vcn_table_offset; - size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4); MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT( SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index aba403d71806..1cd8a94b0fbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -414,13 +414,15 @@ static int vcn_v2_5_resume(void *handle) */ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) { - uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t size; uint32_t offset; int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; + + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4); /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, @@ -469,7 +471,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { - uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4); uint32_t offset; /* cache window 0: fw */ @@ -1240,7 +1242,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev) SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); - size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4); /* mc resume*/ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { MMSCH_V1_0_INSERT_DIRECT_WT( diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index e02af4de521c..8f82fb887e9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -449,7 +449,7 @@ static int vcn_v3_0_resume(void *handle) */ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst) { - uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4); uint32_t offset; /* cache window 0: fw */ @@ -499,7 +499,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst) static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { - uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4); uint32_t offset; /* cache window 0: fw */ @@ -1332,7 +1332,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) mmUVD_STATUS), ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); - cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 8ab01ae919d2..832d15f7b5f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -382,7 +382,7 @@ static void 
vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst) uint32_t offset, size; const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); /* cache window 0: fw */ @@ -442,7 +442,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx { uint32_t offset, size; const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); /* cache window 0: fw */ @@ -1289,7 +1289,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) regUVD_STATUS), ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); - cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 810bbfccd6f2..203fa988322b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -332,7 +332,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx) uint32_t offset, size, vcn_inst; const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); vcn_inst = GET_INST(VCN, inst_idx); @@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i uint32_t offset, size; const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); /* cache window 0: fw */ @@ -894,7 +894,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev) MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); - cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 0468955338b7..501e53e69f2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -45,7 +45,7 @@ #define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 -#define VCN1_VID_SOC_ADDRESS_3_0 0x48300 +#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000) #define VCN_HARVEST_MMSCH 0 @@ -329,7 +329,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst) uint32_t offset, size; const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); /* cache window 0: fw */ @@ -390,7 +390,7 @@ static void 
vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i uint32_t offset, size; const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); /* cache window 0: fw */ @@ -486,7 +486,8 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( - VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG), + adev->gfx.config.gb_addr_config, 0, indirect); } /** @@ -911,7 +912,6 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b VCN, inst_idx, regUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); - if (indirect) amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index d6ee9958ba5f..bc60c554eb32 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -290,7 +290,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst) uint32_t offset, size; const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); /* cache window 0: fw */ @@ -351,7 +351,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i uint32_t offset, size; const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); /* cache window 0: fw */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1c9c6096e28f..2851719d7121 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1767,6 +1767,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) adev->dm.dc->debug.force_subvp_mclk_switch = true; + if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) + adev->dm.dc->debug.using_dml2 = true; + adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ @@ -11271,18 +11274,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; - if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT - || sink->sink_signal == SIGNAL_TYPE_EDP) { + if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || + sink->sink_signal == SIGNAL_TYPE_EDP)) { bool edid_check_required = false; - if (edid) { - edid_check_required = is_dp_capable_without_timing_msa( - adev->dm.dc, - amdgpu_dm_connector); + if (is_dp_capable_without_timing_msa(adev->dm.dc, + amdgpu_dm_connector)) { + if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { + freesync_capable = true; + amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + } else { + edid_check_required = 
edid->version > 1 || + (edid->version == 1 && + edid->revision > 1); + } } - if (edid_check_required == true && (edid->version > 1 || - (edid->version == 1 && edid->revision > 1))) { + if (edid_check_required) { for (i = 0; i < 4; i++) { timing = &edid->detailed_timings[i]; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 85b7f58a7f35..c27063305a13 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */ case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB): case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B): + case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A): + case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1): DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.remove_sink_ext_caps = true; break; @@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->edid_hdmi = connector->display_info.is_hdmi; + apply_edid_quirks(edid_buf, edid_caps); + sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) return result; @@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps( else edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; - apply_edid_quirks(edid_buf, edid_caps); - kfree(sads); kfree(sadb); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 668f05c8654e..bec252e1dd27 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950) clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950; + /* DPPCLK */ + dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz, + &num_entries_per_clk->num_dppclk_levels); + num_levels = num_entries_per_clk->num_dppclk_levels; + clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK); + //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x + if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950) + clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950; + if (num_entries_per_clk->num_dcfclk_levels && num_entries_per_clk->num_dtbclk_levels && num_entries_per_clk->num_dispclk_levels) @@ -240,6 +250,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz); } + for (i = 0; i < num_levels; i++) + if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950) + clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950; + /* Get UCLK, update bounding box */ clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5211c1c0f3c0..e7dc128f6284 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1302,6 +1302,54 @@ static void disable_vbios_mode_if_required( } } +/** + * wait_for_blank_complete - wait for all active OPPs to finish pending 
blank + * pattern updates + * + * @dc: [in] dc reference + * @context: [in] hardware context in use + */ +static void wait_for_blank_complete(struct dc *dc, + struct dc_state *context) +{ + struct pipe_ctx *opp_head; + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (!hws->funcs.wait_for_blank_complete) + return; + + for (i = 0; i < MAX_PIPES; i++) { + opp_head = &context->res_ctx.pipe_ctx[i]; + + if (!resource_is_pipe_type(opp_head, OPP_HEAD) || + dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM) + continue; + + hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp); + } +} + +static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context) +{ + struct pipe_ctx *otg_master; + struct timing_generator *tg; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + otg_master = &context->res_ctx.pipe_ctx[i]; + if (!resource_is_pipe_type(otg_master, OTG_MASTER) || + dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM) + continue; + tg = otg_master->stream_res.tg; + if (tg->funcs->wait_odm_doublebuffer_pending_clear) + tg->funcs->wait_odm_doublebuffer_pending_clear(tg); + } + + /* ODM update may require to reprogram blank pattern for each OPP */ + wait_for_blank_complete(dc, context); +} + static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) { int i; @@ -1993,6 +2041,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->stream_count == 0) { /* Must wait for no flips to be pending before doing optimize bw */ wait_for_no_pipes_pending(dc, context); + /* + * optimized dispclk depends on ODM setup. Need to wait for ODM + * update pending complete before optimizing bandwidth. + */ + wait_for_odm_update_pending_complete(dc, context); /* pplib is notified if disp_num changed */ dc->hwss.optimize_bandwidth(dc, context); /* Need to do otg sync again as otg could be out of sync due to otg @@ -3270,6 +3323,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s if (stream->link->replay_settings.config.replay_supported) return true; + if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level) + return true; + return false; } @@ -3493,7 +3549,7 @@ static void commit_planes_for_stream_fast(struct dc *dc, top_pipe_to_program->stream->update_flags.raw = 0; } -static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) +static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context) { /* * This function calls HWSS to wait for any potentially double buffered @@ -3531,6 +3587,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state } } } + wait_for_odm_update_pending_complete(dc, dc_context); } static void commit_planes_for_stream(struct dc *dc, @@ -4844,22 +4901,16 @@ void dc_exit_ips_for_hw_access(struct dc *dc) bool dc_dmub_is_ips_idle_state(struct dc *dc) { - uint32_t idle_state = 0; - if (dc->debug.disable_idle_power_optimizations) return false; if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) return false; - if (dc->hwss.get_idle_state) - idle_state = dc->hwss.get_idle_state(dc); - - if (!(idle_state & DMUB_IPS1_ALLOW_MASK) || - !(idle_state & DMUB_IPS2_ALLOW_MASK)) - return true; + if (!dc->ctx->dmub_srv) + return false; - return false; + return dc->ctx->dmub_srv->idle_allowed; } /* set min and max memory clock to lowest and highest DPM level, respectively */
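Editor's note: the dc_state.c hunk that follows makes dc_state_release() accept NULL, mirroring kfree() semantics, so error paths can release a state they may never have allocated. A toy sketch of that contract (standalone C, hypothetical types, not the DC API):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the kref-backed dc_state. */
struct state {
        int refcount;
};

static void state_free(struct state *s)
{
        printf("freeing state\n");
        free(s);
}

/* Release that is safe on NULL, mirroring the dc_state_release() change:
 * error paths may call it before the state was ever allocated. */
static void state_release(struct state *s)
{
        if (s != NULL && --s->refcount == 0)
                state_free(s);
}

int main(void)
{
        struct state *s = malloc(sizeof(*s));

        if (!s)
                return 1;
        s->refcount = 1;
        state_release(NULL); /* no-op instead of a NULL dereference */
        state_release(s);    /* drops the last reference and frees */
        return 0;
}

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c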
b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 180ac47868c2..5cc7f8da209c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref) void dc_state_release(struct dc_state *state) { - kref_put(&state->refcount, dc_state_free); + if (state != NULL) + kref_put(&state->refcount, dc_state_free); } /* * dc_state_add_stream() - Add a new dc_stream_state to a dc_state. diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 9900dda2eef5..be2ac5c442a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -1085,9 +1085,9 @@ struct replay_settings { /* SMU optimization is enabled */ bool replay_smu_opt_enable; /* Current Coasting vtotal */ - uint16_t coasting_vtotal; + uint32_t coasting_vtotal; /* Coasting vtotal table */ - uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; + uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; /* Maximum link off frame count */ enum replay_link_off_frame_count_level link_off_frame_count_level; /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 48a40dcc7050..5838a11efd00 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -384,6 +384,7 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_set_disp_pattern_generator = NULL, .opp_program_dpg_dimensions = NULL, .dpg_is_blanked = NULL, + .dpg_is_pending = NULL, .opp_destroy = opp1_destroy }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 0784d0198661..fbf1b6370eb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp) (double_buffer_pending == 0); } +bool opp2_dpg_is_pending(struct output_pixel_processor *opp) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + uint32_t double_buffer_pending; + uint32_t dpg_en; + + REG_GET(DPG_CONTROL, DPG_EN, &dpg_en); + + REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); + + return (dpg_en == 1 && double_buffer_pending == 1); +} + void opp2_program_left_edge_extra_pixel ( struct output_pixel_processor *opp, bool count) @@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = { .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index 3ab221bdd27d..8f186abd558d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions( bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); +bool opp2_dpg_is_pending(struct output_pixel_processor *opp); + void opp2_dpg_set_blank_color( struct output_pixel_processor *opp, const struct tg_color *color); diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c index 8e77db46a409..6a71ba3dfc63 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c @@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = { .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index 87760600e154..f98def6c8c2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc pipe_cnt++; } } + +void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context) +{ + if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ) + context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index b49e1dc9d8ba..a0a65e099104 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * - Not TMZ surface */ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && + !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 1ba6933d2b36..17a58f41fc6a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -824,13 +824,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in) { + dml_uint_t width, height; + + if (in->timing.h_addressable > 3840) + width = 3840; + else + width = in->timing.h_addressable; // 4K max + + if (in->timing.v_addressable > 2160) + height = 2160; + else + height = in->timing.v_addressable; // 4K max + out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; out->GPUVMMinPageSizeKBytes[location] = 256; - out->ViewportWidth[location] = in->timing.h_addressable; - out->ViewportHeight[location] = in->timing.v_addressable; + out->ViewportWidth[location] = width; + out->ViewportHeight[location] = height; out->ViewportStationary[location] = false; out->ViewportWidthChroma[location] = 0; out->ViewportHeightChroma[location] = 0; @@ -849,7 +861,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned out->HTapsChroma[location] = 0; out->VTapsChroma[location] = 0; 
out->SourceScan[location] = dml_rotation_0; - out->ScalerRecoutWidth[location] = in->timing.h_addressable; + out->ScalerRecoutWidth[location] = width; out->LBBitPerPixel[location] = 57; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 2a58a7687bdb..72cca367062e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void) return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL); } -bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) +static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - // Allocate Mode Lib Ctx - *dml2 = dml2_allocate_memory(); - - if (!(*dml2)) - return false; // Store config options (*dml2)->config = *config; @@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc); initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states); +} + +bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) +{ + // Allocate Mode Lib Ctx + *dml2 = dml2_allocate_memory(); + + if (!(*dml2)) + return false; + + dml2_init(in_dc, config, dml2); - /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/ - //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init); return true; } @@ -779,3 +783,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2, return true; } + +void dml2_reinit(const struct dc *in_dc, + const struct dml2_configuration_options *config, + struct dml2_context **dml2) +{ + + dml2_init(in_dc, config, dml2); +} diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index ee0eb184eb6d..cc662d682fd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2, struct dml2_context *src_dml2); bool dml2_create_copy(struct dml2_context **dst_dml2, struct dml2_context *src_dml2); +void dml2_reinit(const struct dc *in_dc, + const struct dml2_configuration_options *config, + struct dml2_context **dml2); /* * dml2_validate - Determines if a display configuration is supported or not. 
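Editor's note: the dml2_wrapper.c change above factors the body of dml2_create() into a dml2_init() helper so the new dml2_reinit() entry point can rerun initialization on an already-allocated context; the dcn32/dcn321 resource files later in this diff call it after updating the bandwidth bounding box. A standalone sketch of the create/init/reinit split (hypothetical context type, not the DML2 API):

#include <stdbool.h>
#include <stdlib.h>

struct config { int soc_rev; };

/* Hypothetical reduction of dml2_context: just the cached config. */
struct ctx { struct config cfg; };

/* Shared initialization, usable on fresh or already-allocated contexts;
 * this is the part dml2_create() and dml2_reinit() now have in common. */
static void ctx_init(const struct config *cfg, struct ctx **c)
{
        (*c)->cfg = *cfg; /* store config, rebuild derived state */
}

static bool ctx_create(const struct config *cfg, struct ctx **c)
{
        *c = calloc(1, sizeof(**c));
        if (!*c)
                return false;
        ctx_init(cfg, c);
        return true;
}

/* Reinit reuses the allocation, e.g. after the bounding box changed. */
static void ctx_reinit(const struct config *cfg, struct ctx **c)
{
        ctx_init(cfg, c);
}

int main(void)
{
        struct config cfg = { .soc_rev = 1 };
        struct ctx *c;

        if (!ctx_create(&cfg, &c))
                return 1;
        cfg.soc_rev = 2;      /* bounding box changed */
        ctx_reinit(&cfg, &c); /* no realloc, same pattern as dml2_reinit() */
        free(c);
        return 0;
}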
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index c55d5155ecb9..8b3536c380b8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1498,6 +1498,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state, return; } + if (resource_is_pipe_type(new_pipe, OTG_MASTER) && + resource_is_odm_topology_changed(new_pipe, old_pipe)) + /* Detect odm changes */ + new_pipe->update_flags.bits.odm = 1; + /* Exit on unchanged, unused pipe */ if (!old_pipe->plane_state && !new_pipe->plane_state) return; @@ -1551,10 +1556,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state, /* Detect top pipe only changes */ if (resource_is_pipe_type(new_pipe, OTG_MASTER)) { - /* Detect odm changes */ - if (resource_is_odm_topology_changed(new_pipe, old_pipe)) - new_pipe->update_flags.bits.odm = 1; - /* Detect global sync changes */ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start @@ -1999,19 +2000,20 @@ void dcn20_program_front_end_for_ctx( DC_LOGGER_INIT(dc->ctx->logger); unsigned int prev_hubp_count = 0; unsigned int hubp_count = 0; + struct pipe_ctx *pipe; if (resource_is_pipe_topology_changed(dc->current_state, context)) resource_log_pipe_topology_update(dc, context); if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) { - ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); + if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) { + ASSERT(!pipe->plane_state->triplebuffer_flips); /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( - dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); + dc, pipe, pipe->plane_state->triplebuffer_flips); } } } @@ -2085,12 +2087,22 @@ void dcn20_program_front_end_for_ctx( DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } + /* update ODM for blanked OTG master pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + if (resource_is_pipe_type(pipe, OTG_MASTER) && + !resource_is_pipe_type(pipe, DPP_PIPE) && + pipe->update_flags.bits.odm && + hws->funcs.update_odm) + hws->funcs.update_odm(dc, context, pipe); + } + /* * Program all updated pipes, order matters for mpcc setup. Start with * top pipe and program all pipes that follow in order */ for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->plane_state && !pipe->top_pipe) { while (pipe) { @@ -2129,17 +2141,6 @@ void dcn20_program_front_end_for_ctx( context->stream_status[0].plane_count > 1) { pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); } - - /* when dynamic ODM is active, pipes must be reconfigured when all planes are - * disabled, as some transitions will leave software and hardware state - * mismatched. 
- */ - if (dc->debug.enable_single_display_2to1_odm_policy && - pipe->stream && - pipe->update_flags.bits.disable && - !pipe->prev_odm_pipe && - hws->funcs.update_odm) - hws->funcs.update_odm(dc, context, pipe); } } @@ -2451,7 +2452,7 @@ bool dcn20_wait_for_blank_complete( int counter; for (counter = 0; counter < 1000; counter++) { - if (opp->funcs->dpg_is_blanked(opp)) + if (!opp->funcs->dpg_is_pending(opp)) break; udelay(100); @@ -2462,7 +2463,7 @@ bool dcn20_wait_for_blank_complete( return false; } - return true; + return opp->funcs->dpg_is_blanked(opp); } bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 7e6b7f2a6dc9..8bc3d01537bb 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -812,10 +812,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) if (pipe_ctx == NULL) return; - if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) + if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) { pipe_ctx->stream_res.stream_enc->funcs->set_avmute( pipe_ctx->stream_res.stream_enc, enable); + + /* Wait for two frame to make sure AV mute is sent out */ + if (enable) { + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + } + } } void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index aa36d7a56ca8..c0b526cf1786 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1156,6 +1156,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * dsc->funcs->dsc_disconnect(dsc); } } + + if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE)) + /* + * blank pattern is generated by OPP, reprogram blank pattern + * due to OPP count change + */ + dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true); } unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) @@ -1778,3 +1785,26 @@ void dcn32_prepare_bandwidth(struct dc *dc, context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; } } + +void dcn32_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock) +{ + unsigned int i; + struct pipe_ctx *pipe; + struct timing_generator *tg; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) + continue; + + if (lock) + dc->hwss.pipe_control_lock(dc, pipe, true); + else + dc->hwss.pipe_control_lock(dc, pipe, false); + } +}
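Editor's note: dcn32_interdependent_update_lock() above replaces dcn10_lock_all_pipes as the DCN3.2 interdependent_update_lock hook (see the dcn32_init.c change below): rather than locking every pipe, it takes the OTG lock only on pipes that are enabled, non-phantom OTG masters. A standalone sketch of that filter-then-lock loop (hypothetical pipe model, plain C):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

/* Hypothetical pipe model: only enabled OTG masters that are not SubVP
 * phantoms own an OTG lock worth taking. */
struct pipe {
        bool otg_master;
        bool tg_enabled;
        bool phantom;
        bool locked;
};

static void interdependent_update_lock(struct pipe *pipes, int n, bool lock)
{
        int i;

        for (i = 0; i < n; i++) {
                struct pipe *p = &pipes[i];

                /* Skip pipes that do not drive an enabled, real OTG. */
                if (!p->otg_master || !p->tg_enabled || p->phantom)
                        continue;

                p->locked = lock;
        }
}

int main(void)
{
        struct pipe pipes[MAX_PIPES] = {
                { .otg_master = true, .tg_enabled = true },  /* locked */
                { .otg_master = true, .tg_enabled = false }, /* skipped */
                { .otg_master = true, .tg_enabled = true,
                  .phantom = true },                         /* skipped */
        };
        int i;

        interdependent_update_lock(pipes, MAX_PIPES, true);
        for (i = 0; i < 3; i++)
                printf("pipe %d locked=%d\n", i, pipes[i].locked);
        interdependent_update_lock(pipes, MAX_PIPES, false);
        return 0;
}

diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index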
069e20bc87c0..f55c11fc56ec 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, void dcn32_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn32_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock); #endif /* __DC_HWSS_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 2b073123d3ed..67d661dbd5b7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .disable_plane = dcn20_disable_plane, .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, + .interdependent_update_lock = dcn32_interdependent_update_lock, .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn32_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index aee5372e292c..d89c92370d5b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -337,6 +337,9 @@ struct opp_funcs { bool (*dpg_is_blanked)( struct output_pixel_processor *opp); + bool (*dpg_is_pending)(struct output_pixel_processor *opp); + + void (*opp_dpg_set_blank_color)( struct output_pixel_processor *opp, const struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index d98d72f35be5..ffad8fe16c54 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -331,6 +331,7 @@ struct timing_generator_funcs { void (*init_odm)(struct timing_generator *tg); void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); + void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 26fe81f213da..bf29fc58ea6a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -285,12 +285,12 @@ struct link_service { enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_data); bool (*edp_set_coasting_vtotal)( - struct dc_link *link, uint16_t coasting_vtotal); + struct dc_link *link, uint32_t coasting_vtotal); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm); bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, - const unsigned int *power_opts, uint16_t coasting_vtotal); + const unsigned int *power_opts, uint32_t coasting_vtotal); bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index acfbbc638cc6..3baa2bdd6dd6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -1034,7 +1034,7 @@ bool 
edp_send_replay_cmd(struct dc_link *link, return true; } -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link, } bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, - const unsigned int *power_opts, uint16_t coasting_vtotal) + const unsigned int *power_opts, uint32_t coasting_vtotal) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 34e521af7bb4..a158c6234d42 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link, bool edp_send_replay_cmd(struct dc_link *link, enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_data); -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal); +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); bool edp_replay_residency(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, - const unsigned int *power_opts, uint16_t coasting_vtotal); + const unsigned int *power_opts, uint32_t coasting_vtotal); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index ab81594a7fad..6c2e84d3967f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -557,7 +557,8 @@ struct dcn_optc_registers { type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ type OTG_CRC_DATA_FORMAT;\ type OTG_V_TOTAL_LAST_USED_BY_DRR;\ - type OTG_DRR_TIMING_DBUF_UPDATE_PENDING; + type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\ + type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING; #define TG_REG_FIELD_LIST_DCN3_2(type) \ type OTG_H_TIMING_DIV_MODE_MANUAL; diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index 823493543325..f07a4c7e48bc 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi } } +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000); +} + void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -345,6 +352,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = { .set_odm_bypass = optc32_set_odm_bypass, .set_odm_combine = optc32_set_odm_combine, .get_odm_combine_segments = optc32_get_odm_combine_segments, + .wait_odm_doublebuffer_pending_clear = 
optc32_wait_odm_doublebuffer_pending_clear, .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode, .get_optc_source = optc2_get_optc_source, .set_out_mux = optc3_set_out_mux, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h index 8ce3b178cab0..0c2c14695561 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h @@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments); void optc32_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg); #endif /* __DC_OPTC_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 3f3951f3ba98..ce1754cc1f46 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); dcn32_override_min_req_memclk(dc, context); + dcn32_override_min_req_dcfclk(dc, context); BW_VAL_TRACE_END_WATERMARKS(); @@ -1930,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw { DC_FP_START(); dcn32_update_bw_bounding_box_fpu(dc, bw_params); + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 0c87b0fabba7..2258c5c7212d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -42,6 +42,7 @@ #define SUBVP_ACTIVE_MARGIN_LIST_LEN 2 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 #define DCN3_2_VMIN_DISPCLK_HZ 717000000 +#define MIN_SUBVP_DCFCLK_KHZ 400000 #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) @@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); +void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index b356fed1726d..296a0a8e7145 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b { DC_FP_START(); dcn321_update_bw_bounding_box_fpu(dc, bw_params); + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index a529e369b2ac..af3fe8bb0728 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -3238,6 +3238,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data { * Currently the support is only for 0 or 1 */ uint8_t panel_inst; + /** + * 16-bit value dictated by the driver that carries the upper 16 bits of the coasting vtotal. + */ + uint16_t coasting_vtotal_high; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; }; /** diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 8c137d7c032e..7c9805705fd3 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index e304e8435fb8..2a3698fd2dc2 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, void set_replay_coasting_vtotal(struct dc_link *link, enum replay_coasting_vtotal_type type, - uint16_t vtotal) + uint32_t vtotal) { link->replay_settings.coasting_vtotal_table[type] = vtotal; } diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index bef4815e1703..ff7e6f3cd6be 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool, void init_replay_config(struct dc_link *link, struct replay_config *pr_config); void set_replay_coasting_vtotal(struct dc_link *link, enum replay_coasting_vtotal_type type, - uint16_t vtotal); + uint32_t vtotal); void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal); void calculate_replay_link_off_frame_count(struct dc_link *link, uint16_t vtotal, uint16_t htotal); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 1d96eb274d72..0c2d04f978ac 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1283,10 +1283,8 @@ static int arcturus_get_power_limit(struct smu_context *smu, uint32_t *max_power_limit, uint32_t *min_power_limit) { - struct smu_11_0_powerplay_table *powerplay_table = - (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table; PPTable_t *pptable = smu->smu_table.driver_pptable; - uint32_t power_limit, od_percent_upper, od_percent_lower; + uint32_t power_limit; if (smu_v11_0_get_current_power_limit(smu, &power_limit)) { /* the last hope to figure out the ppt limit */ @@ -1302,26 +1300,10 @@ static int arcturus_get_power_limit(struct smu_context *smu, *current_power_limit = power_limit; if
(default_power_limit) *default_power_limit = power_limit; - - if (smu->od_enabled) - od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - else - od_percent_upper = 0; - - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - - dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", - od_percent_upper, od_percent_lower, power_limit); - - if (max_power_limit) { - *max_power_limit = power_limit * (100 + od_percent_upper); - *max_power_limit /= 100; - } - - if (min_power_limit) { - *min_power_limit = power_limit * (100 - od_percent_lower); - *min_power_limit /= 100; - } + if (max_power_limit) + *max_power_limit = power_limit; + if (min_power_limit) + *min_power_limit = power_limit; return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index ed189a3878eb..836b1df79928 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu, (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table; struct smu_11_0_overdrive_table *od_settings = smu->od_settings; PPTable_t *pptable = smu->smu_table.driver_pptable; - uint32_t power_limit, od_percent_upper, od_percent_lower; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; if (smu_v11_0_get_current_power_limit(smu, &power_limit)) { /* the last hope to figure out the ppt limit */ @@ -2356,13 +2356,16 @@ static int navi10_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled && - navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) - od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - else - od_percent_upper = 0; - - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); + if (powerplay_table) { + if (smu->od_enabled && + navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { + od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); + } else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { + od_percent_upper = 0; + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); + } + } dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index e2ad2b972ab0..1f18b61884f3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s return throttler_status; } +static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table, + enum SMU_11_0_7_ODFEATURE_CAP cap) +{ + return od_table->cap[cap]; +} + static int sienna_cichlid_get_power_limit(struct smu_context *smu, uint32_t *current_power_limit, uint32_t *default_power_limit, @@ -625,7 +631,8 @@ static 
int sienna_cichlid_get_power_limit(struct smu_context *smu, { struct smu_11_0_7_powerplay_table *powerplay_table = (struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table; - uint32_t power_limit, od_percent_upper, od_percent_lower; + struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; uint16_t *table_member; GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member); @@ -640,12 +647,16 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) - od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); - else - od_percent_upper = 0; - - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); + if (powerplay_table) { + if (smu->od_enabled && + sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) { + od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); + } else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) { + od_percent_upper = 0; + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); + } + } dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); @@ -1250,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu, return dpm_desc->SnapToDiscrete == 0; } -static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table, - enum SMU_11_0_7_ODFEATURE_CAP cap) -{ - return od_table->cap[cap]; -} - static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table, enum SMU_11_0_7_ODSETTING_ID setting, uint32_t *min, uint32_t *max) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 9b80f18ea6c3..9c03296f92cd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2356,7 +2356,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu, (struct smu_13_0_0_powerplay_table *)table_context->power_play_table; PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; - uint32_t power_limit, od_percent_upper, od_percent_lower; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; if (smu_v13_0_get_current_power_limit(smu, &power_limit)) @@ -2369,12 +2369,16 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) - od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); - else - od_percent_upper = 0; - - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); + if (powerplay_table) { + if (smu->od_enabled && + smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 
le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); + } else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 0; + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); + } + } dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 3dc7b60cb075..7318964f1f14 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2320,7 +2320,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu, (struct smu_13_0_7_powerplay_table *)table_context->power_play_table; PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; - uint32_t power_limit, od_percent_upper, od_percent_lower; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; if (smu_v13_0_get_current_power_limit(smu, &power_limit)) @@ -2333,12 +2333,16 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) - od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); - else - od_percent_upper = 0; - - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); + if (powerplay_table) { + if (smu->od_enabled && + (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) { + od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); + } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 0; + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); + } + } dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit);
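
A note on the dcn30_set_avmute() hunk above: the five wait_for_state() calls walk the OTG through VACTIVE -> VBLANK -> VACTIVE -> VBLANK -> VACTIVE, so at least two full frame boundaries pass before the HDMI AV mute is treated as latched by the sink. The same sequence generalizes to a small helper; this is only a sketch, with a hypothetical fake_tg vtable standing in for DC's timing_generator, and only the wait_for_state() callback shape and the CRTC_STATE_* names taken from the hunk itself.

/*
 * Sketch only: generalize the five-call sequence from dcn30_set_avmute().
 * The fake_tg type is hypothetical; only wait_for_state() and the
 * CRTC_STATE_* names come from the hunk above.
 */
enum crtc_state { CRTC_STATE_VACTIVE, CRTC_STATE_VBLANK };

struct fake_tg {
	void (*wait_for_state)(struct fake_tg *tg, enum crtc_state state);
};

static void wait_frames(struct fake_tg *tg, unsigned int frames)
{
	/* Start from a known point inside the active region ... */
	tg->wait_for_state(tg, CRTC_STATE_VACTIVE);
	while (frames--) {
		/* ... then each VBLANK -> VACTIVE round trip is one frame. */
		tg->wait_for_state(tg, CRTC_STATE_VBLANK);
		tg->wait_for_state(tg, CRTC_STATE_VACTIVE);
	}
}

wait_frames(tg, 2) reproduces the five calls in the hunk exactly.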
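
On the optc32_wait_odm_doublebuffer_pending_clear() hunk: REG_WAIT(..., 0, 2, 50000) is DC's register-poll macro, and as used here it re-reads OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING until the field reads back 0, pausing between attempts. A rough open-coded equivalent follows; read_pending() is a hypothetical stand-in for the register read, and the interpretation of the last two arguments (2 us per-attempt delay, 50000 attempts) is an assumption about the macro rather than its verbatim expansion.

#include <linux/delay.h>
#include <linux/types.h>

/*
 * Sketch of the REG_WAIT() poll above; read_pending() is a hypothetical
 * stand-in for the register-field read the macro performs.
 */
static bool wait_pending_clear(u32 (*read_pending)(void),
			       unsigned int delay_us, unsigned int max_tries)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		if (read_pending() == 0)
			return true;	/* double-buffered update has latched */
		udelay(delay_us);	/* 2 us per attempt in the hunk above */
	}
	return false;			/* timed out; REG_WAIT would complain here */
}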
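
On the dmub_cmd.h hunk: with coasting_vtotal_high added, the replay coasting vtotal is effectively a 32-bit value carried in two 16-bit halves (the struct's pre-existing 16-bit coasting_vtotal field is not shown in the hunk). A sketch of the split and reassembly; the trimmed struct and helper names are illustrative only.

#include <linux/types.h>

/*
 * Illustrative two-field layout mirroring the dmub_cmd.h hunk above,
 * with unrelated members omitted.
 */
struct coasting_vtotal_halves {
	u16 coasting_vtotal;		/* low 16 bits (pre-existing field) */
	u16 coasting_vtotal_high;	/* high 16 bits (added by this series) */
};

static void pack_coasting_vtotal(struct coasting_vtotal_halves *h, u32 vtotal)
{
	h->coasting_vtotal = vtotal & 0xFFFF;
	h->coasting_vtotal_high = vtotal >> 16;
}

static u32 unpack_coasting_vtotal(const struct coasting_vtotal_halves *h)
{
	return ((u32)h->coasting_vtotal_high << 16) | h->coasting_vtotal;
}

This split is why set_replay_coasting_vtotal() and the edp_set_coasting_vtotal() prototypes widen from uint16_t to uint32_t in the same series.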
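
On the *_get_power_limit() hunks: od_percent_upper and od_percent_lower stretch the default power limit into the reported max/min limits, which is exactly the computation visible in the lines removed from arcturus_get_power_limit() and retained by the navi10, sienna_cichlid, and smu_v13 variants. The arithmetic as a standalone sketch; the helper name and signature are hypothetical.

#include <linux/types.h>

/*
 * Sketch of the overdrive scaling seen in the removed arcturus lines
 * above; helper name and signature are illustrative only.
 */
static void od_scale_power_limit(u32 power_limit,
				 u32 od_percent_upper,
				 u32 od_percent_lower,
				 u32 *max_power_limit,
				 u32 *min_power_limit)
{
	/* e.g. limit=300, upper=10, lower=20 -> max=330, min=240 */
	*max_power_limit = power_limit * (100 + od_percent_upper) / 100;
	*min_power_limit = power_limit * (100 - od_percent_lower) / 100;
}

With both percentages left at 0 (no powerplay table, or the PPT overdrive capability absent), the max and min collapse to the default limit, matching the simplified arcturus path.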