author     Rob Clark <robdclark@chromium.org>   2024-03-03 18:31:21 -0800
committer  Rob Clark <robdclark@chromium.org>   2024-03-03 18:32:11 -0800
commit     177bce60cd10a4ffdc9881bf6f2dff7880408c1d
tree       e3ad8ee7e90951be042ee37a75e2bb58e1d5b748 /drivers/gpu
parent     18397519cb62248865ca33266a483dbcf7d08b5f
parent     8df1ddb5bf11ab820ad991e164dab82c0960add9
Merge tag 'drm-misc-next-2024-02-29' into msm-next
Merge to pick up commit 47f419e07111 ("drm/dp: move
intel_dp_vsc_sdp_pack() to generic helper")
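For reference, the helper being picked up packs a struct drm_dp_vsc_sdp into a raw secondary data packet. A minimal sketch of a driver-side call, assuming the generic drm_dp_vsc_sdp_pack() signature that commit introduces; the VSC field values below are illustrative only, not from any real driver:

#include <drm/display/drm_dp_helper.h>

static ssize_t pack_example_vsc(struct dp_sdp *sdp)
{
	struct drm_dp_vsc_sdp vsc = {
		.sdp_type = DP_SDP_VSC,
		.revision = 0x5,	/* rev 5: pixel encoding/colorimetry format */
		.length = 0x13,
		.pixelformat = DP_PIXELFORMAT_RGB,
		.colorimetry = DP_COLORIMETRY_DEFAULT,
		.bpc = 8,
		.dynamic_range = DP_DYNAMIC_RANGE_VESA,
		.content_type = DP_CONTENT_TYPE_GRAPHICS,
	};

	/* Serializes the VSC fields into the SDP header and data bytes. */
	return drm_dp_vsc_sdp_pack(&vsc, sdp);
}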
drm-misc-next for v6.9:
UAPI Changes:
Cross-subsystem Changes:
backlight:
- corgi: include backlight header
fbdev:
- Cleanup includes in public header file
- fbtft: Include backlight header
Core Changes:
edid:
- Remove built-in EDID data
dp:
- Avoid AUX transfers on powered-down displays
- Add VSC SDP helpers
modesetting:
- Add sanity checks for polling
- Cleanups
scheduler:
- Cleanups
tests:
- Add helpers for mode-setting tests
Driver Changes:
i915:
- Use shared VSC SDP helper
mgag200:
- Work around PCI write bursts
mxsfb:
- Use managed mode config
nouveau:
- Include backlight header where necessary
qaic:
- Cleanups
sun4i:
- HDMI: updates to atomic mode setting
tegra:
- Fix GEM refcounting in error paths
tidss:
- Fix multi-display
- Fix initial Z position
v3d:
- Support display MMU page size
Signed-off-by: Rob Clark <robdclark@chromium.org>
Diffstat (limited to 'drivers/gpu')
754 files changed, 41698 insertions, 7292 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 260e32ef7bae..fa26a4e3a99d 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -80,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \ amdgpu_fw_attestation.o amdgpu_securedisplay.o \ amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \ - amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o + amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o @@ -98,7 +98,7 @@ amdgpu-y += \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \ - nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o + nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o # add DF block amdgpu-y += \ @@ -132,7 +132,8 @@ amdgpu-y += \ vega20_ih.o \ navi10_ih.o \ ih_v6_0.o \ - ih_v6_1.o + ih_v6_1.o \ + ih_v7_0.o # add PSP block amdgpu-y += \ @@ -143,7 +144,8 @@ amdgpu-y += \ psp_v11_0_8.o \ psp_v12_0.o \ psp_v13_0.o \ - psp_v13_0_4.o + psp_v13_0_4.o \ + psp_v14_0.o # add DCE block amdgpu-y += \ @@ -208,6 +210,7 @@ amdgpu-y += \ vcn_v4_0.o \ vcn_v4_0_3.o \ vcn_v4_0_5.o \ + vcn_v5_0_0.o \ amdgpu_jpeg.o \ jpeg_v1_0.o \ jpeg_v2_0.o \ @@ -215,7 +218,8 @@ amdgpu-y += \ jpeg_v3_0.o \ jpeg_v4_0.o \ jpeg_v4_0_3.o \ - jpeg_v4_0_5.o + jpeg_v4_0_5.o \ + jpeg_v5_0_0.o # add VPE block amdgpu-y += \ @@ -233,7 +237,8 @@ amdgpu-y += \ athub_v1_0.o \ athub_v2_0.o \ athub_v2_1.o \ - athub_v3_0.o + athub_v3_0.o \ + athub_v4_1_0.o # add SMUIO block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3d8a48f46b01..9246bca0a008 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -107,6 +107,7 @@ #include "amdgpu_smuio.h" #include "amdgpu_fdinfo.h" #include "amdgpu_mca.h" +#include "amdgpu_aca.h" #include "amdgpu_ras.h" #include "amdgpu_xcp.h" #include "amdgpu_seq64.h" @@ -114,14 +115,12 @@ #define MAX_GPU_INSTANCE 64 -struct amdgpu_gpu_instance -{ +struct amdgpu_gpu_instance { struct amdgpu_device *adev; int mgpu_fan_enabled; }; -struct amdgpu_mgpu_info -{ +struct amdgpu_mgpu_info { struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE]; struct mutex mutex; uint32_t num_gpu; @@ -140,8 +139,7 @@ enum amdgpu_ss { AMDGPU_SS_DRV_UNLOAD }; -struct amdgpu_watchdog_timer -{ +struct amdgpu_watchdog_timer { bool timeout_fatal_disable; uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */ }; @@ -198,8 +196,9 @@ extern int amdgpu_smu_pptable_id; extern uint amdgpu_dc_feature_mask; extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dc_visual_confirm; -extern uint amdgpu_dm_abm_level; +extern int amdgpu_dm_abm_level; extern int amdgpu_backlight; +extern int amdgpu_damage_clips; extern struct amdgpu_mgpu_info mgpu_info; extern int amdgpu_ras_enable; extern uint amdgpu_ras_mask; @@ -1045,6 +1044,9 @@ struct amdgpu_device { /* MCA */ struct amdgpu_mca mca; + /* ACA */ + struct amdgpu_aca aca; + struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; uint32_t harvest_ip_mask; int num_ip_blocks; @@ -1078,6 +1080,8 @@ struct amdgpu_device { bool in_s3; bool in_s4; bool in_s0ix; + /* indicate amdgpu suspension status */ + bool suspend_complete; enum pp_mp1_state mp1_state; struct 
amdgpu_doorbell_index doorbell_index; @@ -1092,6 +1096,7 @@ struct amdgpu_device { long sdma_timeout; long video_timeout; long compute_timeout; + long psp_timeout; uint64_t unique_id; uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; @@ -1329,6 +1334,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define WREG32_FIELD_OFFSET(reg, offset, field, val) \ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) +#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l)) /* * BIOS helpers. */ @@ -1547,9 +1553,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +void amdgpu_choose_low_power_state(struct amdgpu_device *adev); #else static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } +static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { } #endif #if defined(CONFIG_DRM_AMD_DC) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c new file mode 100644 index 000000000000..493982f94649 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -0,0 +1,879 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/list.h> +#include "amdgpu.h" +#include "amdgpu_aca.h" +#include "amdgpu_ras.h" + +#define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype} + +typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data); + +struct aca_banks { + int nr_banks; + struct list_head list; +}; + +struct aca_hwip { + int hwid; + int mcatype; +}; + +static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = { + ACA_BANK_HWID(SMU, 0x01, 0x01), + ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00), + ACA_BANK_HWID(UMC, 0x96, 0x00), +}; + +static void aca_banks_init(struct aca_banks *banks) +{ + if (!banks) + return; + + memset(banks, 0, sizeof(*banks)); + INIT_LIST_HEAD(&banks->list); +} + +static int aca_banks_add_bank(struct aca_banks *banks, struct aca_bank *bank) +{ + struct aca_bank_node *node; + + if (!bank) + return -EINVAL; + + node = kvzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + memcpy(&node->bank, bank, sizeof(*bank)); + + INIT_LIST_HEAD(&node->node); + list_add_tail(&node->node, &banks->list); + + banks->nr_banks++; + + return 0; +} + +static void aca_banks_release(struct aca_banks *banks) +{ + struct aca_bank_node *node, *tmp; + + list_for_each_entry_safe(node, tmp, &banks->list, node) { + list_del(&node->node); + kvfree(node); + } +} + +static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count) +{ + struct amdgpu_aca *aca = &adev->aca; + const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; + + if (!count) + return -EINVAL; + + if (!smu_funcs || !smu_funcs->get_valid_aca_count) + return -EOPNOTSUPP; + + return smu_funcs->get_valid_aca_count(adev, type, count); +} + +static struct aca_regs_dump { + const char *name; + int reg_idx; +} aca_regs[] = { + {"CONTROL", ACA_REG_IDX_CTL}, + {"STATUS", ACA_REG_IDX_STATUS}, + {"ADDR", ACA_REG_IDX_ADDR}, + {"MISC", ACA_REG_IDX_MISC0}, + {"CONFIG", ACA_REG_IDX_CONFG}, + {"IPID", ACA_REG_IDX_IPID}, + {"SYND", ACA_REG_IDX_SYND}, + {"DESTAT", ACA_REG_IDX_DESTAT}, + {"DEADDR", ACA_REG_IDX_DEADDR}, + {"CONTROL_MASK", ACA_REG_IDX_CTL_MASK}, +}; + +static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank) +{ + int i; + + dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n"); + /* plus 1 for output format, e.g: ACA[08/08]: xxxx */ + for (i = 0; i < ARRAY_SIZE(aca_regs); i++) + dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", + idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); +} + +static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type, + int start, int count, + struct aca_banks *banks) +{ + struct amdgpu_aca *aca = &adev->aca; + const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; + struct aca_bank bank; + int i, max_count, ret; + + if (!count) + return 0; + + if (!smu_funcs || !smu_funcs->get_valid_aca_bank) + return -EOPNOTSUPP; + + switch (type) { + case ACA_ERROR_TYPE_UE: + max_count = smu_funcs->max_ue_bank_count; + break; + case ACA_ERROR_TYPE_CE: + max_count = smu_funcs->max_ce_bank_count; + break; + case ACA_ERROR_TYPE_DEFERRED: + default: + return -EINVAL; + } + + if (start + count >= max_count) + return -EINVAL; + + count = min_t(int, count, max_count); + for (i = 0; i < count; i++) { + memset(&bank, 0, sizeof(bank)); + ret = smu_funcs->get_valid_aca_bank(adev, type, start + i, &bank); + if (ret) + return ret; + + aca_smu_bank_dump(adev, i, count, &bank); + + ret 
= aca_banks_add_bank(banks, &bank); + if (ret) + return ret; + } + + return 0; +} + +static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type) +{ + + struct aca_hwip *hwip; + int hwid, mcatype; + u64 ipid; + + if (!bank || type == ACA_HWIP_TYPE_UNKNOW) + return false; + + hwip = &aca_hwid_mcatypes[type]; + if (!hwip->hwid) + return false; + + ipid = bank->regs[ACA_REG_IDX_IPID]; + hwid = ACA_REG__IPID__HARDWAREID(ipid); + mcatype = ACA_REG__IPID__MCATYPE(ipid); + + return hwip->hwid == hwid && hwip->mcatype == mcatype; +} + +static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type) +{ + const struct aca_bank_ops *bank_ops = handle->bank_ops; + + if (!aca_bank_hwip_is_matched(bank, handle->hwip)) + return false; + + if (!bank_ops->aca_bank_is_valid) + return true; + + return bank_ops->aca_bank_is_valid(handle, bank, type, handle->data); +} + +static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_bank_info *info) +{ + struct aca_bank_error *bank_error; + + bank_error = kvzalloc(sizeof(*bank_error), GFP_KERNEL); + if (!bank_error) + return NULL; + + INIT_LIST_HEAD(&bank_error->node); + memcpy(&bank_error->info, info, sizeof(*info)); + + mutex_lock(&aerr->lock); + list_add_tail(&bank_error->node, &aerr->list); + mutex_unlock(&aerr->lock); + + return bank_error; +} + +static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca_bank_info *info) +{ + struct aca_bank_error *bank_error = NULL; + struct aca_bank_info *tmp_info; + bool found = false; + + mutex_lock(&aerr->lock); + list_for_each_entry(bank_error, &aerr->list, node) { + tmp_info = &bank_error->info; + if (tmp_info->socket_id == info->socket_id && + tmp_info->die_id == info->die_id) { + found = true; + goto out_unlock; + } + } + +out_unlock: + mutex_unlock(&aerr->lock); + + return found ? 
bank_error : NULL; +} + +static void aca_bank_error_remove(struct aca_error *aerr, struct aca_bank_error *bank_error) +{ + if (!aerr || !bank_error) + return; + + list_del(&bank_error->node); + aerr->nr_errors--; + + kvfree(bank_error); +} + +static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_bank_info *info) +{ + struct aca_bank_error *bank_error; + + if (!aerr || !info) + return NULL; + + bank_error = find_bank_error(aerr, info); + if (bank_error) + return bank_error; + + return new_bank_error(aerr, info); +} + +static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type, + struct aca_bank_report *report) +{ + struct aca_error_cache *error_cache = &handle->error_cache; + struct aca_bank_error *bank_error; + struct aca_error *aerr; + + if (!handle || !report) + return -EINVAL; + + if (!report->count[type]) + return 0; + + aerr = &error_cache->errors[type]; + bank_error = get_bank_error(aerr, &report->info); + if (!bank_error) + return -ENOMEM; + + bank_error->count[type] += report->count[type]; + + return 0; +} + +static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank, + enum aca_error_type type, struct aca_bank_report *report) +{ + const struct aca_bank_ops *bank_ops = handle->bank_ops; + + if (!bank || !report) + return -EINVAL; + + if (!bank_ops->aca_bank_generate_report) + return -EOPNOTSUPP; + + memset(report, 0, sizeof(*report)); + return bank_ops->aca_bank_generate_report(handle, bank, type, + report, handle->data); +} + +static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank, + enum aca_error_type type, void *data) +{ + struct aca_bank_report report; + int ret; + + ret = aca_generate_bank_report(handle, bank, type, &report); + if (ret) + return ret; + + if (!report.count[type]) + return 0; + + ret = aca_log_errors(handle, type, &report); + if (ret) + return ret; + + return 0; +} + +static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank, + enum aca_error_type type, bank_handler_t handler, void *data) +{ + struct aca_handle *handle; + int ret; + + if (list_empty(&mgr->list)) + return 0; + + list_for_each_entry(handle, &mgr->list, node) { + if (!aca_bank_is_valid(handle, bank, type)) + continue; + + ret = handler(handle, bank, type, data); + if (ret) + return ret; + } + + return 0; +} + +static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks, + enum aca_error_type type, bank_handler_t handler, void *data) +{ + struct aca_bank_node *node; + struct aca_bank *bank; + int ret; + + if (!mgr || !banks) + return -EINVAL; + + /* pre check to avoid unnecessary operations */ + if (list_empty(&mgr->list) || list_empty(&banks->list)) + return 0; + + list_for_each_entry(node, &banks->list, node) { + bank = &node->bank; + + ret = aca_dispatch_bank(mgr, bank, type, handler, data); + if (ret) + return ret; + } + + return 0; +} + +static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type, + bank_handler_t handler, void *data) +{ + struct amdgpu_aca *aca = &adev->aca; + struct aca_banks banks; + u32 count = 0; + int ret; + + if (list_empty(&aca->mgr.list)) + return 0; + + /* NOTE: pmfw is only support UE and CE */ + if (type == ACA_ERROR_TYPE_DEFERRED) + type = ACA_ERROR_TYPE_CE; + + ret = aca_smu_get_valid_aca_count(adev, type, &count); + if (ret) + return ret; + + if (!count) + return 0; + + aca_banks_init(&banks); + + ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks); + if (ret) + goto 
err_release_banks; + + if (list_empty(&banks.list)) { + ret = 0; + goto err_release_banks; + } + + ret = aca_dispatch_banks(&aca->mgr, &banks, type, + handler, data); + if (ret) + goto err_release_banks; + +err_release_banks: + aca_banks_release(&banks); + + return ret; +} + +static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_error_type type, struct ras_err_data *err_data) +{ + struct aca_bank_info *info; + struct amdgpu_smuio_mcm_config_info mcm_info; + u64 count; + + if (type >= ACA_ERROR_TYPE_COUNT) + return -EINVAL; + + count = bank_error->count[type]; + if (!count) + return 0; + + info = &bank_error->info; + mcm_info.die_id = info->die_id; + mcm_info.socket_id = info->socket_id; + + switch (type) { + case ACA_ERROR_TYPE_UE: + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count); + break; + case ACA_ERROR_TYPE_CE: + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count); + break; + case ACA_ERROR_TYPE_DEFERRED: + default: + break; + } + + return 0; +} + +static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type, struct ras_err_data *err_data) +{ + struct aca_error_cache *error_cache = &handle->error_cache; + struct aca_error *aerr = &error_cache->errors[type]; + struct aca_bank_error *bank_error, *tmp; + + mutex_lock(&aerr->lock); + + if (list_empty(&aerr->list)) + goto out_unlock; + + list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) { + aca_log_aca_error_data(bank_error, type, err_data); + aca_bank_error_remove(aerr, bank_error); + } + +out_unlock: + mutex_unlock(&aerr->lock); + + return 0; +} + +static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type, + struct ras_err_data *err_data) +{ + int ret; + + /* udpate aca bank to aca source error_cache first */ + ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL); + if (ret) + return ret; + + return aca_log_aca_error(handle, type, err_data); +} + +static bool aca_handle_is_valid(struct aca_handle *handle) +{ + if (!handle->mask || !list_empty(&handle->node)) + return false; + + return true; +} + +int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, + enum aca_error_type type, void *data) +{ + struct ras_err_data *err_data = (struct ras_err_data *)data; + + if (!handle || !err_data) + return -EINVAL; + + if (aca_handle_is_valid(handle)) + return -EOPNOTSUPP; + + if (!(BIT(type) & handle->mask)) + return 0; + + return __aca_get_error_data(adev, handle, type, err_data); +} + +static void aca_error_init(struct aca_error *aerr, enum aca_error_type type) +{ + mutex_init(&aerr->lock); + INIT_LIST_HEAD(&aerr->list); + aerr->type = type; + aerr->nr_errors = 0; +} + +static void aca_init_error_cache(struct aca_handle *handle) +{ + struct aca_error_cache *error_cache = &handle->error_cache; + int type; + + for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++) + aca_error_init(&error_cache->errors[type], type); +} + +static void aca_error_fini(struct aca_error *aerr) +{ + struct aca_bank_error *bank_error, *tmp; + + mutex_lock(&aerr->lock); + list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) + aca_bank_error_remove(aerr, bank_error); + + mutex_destroy(&aerr->lock); +} + +static void aca_fini_error_cache(struct aca_handle *handle) +{ + struct aca_error_cache *error_cache = &handle->error_cache; + int type; + + for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++) + aca_error_fini(&error_cache->errors[type]); +} + 
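As a standalone illustration (not part of the diff): the ACA_REG__IPID__* accessors used above are plain GENMASK_ULL bit-field extractions, defined in amdgpu_aca.h later in this patch. A minimal userspace sketch of the IPID decode that aca_bank_info_decode() performs, on a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* Same shape as GENMASK_ULL()/ACA_REG_FIELD() in the kernel headers:
 * extract bits [h:l] of a 64-bit register value.
 */
#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD(x, h, l)    (((x) & GENMASK_ULL(h, l)) >> (l))

int main(void)
{
	/* Made-up IPID: mcatype=0x0000, instidhi=0xb, hwid=0x096, instidlo=0x1 */
	uint64_t ipid = 0x0000b09600000001ULL;

	unsigned int mcatype  = FIELD(ipid, 63, 48);	/* 0x0000 */
	unsigned int instidhi = FIELD(ipid, 47, 44);	/* 0xb */
	unsigned int hwid     = FIELD(ipid, 43, 32);	/* 0x096: UMC per aca_hwid_mcatypes */
	unsigned int instidlo = FIELD(ipid, 31, 0);	/* 0x1 */

	/* die/socket split as done in aca_bank_info_decode() */
	int die_id    = (instidhi >> 2) & 0x03;				/* (0xb >> 2) & 3 = 2 */
	int socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);	/* 4 | 3 = 7 */

	printf("hwid=0x%03x mcatype=0x%04x die=%d socket=%d\n",
	       hwid, mcatype, die_id, socket_id);
	return 0;
}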
+static int add_aca_handle(struct amdgpu_device *adev, struct aca_handle_manager *mgr, struct aca_handle *handle, + const char *name, const struct aca_info *ras_info, void *data) +{ + memset(handle, 0, sizeof(*handle)); + + handle->adev = adev; + handle->mgr = mgr; + handle->name = name; + handle->hwip = ras_info->hwip; + handle->mask = ras_info->mask; + handle->bank_ops = ras_info->bank_ops; + handle->data = data; + aca_init_error_cache(handle); + + INIT_LIST_HEAD(&handle->node); + list_add_tail(&handle->node, &mgr->list); + mgr->nr_handles++; + + return 0; +} + +static ssize_t aca_sysfs_read(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct aca_handle *handle = container_of(attr, struct aca_handle, aca_attr); + + /* NOTE: the aca cache will be auto cleared once read, + * So the driver should unify the query entry point, forward request to ras query interface directly */ + return amdgpu_ras_aca_sysfs_read(dev, attr, handle, buf, handle->data); +} + +static int add_aca_sysfs(struct amdgpu_device *adev, struct aca_handle *handle) +{ + struct device_attribute *aca_attr = &handle->aca_attr; + + snprintf(handle->attr_name, sizeof(handle->attr_name) - 1, "aca_%s", handle->name); + aca_attr->show = aca_sysfs_read; + aca_attr->attr.name = handle->attr_name; + aca_attr->attr.mode = S_IRUGO; + sysfs_attr_init(&aca_attr->attr); + + return sysfs_add_file_to_group(&adev->dev->kobj, + &aca_attr->attr, + "ras"); +} + +int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle, + const char *name, const struct aca_info *ras_info, void *data) +{ + struct amdgpu_aca *aca = &adev->aca; + int ret; + + if (!amdgpu_aca_is_enabled(adev)) + return 0; + + ret = add_aca_handle(adev, &aca->mgr, handle, name, ras_info, data); + if (ret) + return ret; + + return add_aca_sysfs(adev, handle); +} + +static void remove_aca_handle(struct aca_handle *handle) +{ + struct aca_handle_manager *mgr = handle->mgr; + + aca_fini_error_cache(handle); + list_del(&handle->node); + mgr->nr_handles--; +} + +static void remove_aca_sysfs(struct aca_handle *handle) +{ + struct amdgpu_device *adev = handle->adev; + struct device_attribute *aca_attr = &handle->aca_attr; + + if (adev->dev->kobj.sd) + sysfs_remove_file_from_group(&adev->dev->kobj, + &aca_attr->attr, + "ras"); +} + +void amdgpu_aca_remove_handle(struct aca_handle *handle) +{ + if (!handle || list_empty(&handle->node)) + return; + + remove_aca_sysfs(handle); + remove_aca_handle(handle); +} + +static int aca_manager_init(struct aca_handle_manager *mgr) +{ + INIT_LIST_HEAD(&mgr->list); + mgr->nr_handles = 0; + + return 0; +} + +static void aca_manager_fini(struct aca_handle_manager *mgr) +{ + struct aca_handle *handle, *tmp; + + list_for_each_entry_safe(handle, tmp, &mgr->list, node) + amdgpu_aca_remove_handle(handle); +} + +bool amdgpu_aca_is_enabled(struct amdgpu_device *adev) +{ + return adev->aca.is_enabled; +} + +int amdgpu_aca_init(struct amdgpu_device *adev) +{ + struct amdgpu_aca *aca = &adev->aca; + int ret; + + ret = aca_manager_init(&aca->mgr); + if (ret) + return ret; + + return 0; +} + +void amdgpu_aca_fini(struct amdgpu_device *adev) +{ + struct amdgpu_aca *aca = &adev->aca; + + aca_manager_fini(&aca->mgr); +} + +int amdgpu_aca_reset(struct amdgpu_device *adev) +{ + amdgpu_aca_fini(adev); + + return amdgpu_aca_init(adev); +} + +void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs) +{ + struct amdgpu_aca *aca = &adev->aca; + + WARN_ON(aca->smu_funcs); + aca->smu_funcs = 
smu_funcs; +} + +int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info) +{ + u64 ipid; + u32 instidhi, instidlo; + + if (!bank || !info) + return -EINVAL; + + ipid = bank->regs[ACA_REG_IDX_IPID]; + info->hwid = ACA_REG__IPID__HARDWAREID(ipid); + info->mcatype = ACA_REG__IPID__MCATYPE(ipid); + /* + * Unfied DieID Format: SAASS. A:AID, S:Socket. + * Unfied DieID[4:4] = InstanceId[0:0] + * Unfied DieID[0:3] = InstanceIdHi[0:3] + */ + instidhi = ACA_REG__IPID__INSTANCEIDHI(ipid); + instidlo = ACA_REG__IPID__INSTANCEIDLO(ipid); + info->die_id = ((instidhi >> 2) & 0x03); + info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03); + + return 0; +} + +static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank) +{ + int error_code; + + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 6): + if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) { + error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); + return error_code & 0xff; + } + break; + default: + break; + } + + /* NOTE: the true error code is encoded in status.errorcode[0:7] */ + error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + + return error_code & 0xff; +} + +int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size) +{ + int i, error_code; + + if (!bank || !err_codes) + return -EINVAL; + + error_code = aca_bank_get_error_code(adev, bank); + for (i = 0; i < size; i++) { + if (err_codes[i] == error_code) + return 0; + } + + return -EINVAL; +} + +int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en) +{ + struct amdgpu_aca *aca = &adev->aca; + const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; + + if (!smu_funcs || !smu_funcs->set_debug_mode) + return -EOPNOTSUPP; + + return smu_funcs->set_debug_mode(adev, en); +} + +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + int ret; + + ret = amdgpu_ras_set_aca_debug_mode(adev, val ? true : false); + if (ret) + return ret; + + dev_info(adev->dev, "amdgpu set smu aca debug mode %s success\n", val ? "on" : "off"); + + return 0; +} + +static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx) +{ + struct aca_bank_info info; + int i, ret; + + ret = aca_bank_info_decode(bank, &info); + if (ret) + return; + + seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_ERROR_TYPE_UE ? 
"UE" : "CE"); + seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n", + idx, info.socket_id, info.die_id, info.hwid, info.mcatype); + + for (i = 0; i < ARRAY_SIZE(aca_regs); i++) + seq_printf(m, "aca entry[%d].regs[%d]: 0x%016llx\n", idx, aca_regs[i].reg_idx, bank->regs[aca_regs[i].reg_idx]); +} + +struct aca_dump_context { + struct seq_file *m; + int idx; +}; + +static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank, + enum aca_error_type type, void *data) +{ + struct aca_dump_context *ctx = (struct aca_dump_context *)data; + + aca_dump_entry(ctx->m, bank, type, ctx->idx++); + + return handler_aca_log_bank_error(handle, bank, type, NULL); +} + +static int aca_dump_show(struct seq_file *m, enum aca_error_type type) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct aca_dump_context context = { + .m = m, + .idx = 0, + }; + + return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context); +} + +static int aca_dump_ce_show(struct seq_file *m, void *unused) +{ + return aca_dump_show(m, ACA_ERROR_TYPE_CE); +} + +static int aca_dump_ce_open(struct inode *inode, struct file *file) +{ + return single_open(file, aca_dump_ce_show, inode->i_private); +} + +static const struct file_operations aca_ce_dump_debug_fops = { + .owner = THIS_MODULE, + .open = aca_dump_ce_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int aca_dump_ue_show(struct seq_file *m, void *unused) +{ + return aca_dump_show(m, ACA_ERROR_TYPE_UE); +} + +static int aca_dump_ue_open(struct inode *inode, struct file *file) +{ + return single_open(file, aca_dump_ue_show, inode->i_private); +} + +static const struct file_operations aca_ue_dump_debug_fops = { + .owner = THIS_MODULE, + .open = aca_dump_ue_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_set, "%llu\n"); +#endif + +void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root) +{ +#if defined(CONFIG_DEBUG_FS) + if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6)) + return; + + debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops); + debugfs_create_file("aca_ue_dump", 0400, root, adev, &aca_ue_dump_debug_fops); + debugfs_create_file("aca_ce_dump", 0400, root, adev, &aca_ce_dump_debug_fops); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h new file mode 100644 index 000000000000..2da50e095883 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -0,0 +1,202 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_ACA_H__ +#define __AMDGPU_ACA_H__ + +#include <linux/list.h> + +#define ACA_MAX_REGS_COUNT (16) + +#define ACA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l) +#define ACA_REG__STATUS__VAL(x) ACA_REG_FIELD(x, 63, 63) +#define ACA_REG__STATUS__OVERFLOW(x) ACA_REG_FIELD(x, 62, 62) +#define ACA_REG__STATUS__UC(x) ACA_REG_FIELD(x, 61, 61) +#define ACA_REG__STATUS__EN(x) ACA_REG_FIELD(x, 60, 60) +#define ACA_REG__STATUS__MISCV(x) ACA_REG_FIELD(x, 59, 59) +#define ACA_REG__STATUS__ADDRV(x) ACA_REG_FIELD(x, 58, 58) +#define ACA_REG__STATUS__PCC(x) ACA_REG_FIELD(x, 57, 57) +#define ACA_REG__STATUS__ERRCOREIDVAL(x) ACA_REG_FIELD(x, 56, 56) +#define ACA_REG__STATUS__TCC(x) ACA_REG_FIELD(x, 55, 55) +#define ACA_REG__STATUS__SYNDV(x) ACA_REG_FIELD(x, 53, 53) +#define ACA_REG__STATUS__CECC(x) ACA_REG_FIELD(x, 46, 46) +#define ACA_REG__STATUS__UECC(x) ACA_REG_FIELD(x, 45, 45) +#define ACA_REG__STATUS__DEFERRED(x) ACA_REG_FIELD(x, 44, 44) +#define ACA_REG__STATUS__POISON(x) ACA_REG_FIELD(x, 43, 43) +#define ACA_REG__STATUS__SCRUB(x) ACA_REG_FIELD(x, 40, 40) +#define ACA_REG__STATUS__ERRCOREID(x) ACA_REG_FIELD(x, 37, 32) +#define ACA_REG__STATUS__ADDRLSB(x) ACA_REG_FIELD(x, 29, 24) +#define ACA_REG__STATUS__ERRORCODEEXT(x) ACA_REG_FIELD(x, 21, 16) +#define ACA_REG__STATUS__ERRORCODE(x) ACA_REG_FIELD(x, 15, 0) + +#define ACA_REG__IPID__MCATYPE(x) ACA_REG_FIELD(x, 63, 48) +#define ACA_REG__IPID__INSTANCEIDHI(x) ACA_REG_FIELD(x, 47, 44) +#define ACA_REG__IPID__HARDWAREID(x) ACA_REG_FIELD(x, 43, 32) +#define ACA_REG__IPID__INSTANCEIDLO(x) ACA_REG_FIELD(x, 31, 0) + +#define ACA_REG__MISC0__VALID(x) ACA_REG_FIELD(x, 63, 63) +#define ACA_REG__MISC0__OVRFLW(x) ACA_REG_FIELD(x, 48, 48) +#define ACA_REG__MISC0__ERRCNT(x) ACA_REG_FIELD(x, 43, 32) + +#define ACA_REG__SYND__ERRORINFORMATION(x) ACA_REG_FIELD(x, 17, 0) + +/* NOTE: The following codes refers to the smu header file */ +#define ACA_EXTERROR_CODE_CE 0x3a +#define ACA_EXTERROR_CODE_FAULT 0x3b + +#define ACA_ERROR_UE_MASK BIT_MASK(ACA_ERROR_TYPE_UE) +#define ACA_ERROR_CE_MASK BIT_MASK(ACA_ERROR_TYPE_CE) +#define ACA_ERROR_DEFERRED_MASK BIT_MASK(ACA_ERROR_TYPE_DEFERRED) + +enum aca_reg_idx { + ACA_REG_IDX_CTL = 0, + ACA_REG_IDX_STATUS = 1, + ACA_REG_IDX_ADDR = 2, + ACA_REG_IDX_MISC0 = 3, + ACA_REG_IDX_CONFG = 4, + ACA_REG_IDX_IPID = 5, + ACA_REG_IDX_SYND = 6, + ACA_REG_IDX_DESTAT = 8, + ACA_REG_IDX_DEADDR = 9, + ACA_REG_IDX_CTL_MASK = 10, + ACA_REG_IDX_COUNT = 16, +}; + +enum aca_hwip_type { + ACA_HWIP_TYPE_UNKNOW = -1, + ACA_HWIP_TYPE_PSP = 0, + ACA_HWIP_TYPE_UMC, + ACA_HWIP_TYPE_SMU, + ACA_HWIP_TYPE_PCS_XGMI, + ACA_HWIP_TYPE_COUNT, +}; + +enum aca_error_type { + ACA_ERROR_TYPE_INVALID = -1, + ACA_ERROR_TYPE_UE = 0, + ACA_ERROR_TYPE_CE, + ACA_ERROR_TYPE_DEFERRED, + ACA_ERROR_TYPE_COUNT +}; + +struct aca_bank { + u64 regs[ACA_MAX_REGS_COUNT]; +}; + +struct aca_bank_node { + struct aca_bank bank; + struct list_head node; +}; + +struct aca_bank_info { + int die_id; + int socket_id; + int hwid; + int mcatype; +}; + +struct aca_bank_report { + 
struct aca_bank_info info; + u64 count[ACA_ERROR_TYPE_COUNT]; +}; + +struct aca_bank_error { + struct list_head node; + struct aca_bank_info info; + u64 count[ACA_ERROR_TYPE_COUNT]; +}; + +struct aca_error { + struct list_head list; + struct mutex lock; + enum aca_error_type type; + int nr_errors; +}; + +struct aca_handle_manager { + struct list_head list; + int nr_handles; +}; + +struct aca_error_cache { + struct aca_error errors[ACA_ERROR_TYPE_COUNT]; +}; + +struct aca_handle { + struct list_head node; + enum aca_hwip_type hwip; + struct amdgpu_device *adev; + struct aca_handle_manager *mgr; + struct aca_error_cache error_cache; + const struct aca_bank_ops *bank_ops; + struct device_attribute aca_attr; + char attr_name[64]; + const char *name; + u32 mask; + void *data; +}; + +struct aca_bank_ops { + int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, + struct aca_bank_report *report, void *data); + bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, + void *data); +}; + +struct aca_smu_funcs { + int max_ue_bank_count; + int max_ce_bank_count; + int (*set_debug_mode)(struct amdgpu_device *adev, bool enable); + int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_error_type type, u32 *count); + int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank); +}; + +struct amdgpu_aca { + struct aca_handle_manager mgr; + const struct aca_smu_funcs *smu_funcs; + bool is_enabled; +}; + +struct aca_info { + enum aca_hwip_type hwip; + const struct aca_bank_ops *bank_ops; + u32 mask; +}; + +int amdgpu_aca_init(struct amdgpu_device *adev); +void amdgpu_aca_fini(struct amdgpu_device *adev); +int amdgpu_aca_reset(struct amdgpu_device *adev); +void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs); +bool amdgpu_aca_is_enabled(struct amdgpu_device *adev); + +int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info); +int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size); + +int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle, + const char *name, const struct aca_info *aca_info, void *data); +void amdgpu_aca_remove_handle(struct aca_handle *handle); +int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, + enum aca_error_type type, void *data); +int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en); +void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 2deebece810e..7099ff9cf8c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1519,4 +1519,22 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) #endif /* CONFIG_AMD_PMC */ } +/** + * amdgpu_choose_low_power_state + * + * @adev: amdgpu_device_pointer + * + * Choose the target low power state for the GPU + */ +void amdgpu_choose_low_power_state(struct amdgpu_device *adev) +{ + if (adev->in_runpm) + return; + + if (amdgpu_acpi_is_s0ix_active(adev)) + adev->in_s0ix = true; + else if (amdgpu_acpi_is_s3_active(adev)) + adev->in_s3 = true; +} + #endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 41db030ddc4e..190039f14c30 100644 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -742,9 +742,10 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev) amdgpu_device_flush_hdp(adev, NULL); } -void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset) +void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block, bool reset) { - amdgpu_umc_poison_handler(adev, reset); + amdgpu_umc_poison_handler(adev, block, reset); } int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 27c61c535e29..e60f63ccf79a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -193,6 +193,9 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f); int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo); int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, unsigned long cur_seq, struct kgd_mem *mem); +int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, + uint32_t domain, + struct dma_fence *fence); #else static inline bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm) @@ -218,6 +221,13 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, { return 0; } +static inline +int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, + uint32_t domain, + struct dma_fence *fence) +{ + return 0; +} #endif /* Shared API */ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, @@ -326,7 +336,7 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, - bool reset); + enum amdgpu_ras_block block, bool reset); bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem); void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 231fd927dcfb..5cd84f72bf26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -426,9 +426,9 @@ validate_fail: return ret; } -static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, - uint32_t domain, - struct dma_fence *fence) +int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, + uint32_t domain, + struct dma_fence *fence) { int ret = amdgpu_bo_reserve(bo, false); @@ -464,13 +464,15 @@ static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) * again. Page directories are only updated after updating page * tables. 
*/ -static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) +static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm, + struct ww_acquire_ctx *ticket) { struct amdgpu_bo *pd = vm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); int ret; - ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL); + ret = amdgpu_vm_validate(adev, vm, ticket, + amdgpu_amdkfd_validate_vm_bo, NULL); if (ret) { pr_err("failed to validate PT BOs\n"); return ret; @@ -1310,14 +1312,15 @@ update_gpuvm_pte_failed: return ret; } -static int process_validate_vms(struct amdkfd_process_info *process_info) +static int process_validate_vms(struct amdkfd_process_info *process_info, + struct ww_acquire_ctx *ticket) { struct amdgpu_vm *peer_vm; int ret; list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { - ret = vm_validate_pt_pd_bos(peer_vm); + ret = vm_validate_pt_pd_bos(peer_vm, ticket); if (ret) return ret; } @@ -1402,7 +1405,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, ret = amdgpu_bo_reserve(vm->root.bo, true); if (ret) goto reserve_pd_fail; - ret = vm_validate_pt_pd_bos(vm); + ret = vm_validate_pt_pd_bos(vm, NULL); if (ret) { pr_err("validate_pt_pd_bos() failed\n"); goto validate_pd_fail; @@ -2043,7 +2046,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( bo->tbo.resource->mem_type == TTM_PL_SYSTEM) is_invalid_userptr = true; - ret = vm_validate_pt_pd_bos(avm); + ret = vm_validate_pt_pd_bos(avm, NULL); if (unlikely(ret)) goto out_unreserve; @@ -2136,7 +2139,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( goto unreserve_out; } - ret = vm_validate_pt_pd_bos(avm); + ret = vm_validate_pt_pd_bos(avm, NULL); if (unlikely(ret)) goto unreserve_out; @@ -2634,7 +2637,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) } } - ret = process_validate_vms(process_info); + ret = process_validate_vms(process_info, NULL); if (ret) goto unreserve_out; @@ -2894,11 +2897,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * amdgpu_sync_create(&sync_obj); - /* Validate PDs and PTs */ - ret = process_validate_vms(process_info); - if (ret) - goto validate_map_fail; - /* Validate BOs and map them to GPUVM (update VM page tables). */ list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { @@ -2949,6 +2947,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * if (failed_size) pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); + /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO + * validations above would invalidate DMABuf imports again. 
+ */ + ret = process_validate_vms(process_info, &exec.ticket); + if (ret) + goto validate_map_fail; + /* Update mappings not managed by KFD */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { @@ -3020,7 +3025,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * &process_info->eviction_fence->base, DMA_RESV_USAGE_BOOKKEEP); } - /* Attach eviction fence to PD / PT BOs */ + /* Attach eviction fence to PD / PT BOs and DMABuf imports */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { struct amdgpu_bo *bo = peer_vm->root.bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index dce9e7d5e4ec..52b12c1718eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1018,7 +1018,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, if (clock_type == COMPUTE_ENGINE_PLL_PARAM) { args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock); - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, + sizeof(args)); dividers->post_div = args.v3.ucPostDiv; dividers->enable_post_div = (args.v3.ucCntlFlag & @@ -1038,7 +1039,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, if (strobe_mode) args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, + sizeof(args)); dividers->post_div = args.v5.ucPostDiv; dividers->enable_post_div = (args.v5.ucCntlFlag & @@ -1056,7 +1058,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, /* fusion */ args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */ - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, + sizeof(args)); dividers->post_divider = dividers->post_div = args.v4.ucPostDiv; dividers->real_clock = le32_to_cpu(args.v4.ulClock); @@ -1067,7 +1070,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, args.v6_in.ulClock.ulComputeClockFlag = clock_type; args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */ - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, + sizeof(args)); dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv); dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac); @@ -1109,7 +1113,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, if (strobe_mode) args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, + sizeof(args)); mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac); mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv); @@ -1151,7 +1156,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, if (mem_clock) args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK); - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, 
index, (uint32_t *)&args, + sizeof(args)); } void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev, @@ -1205,7 +1211,8 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, args.v2.ucVoltageMode = 0; args.v2.usVoltageLevel = 0; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, + sizeof(args)); *voltage = le16_to_cpu(args.v2.usVoltageLevel); break; @@ -1214,7 +1221,8 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL; args.v3.usVoltageLevel = cpu_to_le16(voltage_id); - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, + sizeof(args)); *voltage = le16_to_cpu(args.v3.usVoltageLevel); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index fb2681dd6b33..6857c586ded7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -941,5 +941,6 @@ int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset) return -EINVAL; } - return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1); + return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1, + sizeof(asic_init_ps_v2_1)); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index c7eb2caec65a..649b5530d8ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -36,7 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev); bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev); -bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address); +bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t *i2c_address); bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev); bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 6adeddfb3d56..0a4b09709cfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -952,10 +952,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->bytes_moved = 0; p->bytes_moved_vis = 0; - r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, - amdgpu_cs_bo_validate, p); + r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL, + amdgpu_cs_bo_validate, p); if (r) { - DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); + DRM_ERROR("amdgpu_vm_validate() failed.\n"); goto out_free_user_pages; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 796fa6f1420b..cfdf558b48b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -28,9 +28,8 @@ uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) { - uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT; + uint64_t addr = 
AMDGPU_VA_RESERVED_CSA_START(adev); - addr -= AMDGPU_VA_RESERVED_SIZE; addr = amdgpu_gmc_sign_extend(addr); return addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fdde7488d0ed..b0ea4ddc8e72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -96,6 +96,9 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 #define AMDGPU_MAX_RETRY_LIMIT 2 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL) +#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2) +#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2) +#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2) static const struct drm_driver amdgpu_kms_driver; @@ -781,12 +784,22 @@ u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, void __iomem *pcie_index_hi_offset; void __iomem *pcie_data_offset; - pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); - pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); - if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) - pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); - else + if (unlikely(!adev->nbio.funcs)) { + pcie_index = AMDGPU_PCIE_INDEX_FALLBACK; + pcie_data = AMDGPU_PCIE_DATA_FALLBACK; + } else { + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); + } + + if (reg_addr >> 32) { + if (unlikely(!adev->nbio.funcs)) + pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK; + else + pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); + } else { pcie_index_hi = 0; + } spin_lock_irqsave(&adev->pcie_idx_lock, flags); pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; @@ -1218,8 +1231,6 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev) amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) { amdgpu_psp_wait_for_bootloader(adev); ret = amdgpu_atomfirmware_asic_init(adev, true); - /* TODO: check the return val and stop device initialization if boot fails */ - amdgpu_psp_query_boot_status(adev); return ret; } else { return amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -1442,6 +1453,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ + if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) + DRM_WARN("System can't access extended configuration space,please check!!\n"); + /* skip if the bios has already enabled large BAR */ if (adev->gmc.real_vram_size && (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) @@ -4514,13 +4529,15 @@ int amdgpu_device_prepare(struct drm_device *dev) struct amdgpu_device *adev = drm_to_adev(dev); int i, r; + amdgpu_choose_low_power_state(adev); + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; /* Evict the majority of BOs before starting suspend sequence */ r = amdgpu_device_evict_resources(adev); if (r) - return r; + goto unprepare; for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) @@ -4529,10 +4546,15 @@ int amdgpu_device_prepare(struct drm_device *dev) continue; r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev); if (r) - return r; + goto unprepare; } return 0; + +unprepare: + adev->in_s0ix = adev->in_s3 = false; + + return r; } /** @@ -4569,7 +4591,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) 
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); - flush_delayed_work(&adev->gfx.gfx_off_delay_work); amdgpu_ras_suspend(adev); @@ -5680,6 +5701,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) amdgpu_ras_resume(adev); } else { @@ -6101,6 +6123,20 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) struct amdgpu_reset_context reset_context; u32 memsize; struct list_head device_list; + struct amdgpu_hive_info *hive; + int hive_ras_recovery = 0; + struct amdgpu_ras *ras; + + /* PCI error slot reset should be skipped During RAS recovery */ + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + hive_ras_recovery = atomic_read(&hive->ras_recovery); + amdgpu_put_xgmi_hive(hive); + } + ras = amdgpu_ras_get_context(adev); + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) && + ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) + return PCI_ERS_RESULT_RECOVERED; DRM_INFO("PCI error: slot reset callback!!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index c7d60dd0fb97..78588334577a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -27,6 +27,7 @@ #include "amdgpu_discovery.h" #include "soc15_hw_ip.h" #include "discovery.h" +#include "amdgpu_ras.h" #include "soc15.h" #include "gfx_v9_0.h" @@ -63,17 +64,20 @@ #include "hdp_v5_0.h" #include "hdp_v5_2.h" #include "hdp_v6_0.h" +#include "hdp_v7_0.h" #include "nv.h" #include "soc21.h" #include "navi10_ih.h" #include "ih_v6_0.h" #include "ih_v6_1.h" +#include "ih_v7_0.h" #include "gfx_v10_0.h" #include "gfx_v11_0.h" #include "sdma_v5_0.h" #include "sdma_v5_2.h" #include "sdma_v6_0.h" #include "lsdma_v6_0.h" +#include "lsdma_v7_0.h" #include "vcn_v2_0.h" #include "jpeg_v2_0.h" #include "vcn_v3_0.h" @@ -92,12 +96,15 @@ #include "smuio_v13_0.h" #include "smuio_v13_0_3.h" #include "smuio_v13_0_6.h" +#include "vcn_v5_0_0.h" +#include "jpeg_v5_0_0.h" #include "amdgpu_vpe.h" #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); +#define mmIP_DISCOVERY_VERSION 0x16A00 #define mmRCC_CONFIG_MEMSIZE 0xde3 #define mmMP0_SMN_C2PMSG_33 0x16061 #define mmMM_INDEX 0x0 @@ -518,7 +525,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) out: kfree(adev->mman.discovery_bin); adev->mman.discovery_bin = NULL; - + if ((amdgpu_discovery != 2) && + (RREG32(mmIP_DISCOVERY_VERSION) == 4)) + amdgpu_ras_query_boot_status(adev, 4); return r; } @@ -1278,11 +1287,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) * 0b10 : encode is disabled * 0b01 : decode is disabled */ - adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = - ip->revision & 0xc0; - ip->revision &= ~0xc0; if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES) { + adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = + ip->revision & 0xc0; adev->vcn.num_vcn_inst++; adev->vcn.inst_mask |= (1U << ip->instance_number); @@ -1293,6 +1301,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) adev->vcn.num_vcn_inst + 1, AMDGPU_MAX_VCN_INSTANCES); } + ip->revision &= ~0xc0; } if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || 
le16_to_cpu(ip->hw_id) == SDMA1_HWID || @@ -1763,6 +1772,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(6, 1, 0): amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block); break; + case IP_VERSION(7, 0, 0): + amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block); + break; default: dev_err(adev->dev, "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", @@ -1812,11 +1824,16 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 10): case IP_VERSION(13, 0, 11): case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 1): amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); break; case IP_VERSION(13, 0, 4): amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block); break; + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block); + break; default: dev_err(adev->dev, "Failed to add psp ip block(MP0_HWIP:0x%x)\n", @@ -2033,6 +2050,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): case IP_VERSION(6, 1, 0): + case IP_VERSION(6, 1, 1): amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block); break; default: @@ -2122,6 +2140,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block); amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block); break; + case IP_VERSION(5, 0, 0): + amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block); + break; default: dev_err(adev->dev, "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", @@ -2493,6 +2515,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg; break; case IP_VERSION(7, 11, 0): + case IP_VERSION(7, 11, 1): adev->nbio.funcs = &nbio_v7_11_funcs; adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg; break; @@ -2560,6 +2583,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(6, 1, 0): adev->hdp.funcs = &hdp_v6_0_funcs; break; + case IP_VERSION(7, 0, 0): + adev->hdp.funcs = &hdp_v7_0_funcs; + break; default: break; } @@ -2624,6 +2650,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 8): case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 1): adev->smuio.funcs = &smuio_v13_0_6_funcs; break; default: @@ -2637,6 +2664,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(6, 0, 3): adev->lsdma.funcs = &lsdma_v6_0_funcs; break; + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 0, 1): + adev->lsdma.funcs = &lsdma_v7_0_funcs; + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index decbbe3d4f06..055ba2ea4c12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -377,6 +377,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) struct amdgpu_vm_bo_base *bo_base; int r; + /* FIXME: This should be after the "if", but needs a fix to make sure + * DMABuf imports are initialized in the right VM list. 
+ */ + amdgpu_vm_bo_invalidate(adev, bo, false); if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 971acf01bea6..af7fae7907d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -211,6 +211,7 @@ int amdgpu_seamless = -1; /* auto */ uint amdgpu_debug_mask; int amdgpu_agp = -1; /* auto */ int amdgpu_wbrf = -1; +int amdgpu_damage_clips = -1; /* auto */ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -366,7 +367,7 @@ module_param_named(aspm, amdgpu_aspm, int, 0444); * Setting the value to 0 disables this functionality. * Setting the value to -2 is auto enabled with power down when displays are attached. */ -MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = autowith displays)"); +MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)"); module_param_named(runpm, amdgpu_runtime_pm, int, 0444); /** @@ -593,7 +594,7 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644); #ifdef CONFIG_DRM_AMDGPU_SI #if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) -int amdgpu_si_support = 0; +int amdgpu_si_support; MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))"); #else int amdgpu_si_support = 1; @@ -612,7 +613,7 @@ module_param_named(si_support, amdgpu_si_support, int, 0444); #ifdef CONFIG_DRM_AMDGPU_CIK #if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) -int amdgpu_cik_support = 0; +int amdgpu_cik_support; MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))"); #else int amdgpu_cik_support = 1; @@ -848,18 +849,31 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444); * the ABM algorithm, with 1 being the least reduction and 4 being the most * reduction. * - * Defaults to 0, or disabled. Userspace can still override this level later - * after boot. + * Defaults to -1, or disabled. Userspace can only override this level after + * boot if it's set to auto. */ -uint amdgpu_dm_abm_level; -MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) "); -module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444); +int amdgpu_dm_abm_level = -1; +MODULE_PARM_DESC(abmlevel, + "ABM level (0 = off, 1-4 = backlight reduction level, -1 auto (default))"); +module_param_named(abmlevel, amdgpu_dm_abm_level, int, 0444); int amdgpu_backlight = -1; MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))"); module_param_named(backlight, amdgpu_backlight, bint, 0444); /** + * DOC: damageclips (int) + * Enable or disable damage clips support. If damage clips support is disabled, + * we will force full frame updates, irrespective of what user space sends to + * us. + * + * Defaults to -1 (where it is enabled unless a PSR-SU display is detected). + */ +MODULE_PARM_DESC(damageclips, + "Damage clips support (0 = disable, 1 = enable, -1 auto (default))"); +module_param_named(damageclips, amdgpu_damage_clips, int, 0444); + +/** * DOC: tmz (int) * Trusted Memory Zone (TMZ) is a method to protect data being written * to or read from memory. 
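The abmlevel and damageclips parameters above both follow the driver's "-1 means auto" convention. As a rough illustration of how such a tri-state parameter is typically resolved at run time, consider the sketch below; the helper name and the PSR-SU flag are hypothetical stand-ins for this note only, not the actual amdgpu_dm code, which this hunk does not show.

/* Minimal sketch, assuming a hypothetical consumer of a -1/0/1 module
 * parameter. A non-negative value is a user override; -1 defers the
 * decision to the driver (here: disable damage clips on PSR-SU panels).
 */
static bool damage_clips_enabled(int param, bool psr_su_panel)
{
	if (param >= 0)
		return param != 0;	/* user forced enable/disable */

	return !psr_su_panel;		/* auto: on unless PSR-SU */
}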
@@ -2476,6 +2490,7 @@ static int amdgpu_pmops_suspend(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = false; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) @@ -2490,6 +2505,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = true; if (amdgpu_acpi_should_gpu_reset(adev)) return amdgpu_asic_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index 5706b282a0c7..c7df7fa3459f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -97,6 +97,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) stats.requested_visible_vram/1024UL); drm_printf(p, "amd-requested-gtt:\t%llu KiB\n", stats.requested_gtt/1024UL); + drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL); + drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL); + drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL); + for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) { if (!usage[hw_ip]) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 49a5f1c73b3e..22aeee8adb71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -187,7 +187,34 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, else ++bo_va->ref_count; amdgpu_bo_unreserve(abo); - return 0; + + /* Validate and add eviction fence to DMABuf imports with dynamic + * attachment in compute VMs. Re-validation will be done by + * amdgpu_vm_validate. Fences are on the reservation shared with the + * export, which is currently required to be validated and fenced + * already by amdgpu_amdkfd_gpuvm_restore_process_bos. + * + * Nested locking below for the case that a GEM object is opened in + * kfd_mem_export_dmabuf. Since the lock below is only taken for imports, + * but not for export, this is a different lock class that cannot lead to + * circular lock dependencies. 
+ */ + if (!vm->is_compute_context || !vm->process_info) + return 0; + if (!obj->import_attach || + !dma_buf_is_dynamic(obj->import_attach->dmabuf)) + return 0; + mutex_lock_nested(&vm->process_info->lock, 1); + if (!WARN_ON(!vm->process_info->eviction_fence)) { + r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT, + &vm->process_info->eviction_fence->base); + if (r) + dev_warn(adev->dev, "%d: validate_and_fence failed: %d\n", + vm->task_info.pid, r); + } + mutex_unlock(&vm->process_info->lock); + + return r; } static void amdgpu_gem_object_close(struct drm_gem_object *obj, @@ -682,10 +709,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, uint64_t vm_size; int r = 0; - if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { + if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) { dev_dbg(dev->dev, "va_address 0x%llx is in reserved area 0x%llx\n", - args->va_address, AMDGPU_VA_RESERVED_SIZE); + args->va_address, AMDGPU_VA_RESERVED_BOTTOM); return -EINVAL; } @@ -701,7 +728,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->va_address &= AMDGPU_GMC_HOLE_MASK; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; - vm_size -= AMDGPU_VA_RESERVED_SIZE; + vm_size -= AMDGPU_VA_RESERVED_TOP; if (args->va_address + args->map_size > vm_size) { dev_dbg(dev->dev, "va_address 0x%llx is in top reserved area 0x%llx\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 82b4b2019fca..f04803a44b44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -643,8 +643,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); for (i = 0; i < adev->gfx.num_compute_rings; i++) { j = i + xcc_id * adev->gfx.num_compute_rings; - kiq->pmf->kiq_map_queues(kiq_ring, - &adev->gfx.compute_ring[j]); + kiq->pmf->kiq_map_queues(kiq_ring, + &adev->gfx.compute_ring[j]); } r = amdgpu_ring_test_helper(kiq_ring); @@ -724,8 +724,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + /* If going to s2idle, no need to wait */ + if (adev->in_s0ix) { + if (!amdgpu_dpm_set_powergating_by_smu(adev, + AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; + } else { + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); + } } } else { if (adev->gfx.gfx_off_req_count == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 55784a9f26c4..d4a848c51a83 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -52,7 +52,7 @@ int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev) struct amdgpu_bo_param bp; u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21; - uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) -1) >> pde0_page_shift; + uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift; memset(&bp, 0, sizeof(bp)); bp.size = PAGE_ALIGN((npdes + 1) * 8); @@ -746,6 +746,59 @@ error_unlock_reset: return r; } +void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask, + uint32_t xcc_inst) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst]; + struct amdgpu_ring *ring = &kiq->ring; + 
signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + + if (adev->mes.ring.sched.ready) { + amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1, + ref, mask); + return; + } + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, + ref, mask); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) + goto failed_undo; + + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for IRQ context */ + if (r < 1 && in_interrupt()) + goto failed_kiq; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq; + + return; + +failed_undo: + amdgpu_ring_undo(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); +failed_kiq: + dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1); +} + /** * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index e699d1ca8deb..17f40ea1104b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -417,6 +417,10 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, uint32_t flush_type, bool all_hub, uint32_t inst); +void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask, + uint32_t xcc_inst); extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev); extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index ddd0891da116..3d7fcdeaf8cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -62,9 +62,8 @@ int amdgpu_pasid_alloc(unsigned int bits) int pasid = -EINVAL; for (bits = min(bits, 31U); bits > 0; bits--) { - pasid = ida_simple_get(&amdgpu_pasid_ida, - 1U << (bits - 1), 1U << bits, - GFP_KERNEL); + pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1), + (1U << bits) - 1, GFP_KERNEL); if (pasid != -ENOSPC) break; } @@ -82,7 +81,7 @@ int amdgpu_pasid_alloc(unsigned int bits) void amdgpu_pasid_free(u32 pasid) { trace_amdgpu_pasid_freed(pasid); - ida_simple_remove(&amdgpu_pasid_ida, pasid); + ida_free(&amdgpu_pasid_ida, pasid); } static void amdgpu_pasid_free_cb(struct dma_fence *fence, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 2ff2897fd1db..6df99cb00d9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -36,10 +36,35 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work); int amdgpu_jpeg_sw_init(struct amdgpu_device *adev) { + int i, r; + INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler); mutex_init(&adev->jpeg.jpeg_pg_lock); atomic_set(&adev->jpeg.total_submission_cnt, 0); + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG)) + adev->jpeg.indirect_sram = true; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + if 
(adev->jpeg.indirect_sram) { + r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->jpeg.inst[i].dpg_sram_bo, + &adev->jpeg.inst[i].dpg_sram_gpu_addr, + &adev->jpeg.inst[i].dpg_sram_cpu_addr); + if (r) { + dev_err(adev->dev, + "JPEG %d (%d) failed to allocate DPG bo\n", i, r); + return r; + } + } + } + return 0; } @@ -51,6 +76,11 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev) if (adev->jpeg.harvest_config & (1 << i)) continue; + amdgpu_bo_free_kernel( + &adev->jpeg.inst[i].dpg_sram_bo, + &adev->jpeg.inst[i].dpg_sram_gpu_addr, + (void **)&adev->jpeg.inst[i].dpg_sram_cpu_addr); + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]); } @@ -210,12 +240,15 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else { r = 0; } + if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]); if (tmp == 0xDEADBEEF) break; udelay(1); + if (amdgpu_emu_mode == 1) + udelay(10); } if (i >= adev->usec_timeout) @@ -296,3 +329,16 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) return 0; } + +int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx, + enum AMDGPU_UCODE_ID ucode_id) +{ + struct amdgpu_firmware_info ucode = { + .ucode_id = AMDGPU_UCODE_ID_JPEG_RAM, + .mc_addr = adev->jpeg.inst[inst_idx].dpg_sram_gpu_addr, + .ucode_size = ((uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_curr_addr - + (uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr), + }; + + return psp_execute_ip_fw_load(&adev->psp, &ucode); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index ffe47e9f5bf2..aea31d61d991 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -32,6 +32,34 @@ #define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) #define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1) +#define WREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, value, indirect) \ + do { \ + if (!indirect) { \ + WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \ + mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15( \ + JPEG, GET_INST(JPEG, inst_idx), \ + mmUVD_DPG_LMA_CTL, \ + (UVD_DPG_LMA_CTL__READ_WRITE_MASK | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \ + indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ + } else { \ + *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \ + offset; \ + *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \ + value; \ + } \ + } while (0) + +#define RREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, mask_en) \ + ({ \ + WREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_CTL, \ + (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + RREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_DATA); \ + }) + struct amdgpu_jpeg_reg{ unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS]; }; @@ -41,6 +69,11 @@ struct amdgpu_jpeg_inst { struct amdgpu_irq_src irq; struct amdgpu_irq_src ras_poison_irq; struct amdgpu_jpeg_reg external; + struct amdgpu_bo *dpg_sram_bo; + struct dpg_pause_state pause_state; + void *dpg_sram_cpu_addr; + uint64_t dpg_sram_gpu_addr; + uint32_t *dpg_sram_curr_addr; uint8_t aid_id; }; @@ -63,6 +96,7 @@ struct amdgpu_jpeg { uint16_t inst_mask; uint8_t num_inst_per_aid; + bool indirect_sram; }; int amdgpu_jpeg_sw_init(struct amdgpu_device *adev); @@ -82,5 +116,7 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device 
*adev, int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); +int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx, + enum AMDGPU_UCODE_ID ucode_id); #endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bf4f48fe438d..a2df3025a754 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -894,14 +894,14 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; - vm_size -= AMDGPU_VA_RESERVED_SIZE; + vm_size -= AMDGPU_VA_RESERVED_TOP; /* Older VCE FW versions are buggy and can handle only 40bits */ if (adev->vce.fw_version && adev->vce.fw_version < AMDGPU_VCE_FW_53_45) vm_size = min(vm_size, 1ULL << 40); - dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; + dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_BOTTOM; dev_info->virtual_address_max = min(vm_size, AMDGPU_GMC_HOLE_START); @@ -1114,6 +1114,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } ui32 >>= 8; break; + case AMDGPU_INFO_SENSOR_GPU_INPUT_POWER: + /* get input GPU power */ + if (amdgpu_dpm_read_sensor(adev, + AMDGPU_PP_SENSOR_GPU_INPUT_POWER, + (void *)&ui32, &ui32_size)) { + return -EINVAL; + } + ui32 >>= 8; + break; case AMDGPU_INFO_SENSOR_VDDNB: /* get VDDNB in millivolts */ if (amdgpu_dpm_read_sensor(adev, @@ -1370,6 +1379,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) goto error_vm; } + r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va); + if (r) + goto error_vm; + mutex_init(&fpriv->bo_list_lock); idr_init_base(&fpriv->bo_list_handles, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 59fafb8392e0..24ad4b97177b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -27,6 +27,16 @@ #include "umc/umc_6_7_0_offset.h" #include "umc/umc_6_7_0_sh_mask.h" +static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev, + uint64_t mc_status) +{ + if (adev->umc.ras->check_ecc_err_status) + return adev->umc.ras->check_ecc_err_status(adev, + AMDGPU_MCA_ERROR_TYPE_DE, &mc_status); + + return false; +} + void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev, uint64_t mc_status_addr, unsigned long *error_count) @@ -202,16 +212,16 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry) { - dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n"); - dev_info(adev->dev, "[Hardware error] aca entry[%02d].STATUS=0x%016llx\n", + dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n"); + dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n", idx, entry->regs[MCA_REG_IDX_STATUS]); - dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n", + dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n", idx, entry->regs[MCA_REG_IDX_ADDR]); - dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n", + dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n", idx, entry->regs[MCA_REG_IDX_MISC0]); - dev_info(adev->dev, 
"[Hardware error] aca entry[%02d].IPID=0x%016llx\n", + dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n", idx, entry->regs[MCA_REG_IDX_IPID]); - dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n", + dev_info(adev->dev, HW_ERR "aca entry[%02d].SYND=0x%016llx\n", idx, entry->regs[MCA_REG_IDX_SYND]); } @@ -256,9 +266,14 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo if (type == AMDGPU_MCA_ERROR_TYPE_UE) amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, &err_addr, (uint64_t)count); - else - amdgpu_ras_error_statistic_ce_count(err_data, - &mcm_info, &err_addr, (uint64_t)count); + else { + if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS])) + amdgpu_ras_error_statistic_de_count(err_data, + &mcm_info, &err_addr, (uint64_t)count); + else + amdgpu_ras_error_statistic_ce_count(err_data, + &mcm_info, &err_addr, (uint64_t)count); + } } out_mca_release: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index b399f1b62887..b964110ed1e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -65,6 +65,7 @@ enum amdgpu_mca_ip { enum amdgpu_mca_error_type { AMDGPU_MCA_ERROR_TYPE_UE = 0, AMDGPU_MCA_ERROR_TYPE_CE, + AMDGPU_MCA_ERROR_TYPE_DE, }; struct amdgpu_mca_ras_block { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index da48b6da0107..a98e03e0a51f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1398,7 +1398,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev) goto error_fini; } - ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE; + ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM; r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data); if (r) { DRM_ERROR("failed to map ctx meta data\n"); @@ -1565,9 +1565,9 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev) #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *root = minor->debugfs_root; - - debugfs_create_file("amdgpu_mes_event_log", 0444, root, - adev, &amdgpu_debugfs_mes_event_log_fops); + if (adev->enable_mes) + debugfs_create_file("amdgpu_mes_event_log", 0444, root, + adev, &amdgpu_debugfs_mes_event_log_fops); #endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b671b0665492..010b0cb7693c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1273,25 +1273,36 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats) { uint64_t size = amdgpu_bo_size(bo); + struct drm_gem_object *obj; unsigned int domain; + bool shared; /* Abort if the BO doesn't currently have a backing store */ if (!bo->tbo.resource) return; + obj = &bo->tbo.base; + shared = drm_gem_object_is_shared_for_memory_stats(obj); + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: stats->vram += size; if (amdgpu_bo_in_cpu_visible_vram(bo)) stats->visible_vram += size; + if (shared) + stats->vram_shared += size; break; case AMDGPU_GEM_DOMAIN_GTT: stats->gtt += size; + if (shared) + stats->gtt_shared += size; break; case AMDGPU_GEM_DOMAIN_CPU: default: stats->cpu += size; + if (shared) + stats->cpu_shared += size; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 
a3ea8a82db23..be679c42b0b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -138,12 +138,18 @@ struct amdgpu_bo_vm { struct amdgpu_mem_stats { /* current VRAM usage, includes visible VRAM */ uint64_t vram; + /* current shared VRAM usage, includes visible VRAM */ + uint64_t vram_shared; /* current visible VRAM usage */ uint64_t visible_vram; /* current GTT usage */ uint64_t gtt; + /* current shared GTT usage */ + uint64_t gtt_shared; /* current system memory usage */ uint64_t cpu; + /* current shared system memory usage */ + uint64_t cpu_shared; /* sum of evicted buffers, includes visible VRAM */ uint64_t evicted_vram; /* sum of evicted buffers due to CPU access */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 0328616473f8..3c2b1413058b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -38,6 +38,7 @@ #include "psp_v12_0.h" #include "psp_v13_0.h" #include "psp_v13_0_4.h" +#include "psp_v14_0.h" #include "amdgpu_ras.h" #include "amdgpu_securedisplay.h" @@ -162,20 +163,26 @@ static int psp_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; + psp->autoload_supported = true; + psp->boot_time_tmr = true; + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { case IP_VERSION(9, 0, 0): psp_v3_1_set_psp_funcs(psp); psp->autoload_supported = false; + psp->boot_time_tmr = false; break; case IP_VERSION(10, 0, 0): case IP_VERSION(10, 0, 1): psp_v10_0_set_psp_funcs(psp); psp->autoload_supported = false; + psp->boot_time_tmr = false; break; case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 4): psp_v11_0_set_psp_funcs(psp); psp->autoload_supported = false; + psp->boot_time_tmr = false; break; case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 7): @@ -188,15 +195,20 @@ static int psp_early_init(void *handle) case IP_VERSION(11, 0, 12): case IP_VERSION(11, 0, 13): psp_v11_0_set_psp_funcs(psp); - psp->autoload_supported = true; + psp->boot_time_tmr = false; break; case IP_VERSION(11, 0, 3): case IP_VERSION(12, 0, 1): psp_v12_0_set_psp_funcs(psp); + psp->autoload_supported = false; + psp->boot_time_tmr = false; break; case IP_VERSION(13, 0, 2): + psp->boot_time_tmr = false; + fallthrough; case IP_VERSION(13, 0, 6): psp_v13_0_set_psp_funcs(psp); + psp->autoload_supported = false; break; case IP_VERSION(13, 0, 1): case IP_VERSION(13, 0, 3): @@ -204,25 +216,31 @@ static int psp_early_init(void *handle) case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 11): case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 1): psp_v13_0_set_psp_funcs(psp); - psp->autoload_supported = true; + psp->boot_time_tmr = false; break; case IP_VERSION(11, 0, 8): if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { psp_v11_0_8_set_psp_funcs(psp); - psp->autoload_supported = false; } + psp->autoload_supported = false; + psp->boot_time_tmr = false; break; case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 10): psp_v13_0_set_psp_funcs(psp); - psp->autoload_supported = true; adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); + psp->boot_time_tmr = false; break; case IP_VERSION(13, 0, 4): psp_v13_0_4_set_psp_funcs(psp); - psp->autoload_supported = true; + psp->boot_time_tmr = false; + break; + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + psp_v14_0_set_psp_funcs(psp); break; default: return -EINVAL; @@ -230,6 +248,8 @@ static int psp_early_init(void *handle) psp->adev = adev; + 
adev->psp_timeout = 20000; + psp_check_pmfw_centralized_cstate_management(psp); if (amdgpu_sriov_vf(adev)) @@ -291,21 +311,22 @@ static int psp_memory_training_init(struct psp_context *psp) struct psp_memory_training_context *ctx = &psp->mem_train_ctx; if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { - DRM_DEBUG("memory training is not supported!\n"); + dev_dbg(psp->adev->dev, "memory training is not supported!\n"); return 0; } ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); if (ctx->sys_cache == NULL) { - DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); + dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n"); ret = -ENOMEM; goto Err_out; } - DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", - ctx->train_data_size, - ctx->p2c_train_data_offset, - ctx->c2p_train_data_offset); + dev_dbg(psp->adev->dev, + "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; return 0; @@ -407,7 +428,7 @@ static int psp_sw_init(void *handle) psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!psp->cmd) { - DRM_ERROR("Failed to allocate memory to command buffer!\n"); + dev_err(adev->dev, "Failed to allocate memory to command buffer!\n"); ret = -ENOMEM; } @@ -454,13 +475,13 @@ static int psp_sw_init(void *handle) if (mem_training_ctx->enable_mem_training) { ret = psp_memory_training_init(psp); if (ret) { - DRM_ERROR("Failed to initialize memory training!\n"); + dev_err(adev->dev, "Failed to initialize memory training!\n"); return ret; } ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); if (ret) { - DRM_ERROR("Failed to process memory training!\n"); + dev_err(adev->dev, "Failed to process memory training!\n"); return ret; } } @@ -626,7 +647,7 @@ psp_cmd_submit_buf(struct psp_context *psp, { int ret; int index; - int timeout = 20000; + int timeout = psp->adev->psp_timeout; bool ras_intr = false; bool skip_unsupport = false; @@ -675,9 +696,11 @@ psp_cmd_submit_buf(struct psp_context *psp, */ if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { if (ucode) - DRM_WARN("failed to load ucode %s(0x%X) ", - amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); - DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n", + dev_warn(psp->adev->dev, + "failed to load ucode %s(0x%X) ", + amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); + dev_warn(psp->adev->dev, + "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, psp->cmd_buf_mem->resp.status); /* If any firmware (including CAP) load fails under SRIOV, it should @@ -771,16 +794,6 @@ static int psp_load_toc(struct psp_context *psp, return ret; } -static bool psp_boottime_tmr(struct psp_context *psp) -{ - switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { - case IP_VERSION(13, 0, 6): - return true; - default: - return false; - } -} - /* Set up Trusted Memory Region */ static int psp_tmr_init(struct psp_context *psp) { @@ -807,12 +820,12 @@ static int psp_tmr_init(struct psp_context *psp) psp->fw_pri_buf) { ret = psp_load_toc(psp, &tmr_size); if (ret) { - DRM_ERROR("Failed to load toc\n"); + dev_err(psp->adev->dev, "Failed to load toc\n"); return ret; } } - if (!psp->tmr_bo) { + if (!psp->tmr_bo && !psp->boot_time_tmr) { pptr = amdgpu_sriov_vf(psp->adev) ? 
&tmr_buf : NULL; ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, @@ -855,7 +868,7 @@ static int psp_tmr_load(struct psp_context *psp) psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); if (psp->tmr_bo) - DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", + dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n", amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); ret = psp_cmd_submit_buf(psp, NULL, cmd, @@ -1113,7 +1126,7 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, psp_prep_reg_prog_cmd_buf(cmd, reg, value); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); if (ret) - DRM_ERROR("PSP failed to program reg id %d", reg); + dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg); release_psp_cmd_buf(psp); @@ -1526,22 +1539,22 @@ static void psp_ras_ta_check_status(struct psp_context *psp) switch (ras_cmd->ras_status) { case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: dev_warn(psp->adev->dev, - "RAS WARNING: cmd failed due to unsupported ip\n"); + "RAS WARNING: cmd failed due to unsupported ip\n"); break; case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: dev_warn(psp->adev->dev, - "RAS WARNING: cmd failed due to unsupported error injection\n"); + "RAS WARNING: cmd failed due to unsupported error injection\n"); break; case TA_RAS_STATUS__SUCCESS: break; case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) dev_warn(psp->adev->dev, - "RAS WARNING: Inject error to critical region is not allowed\n"); + "RAS WARNING: Inject error to critical region is not allowed\n"); break; default: dev_warn(psp->adev->dev, - "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); + "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); break; } } @@ -1565,7 +1578,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) return ret; if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { - DRM_WARN("RAS: Unsupported Interface"); + dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n"); return -EINVAL; } @@ -1715,7 +1728,7 @@ int psp_ras_initialize(struct psp_context *psp) psp->ras_context.context.initialized = true; else { if (ras_cmd->ras_status) - dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); + dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); /* fail to load RAS TA */ psp->ras_context.context.initialized = false; @@ -1779,6 +1792,31 @@ int psp_ras_trigger_error(struct psp_context *psp, return 0; } + +int psp_ras_query_address(struct psp_context *psp, + struct ta_ras_query_address_input *addr_in, + struct ta_ras_query_address_output *addr_out) +{ + struct ta_ras_shared_memory *ras_cmd; + int ret; + + if (!psp->ras_context.context.initialized) + return -EINVAL; + + ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; + memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); + + ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS; + ras_cmd->ras_in_message.address = *addr_in; + + ret = psp_ras_invoke(psp, ras_cmd->cmd_id); + if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status) + return -EINVAL; + + *addr_out = ras_cmd->ras_out_message.address; + + return 0; +} // ras end // HDCP start @@ -2125,19 +2163,14 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) return ret; } -int amdgpu_psp_query_boot_status(struct amdgpu_device *adev) +bool amdgpu_psp_get_ras_capability(struct psp_context *psp) { - struct psp_context *psp = &adev->psp; - int ret = 0; - - if 
(amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) - return 0; - if (psp->funcs && - psp->funcs->query_boot_status) - ret = psp->funcs->query_boot_status(psp); - - return ret; + psp->funcs->get_ras_capability) { + return psp->funcs->get_ras_capability(psp); + } else { + return false; + } } static int psp_hw_start(struct psp_context *psp) @@ -2150,7 +2183,7 @@ static int psp_hw_start(struct psp_context *psp) (psp->funcs->bootloader_load_kdb != NULL)) { ret = psp_bootloader_load_kdb(psp); if (ret) { - DRM_ERROR("PSP load kdb failed!\n"); + dev_err(adev->dev, "PSP load kdb failed!\n"); return ret; } } @@ -2159,7 +2192,7 @@ static int psp_hw_start(struct psp_context *psp) (psp->funcs->bootloader_load_spl != NULL)) { ret = psp_bootloader_load_spl(psp); if (ret) { - DRM_ERROR("PSP load spl failed!\n"); + dev_err(adev->dev, "PSP load spl failed!\n"); return ret; } } @@ -2168,7 +2201,7 @@ static int psp_hw_start(struct psp_context *psp) (psp->funcs->bootloader_load_sysdrv != NULL)) { ret = psp_bootloader_load_sysdrv(psp); if (ret) { - DRM_ERROR("PSP load sys drv failed!\n"); + dev_err(adev->dev, "PSP load sys drv failed!\n"); return ret; } } @@ -2177,7 +2210,7 @@ static int psp_hw_start(struct psp_context *psp) (psp->funcs->bootloader_load_soc_drv != NULL)) { ret = psp_bootloader_load_soc_drv(psp); if (ret) { - DRM_ERROR("PSP load soc drv failed!\n"); + dev_err(adev->dev, "PSP load soc drv failed!\n"); return ret; } } @@ -2186,7 +2219,7 @@ static int psp_hw_start(struct psp_context *psp) (psp->funcs->bootloader_load_intf_drv != NULL)) { ret = psp_bootloader_load_intf_drv(psp); if (ret) { - DRM_ERROR("PSP load intf drv failed!\n"); + dev_err(adev->dev, "PSP load intf drv failed!\n"); return ret; } } @@ -2195,7 +2228,7 @@ static int psp_hw_start(struct psp_context *psp) (psp->funcs->bootloader_load_dbg_drv != NULL)) { ret = psp_bootloader_load_dbg_drv(psp); if (ret) { - DRM_ERROR("PSP load dbg drv failed!\n"); + dev_err(adev->dev, "PSP load dbg drv failed!\n"); return ret; } } @@ -2204,7 +2237,7 @@ static int psp_hw_start(struct psp_context *psp) (psp->funcs->bootloader_load_ras_drv != NULL)) { ret = psp_bootloader_load_ras_drv(psp); if (ret) { - DRM_ERROR("PSP load ras_drv failed!\n"); + dev_err(adev->dev, "PSP load ras_drv failed!\n"); return ret; } } @@ -2213,7 +2246,7 @@ static int psp_hw_start(struct psp_context *psp) (psp->funcs->bootloader_load_sos != NULL)) { ret = psp_bootloader_load_sos(psp); if (ret) { - DRM_ERROR("PSP load sos failed!\n"); + dev_err(adev->dev, "PSP load sos failed!\n"); return ret; } } @@ -2221,17 +2254,17 @@ static int psp_hw_start(struct psp_context *psp) ret = psp_ring_create(psp, PSP_RING_TYPE__KM); if (ret) { - DRM_ERROR("PSP create ring failed!\n"); + dev_err(adev->dev, "PSP create ring failed!\n"); return ret; } if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) goto skip_pin_bo; - if (!psp_boottime_tmr(psp)) { + if (!psp->boot_time_tmr || psp->autoload_supported) { ret = psp_tmr_init(psp); if (ret) { - DRM_ERROR("PSP tmr init failed!\n"); + dev_err(adev->dev, "PSP tmr init failed!\n"); return ret; } } @@ -2248,10 +2281,12 @@ skip_pin_bo: return ret; } - ret = psp_tmr_load(psp); - if (ret) { - DRM_ERROR("PSP load tmr failed!\n"); - return ret; + if (!psp->boot_time_tmr || !psp->autoload_supported) { + ret = psp_tmr_load(psp); + if (ret) { + dev_err(adev->dev, "PSP load tmr failed!\n"); + return ret; + } } return 0; @@ -2462,6 +2497,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_P2S_TABLE: *type = GFX_FW_TYPE_P2S_TABLE; 
break; + case AMDGPU_UCODE_ID_JPEG_RAM: + *type = GFX_FW_TYPE_JPEG_RAM; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; @@ -2518,7 +2556,8 @@ static void psp_print_fw_hdr(struct psp_context *psp, } } -static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, +static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, + struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd) { int ret; @@ -2531,7 +2570,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); if (ret) - DRM_ERROR("Unknown firmware type\n"); + dev_err(psp->adev->dev, "Unknown firmware type\n"); return ret; } @@ -2542,7 +2581,7 @@ int psp_execute_ip_fw_load(struct psp_context *psp, int ret = 0; struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd); + ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); if (!ret) { ret = psp_cmd_submit_buf(psp, ucode, cmd, psp->fence_buf_mc_addr); @@ -2601,13 +2640,13 @@ static int psp_load_smu_fw(struct psp_context *psp) amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); if (ret) - DRM_WARN("Failed to set MP1 state prepare for reload\n"); + dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); } ret = psp_execute_ip_fw_load(psp, ucode); if (ret) - DRM_ERROR("PSP load smu failed!\n"); + dev_err(adev->dev, "PSP load smu failed!\n"); return ret; } @@ -2712,7 +2751,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp) adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { ret = psp_rlc_autoload_start(psp); if (ret) { - DRM_ERROR("Failed to start rlc autoload\n"); + dev_err(adev->dev, "Failed to start rlc autoload\n"); return ret; } } @@ -2734,7 +2773,7 @@ static int psp_load_fw(struct amdgpu_device *adev) ret = psp_ring_init(psp, PSP_RING_TYPE__KM); if (ret) { - DRM_ERROR("PSP ring init failed!\n"); + dev_err(adev->dev, "PSP ring init failed!\n"); goto failed; } } @@ -2749,13 +2788,13 @@ static int psp_load_fw(struct amdgpu_device *adev) ret = psp_asd_initialize(psp); if (ret) { - DRM_ERROR("PSP load asd failed!\n"); + dev_err(adev->dev, "PSP load asd failed!\n"); goto failed1; } ret = psp_rl_load(adev); if (ret) { - DRM_ERROR("PSP load RL failed!\n"); + dev_err(adev->dev, "PSP load RL failed!\n"); goto failed1; } @@ -2775,7 +2814,7 @@ static int psp_load_fw(struct amdgpu_device *adev) ret = psp_ras_initialize(psp); if (ret) dev_err(psp->adev->dev, - "RAS: Failed to initialize RAS\n"); + "RAS: Failed to initialize RAS\n"); ret = psp_hdcp_initialize(psp); if (ret) @@ -2828,7 +2867,7 @@ static int psp_hw_init(void *handle) ret = psp_load_fw(adev); if (ret) { - DRM_ERROR("PSP firmware loading failed\n"); + dev_err(adev->dev, "PSP firmware loading failed\n"); goto failed; } @@ -2875,7 +2914,7 @@ static int psp_suspend(void *handle) psp->xgmi_context.context.initialized) { ret = psp_xgmi_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate xgmi ta\n"); + dev_err(adev->dev, "Failed to terminate xgmi ta\n"); goto out; } } @@ -2883,46 +2922,46 @@ static int psp_suspend(void *handle) if (psp->ta_fw) { ret = psp_ras_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate ras ta\n"); + dev_err(adev->dev, "Failed to terminate ras ta\n"); goto out; } ret = psp_hdcp_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate hdcp ta\n"); + dev_err(adev->dev, "Failed to terminate hdcp ta\n"); goto out; } ret = 
psp_dtm_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate dtm ta\n"); + dev_err(adev->dev, "Failed to terminate dtm ta\n"); goto out; } ret = psp_rap_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate rap ta\n"); + dev_err(adev->dev, "Failed to terminate rap ta\n"); goto out; } ret = psp_securedisplay_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate securedisplay ta\n"); + dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); goto out; } } ret = psp_asd_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate asd\n"); + dev_err(adev->dev, "Failed to terminate asd\n"); goto out; } ret = psp_tmr_terminate(psp); if (ret) { - DRM_ERROR("Failed to terminate tmr\n"); + dev_err(adev->dev, "Failed to terminate tmr\n"); goto out; } ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); if (ret) - DRM_ERROR("PSP ring stop failed\n"); + dev_err(adev->dev, "PSP ring stop failed\n"); out: return ret; @@ -2934,12 +2973,12 @@ static int psp_resume(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - DRM_INFO("PSP is resuming...\n"); + dev_info(adev->dev, "PSP is resuming...\n"); if (psp->mem_train_ctx.enable_mem_training) { ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); if (ret) { - DRM_ERROR("Failed to process memory training!\n"); + dev_err(adev->dev, "Failed to process memory training!\n"); return ret; } } @@ -2956,7 +2995,7 @@ static int psp_resume(void *handle) ret = psp_asd_initialize(psp); if (ret) { - DRM_ERROR("PSP load asd failed!\n"); + dev_err(adev->dev, "PSP load asd failed!\n"); goto failed; } @@ -2980,7 +3019,7 @@ static int psp_resume(void *handle) ret = psp_ras_initialize(psp); if (ret) dev_err(psp->adev->dev, - "RAS: Failed to initialize RAS\n"); + "RAS: Failed to initialize RAS\n"); ret = psp_hdcp_initialize(psp); if (ret) @@ -3008,7 +3047,7 @@ static int psp_resume(void *handle) return 0; failed: - DRM_ERROR("PSP resume failed\n"); + dev_err(adev->dev, "PSP resume failed\n"); mutex_unlock(&adev->firmware.mutex); return ret; } @@ -3069,9 +3108,11 @@ int psp_ring_cmd_submit(struct psp_context *psp, write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); /* Check invalid write_frame ptr address */ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); + dev_err(adev->dev, + "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", + ring_buffer_start, ring_buffer_end, write_frame); + dev_err(adev->dev, + "write_frame is pointing to address out of bounds\n"); return -EINVAL; } @@ -3597,7 +3638,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, int ret; if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { - DRM_INFO("PSP block is not ready yet."); + dev_info(adev->dev, "PSP block is not ready yet.\n"); return -EBUSY; } @@ -3606,7 +3647,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, mutex_unlock(&adev->psp.mutex); if (ret) { - DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); + dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret); return ret; } @@ -3628,7 +3669,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, void *fw_pri_cpu_addr; if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { - DRM_INFO("PSP block is not ready yet."); + dev_err(adev->dev, 
"PSP block is not ready yet."); return -EBUSY; } @@ -3661,7 +3702,7 @@ rel_buf: release_firmware(usbc_pd_fw); fail: if (ret) { - DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); + dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret); count = ret; } @@ -3708,7 +3749,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, /* Safeguard against memory drain */ if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { - dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B); + dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); kvfree(adev->psp.vbflash_tmp_buf); adev->psp.vbflash_tmp_buf = NULL; adev->psp.vbflash_image_size = 0; @@ -3727,7 +3768,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, adev->psp.vbflash_image_size += count; mutex_unlock(&adev->psp.mutex); - dev_dbg(adev->dev, "IFWI staged for update"); + dev_dbg(adev->dev, "IFWI staged for update\n"); return count; } @@ -3747,7 +3788,7 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, if (adev->psp.vbflash_image_size == 0) return -EINVAL; - dev_dbg(adev->dev, "PSP IFWI flash process initiated"); + dev_dbg(adev->dev, "PSP IFWI flash process initiated\n"); ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, AMDGPU_GPU_PAGE_SIZE, @@ -3772,11 +3813,11 @@ rel_buf: adev->psp.vbflash_image_size = 0; if (ret) { - dev_err(adev->dev, "Failed to load IFWI, err = %d", ret); + dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret); return ret; } - dev_dbg(adev->dev, "PSP IFWI flash process done"); + dev_dbg(adev->dev, "PSP IFWI flash process done\n"); return 0; } @@ -3930,3 +3971,11 @@ const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = { .rev = 4, .funcs = &psp_ip_funcs, }; + +const struct amdgpu_ip_block_version psp_v14_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_PSP, + .major = 14, + .minor = 0, + .rev = 0, + .funcs = &psp_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index c4d9cbde55b9..ee16f134ae92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -134,7 +134,7 @@ struct psp_funcs { int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr); int (*vbflash_stat)(struct psp_context *psp); int (*fatal_error_recovery_quirk)(struct psp_context *psp); - int (*query_boot_status)(struct psp_context *psp); + bool (*get_ras_capability)(struct psp_context *psp); }; struct ta_funcs { @@ -203,7 +203,7 @@ struct psp_ras_context { #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 #define GDDR6_MEM_TRAINING_OFFSET 0x8000 /*Define the VRAM size that will be encroached by BIST training.*/ -#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000 +#define BIST_MEM_TRAINING_ENCROACHED_SIZE 0x2000000 enum psp_memory_training_init_flag { PSP_MEM_TRAIN_NOT_SUPPORT = 0x0, @@ -364,6 +364,8 @@ struct psp_context { atomic_t fence_value; /* flag to mark whether gfx fw autoload is supported or not */ bool autoload_supported; + /* flag to mark whether psp use runtime TMR or boottime TMR */ + bool boot_time_tmr; /* flag to mark whether df cstate management centralized to PMFW */ bool pmfw_centralized_cstate_management; @@ -463,6 +465,7 @@ extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block; extern const struct amdgpu_ip_block_version psp_v12_0_ip_block; extern const struct amdgpu_ip_block_version psp_v13_0_ip_block; extern const struct 
amdgpu_ip_block_version psp_v13_0_4_ip_block; +extern const struct amdgpu_ip_block_version psp_v14_0_ip_block; extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t field_val, uint32_t mask, bool check_changed); @@ -502,6 +505,9 @@ int psp_ras_enable_features(struct psp_context *psp, int psp_ras_trigger_error(struct psp_context *psp, struct ta_ras_trigger_error_input *info, uint32_t instance_mask); int psp_ras_terminate(struct psp_context *psp); +int psp_ras_query_address(struct psp_context *psp, + struct ta_ras_query_address_input *addr_in, + struct ta_ras_query_address_output *addr_out); int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id); @@ -538,7 +544,5 @@ int psp_spatial_partition(struct psp_context *psp, int mode); int is_psp_fw_valid(struct psp_bin_desc bin); int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev); - -int amdgpu_psp_query_boot_status(struct amdgpu_device *adev); - +bool amdgpu_psp_get_ras_capability(struct psp_context *psp); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index 468a67b302d4..ca5c86e5f7cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size } } - if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len)) + if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len)) ret = -EFAULT; err_free_shared_buf: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 31823a30dea2..46f3d1013e8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -39,6 +39,7 @@ #include "nbio_v7_9.h" #include "atom.h" #include "amdgpu_reset.h" +#include "amdgpu_psp.h" #ifdef CONFIG_X86_MCE_AMD #include <asm/mce.h> @@ -73,6 +74,8 @@ const char *ras_block_string[] = { "mca", "vcn", "jpeg", + "ih", + "mpio", }; const char *ras_mca_block_string[] = { @@ -94,7 +97,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) if (!ras_block) return "NULL"; - if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT) + if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT || + ras_block->block >= ARRAY_SIZE(ras_block_string)) return "OUT OF RANGE"; if (ras_block->block == AMDGPU_RAS_BLOCK__MCA) @@ -116,6 +120,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) /* typical ECC bad page rate is 1 bad page per 100MB VRAM */ #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) +#define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms + enum amdgpu_ras_retire_page_reservation { AMDGPU_RAS_RETIRE_PAGE_RESERVED, AMDGPU_RAS_RETIRE_PAGE_PENDING, @@ -628,8 +634,12 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); } - return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, - "ce", info.ce_count); + if (info.head.block == AMDGPU_RAS_BLOCK__UMC) + return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count, + "ce", info.ce_count, "de", info.de_count); + else + return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, + "ce", info.ce_count); } /* obj begin */ @@ -1036,7 +1046,8 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, struct ras_manager *ras_mgr, struct ras_err_data *err_data, const char 
*blk_name, - bool is_ue) + bool is_ue, + bool is_de) { struct amdgpu_smuio_mcm_config_info *mcm_info; struct ras_err_node *err_node; @@ -1065,25 +1076,50 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, } } else { - for_each_ras_error(err_node, err_data) { - err_info = &err_node->err_info; - mcm_info = &err_info->mcm_info; - if (err_info->ce_count) { + if (is_de) { + for_each_ras_error(err_node, err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + if (err_info->de_count) { + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld new deferred hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->de_count, + blk_name); + } + } + + for_each_ras_error(err_node, &ras_mgr->err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new correctable hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->ce_count, - blk_name); + "%lld deferred hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, + err_info->de_count, blk_name); + } + } else { + for_each_ras_error(err_node, err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + if (err_info->ce_count) { + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld new correctable hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ce_count, + blk_name); + } } - } - for_each_ras_error(err_node, &ras_mgr->err_data) { - err_info = &err_node->err_info; - mcm_info = &err_info->mcm_info; - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld correctable hardware errors detected in total in %s block\n", - mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name); + for_each_ras_error(err_node, &ras_mgr->err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld correctable hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, + err_info->ce_count, blk_name); + } } } } @@ -1102,7 +1138,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, if (err_data->ce_count) { if (err_data_has_source_info(err_data)) { - amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false); + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + blk_name, false, false); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && @@ -1124,7 +1161,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, if (err_data->ue_count) { if (err_data_has_source_info(err_data)) { - amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true); + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + blk_name, true, false); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && @@ -1144,6 +1182,28 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, } } + if (err_data->de_count) { + if (err_data_has_source_info(err_data)) { + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + blk_name, false, true); + } else if (!adev->aid_mask && + adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { + dev_info(adev->dev, "socket: %d, die: %d " + "%ld deferred hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + 
adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.de_count, + blk_name); + } else { + dev_info(adev->dev, "%ld deferred hardware errors " + "detected in %s block\n", + ras_mgr->err_data.de_count, + blk_name); + } + } } static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data) @@ -1154,7 +1214,8 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s if (err_data_has_source_info(err_data)) { for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; - + amdgpu_ras_error_statistic_de_count(&obj->err_data, + &err_info->mcm_info, NULL, err_info->de_count); amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, NULL, err_info->ce_count); amdgpu_ras_error_statistic_ue_count(&obj->err_data, @@ -1164,9 +1225,72 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s /* for legacy asic path which doesn't have error source info */ obj->err_data.ue_count += err_data->ue_count; obj->err_data.ce_count += err_data->ce_count; + obj->err_data.de_count += err_data->de_count; } } +static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk) +{ + struct ras_common_if head; + + memset(&head, 0, sizeof(head)); + head.block = blk; + + return amdgpu_ras_find_obj(adev, &head); } + +int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + const struct aca_info *aca_info, void *data) +{ + struct ras_manager *obj; + + obj = get_ras_manager(adev, blk); + if (!obj) + return -EINVAL; + + return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data); +} + +int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk) +{ + struct ras_manager *obj; + + obj = get_ras_manager(adev, blk); + if (!obj) + return -EINVAL; + + amdgpu_aca_remove_handle(&obj->aca_handle); + + return 0; +} + +static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + enum aca_error_type type, struct ras_err_data *err_data) +{ + struct ras_manager *obj; + + obj = get_ras_manager(adev, blk); + if (!obj) + return -EINVAL; + + return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data); +} + +ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr, + struct aca_handle *handle, char *buf, void *data) +{ + struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle); + struct ras_query_if info = { + .head = obj->head, + }; + + if (amdgpu_ras_query_error_status(obj->adev, &info)) + return -EINVAL; + + return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, + "ce", info.ce_count); +} + static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, struct ras_query_if *info, struct ras_err_data *err_data, @@ -1174,6 +1298,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, { enum amdgpu_ras_block blk = info ? 
info->head.block : AMDGPU_RAS_BLOCK_COUNT; struct amdgpu_ras_block_object *block_obj = NULL; + int ret; if (blk == AMDGPU_RAS_BLOCK_COUNT) return -EINVAL; @@ -1203,9 +1328,19 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, } } } else { - /* FIXME: add code to check return value later */ - amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data); - amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data); + if (amdgpu_aca_is_enabled(adev)) { + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data); + if (ret) + return ret; + + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data); + if (ret) + return ret; + } else { + /* FIXME: add code to check return value later */ + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data); + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data); + } } return 0; @@ -1239,6 +1374,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; + info->de_count = obj->err_data.de_count; amdgpu_ras_error_generate_report(adev, info, &err_data); @@ -1254,6 +1390,7 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; struct amdgpu_hive_info *hive; int hive_ras_recovery = 0; @@ -1264,7 +1401,7 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, } if (!amdgpu_ras_is_supported(adev, block) || - !amdgpu_ras_get_mca_debug_mode(adev)) + !amdgpu_ras_get_aca_debug_mode(adev)) return -EOPNOTSUPP; hive = amdgpu_get_xgmi_hive(adev); @@ -1276,7 +1413,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, /* skip ras error reset in gpu reset */ if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) || hive_ras_recovery) && - mca_funcs && mca_funcs->mca_set_debug_mode) + ((smu_funcs && smu_funcs->set_debug_mode) || + (mca_funcs && mca_funcs->mca_set_debug_mode))) return -EOPNOTSUPP; if (block_obj->hw_ops->reset_ras_error_count) @@ -1772,7 +1910,10 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) } } - amdgpu_mca_smu_debugfs_init(adev, dir); + if (amdgpu_aca_is_enabled(adev)) + amdgpu_aca_smu_debugfs_init(adev, dir); + else + amdgpu_mca_smu_debugfs_init(adev, dir); } /* debugfs end */ @@ -1900,7 +2041,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * } } - amdgpu_umc_poison_handler(adev, false); + amdgpu_umc_poison_handler(adev, obj->head.block, false); if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption) poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); @@ -1951,6 +2092,7 @@ static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, */ obj->err_data.ue_count += err_data.ue_count; obj->err_data.ce_count += err_data.ce_count; + obj->err_data.de_count += err_data.de_count; } amdgpu_ras_error_data_fini(&err_data); @@ -2520,6 +2662,32 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, } } +static int amdgpu_ras_page_retirement_thread(void *param) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)param; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + while (!kthread_should_stop()) { + 
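With de_count added to ras_query_if, a single amdgpu_ras_query_error_status() call now surfaces all three severities; the ACA sysfs read above is exactly this shape. A minimal consumer sketch (the dump helper itself is illustrative):

        static void my_dump_block_counts(struct amdgpu_device *adev,
                                         struct ras_manager *obj)
        {
                struct ras_query_if info = {
                        .head = obj->head,
                };

                if (amdgpu_ras_query_error_status(adev, &info))
                        return;

                dev_info(adev->dev, "ue: %lu, ce: %lu, de: %lu\n",
                         info.ue_count, info.ce_count, info.de_count);
        }
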
+ wait_event_interruptible(con->page_retirement_wq, + kthread_should_stop() || + atomic_read(&con->page_retirement_req_cnt)); + + if (kthread_should_stop()) + break; + + dev_info(adev->dev, "Start processing page retirement. request:%d\n", + atomic_read(&con->page_retirement_req_cnt)); + + atomic_dec(&con->page_retirement_req_cnt); + + amdgpu_umc_bad_page_polling_timeout(adev, + false, MAX_UMC_POISON_POLLING_TIME_ASYNC); + } + + return 0; +} + int amdgpu_ras_recovery_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -2583,6 +2751,16 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) } } + mutex_init(&con->page_retirement_lock); + init_waitqueue_head(&con->page_retirement_wq); + atomic_set(&con->page_retirement_req_cnt, 0); + con->page_retirement_thread = + kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement"); + if (IS_ERR(con->page_retirement_thread)) { + con->page_retirement_thread = NULL; + dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n"); + } + #ifdef CONFIG_X86_MCE_AMD if ((adev->asic_type == CHIP_ALDEBARAN) && (adev->gmc.xgmi.connected_to_cpu)) @@ -2618,6 +2796,11 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) if (!data) return 0; + if (con->page_retirement_thread) + kthread_stop(con->page_retirement_thread); + + atomic_set(&con->page_retirement_req_cnt, 0); + cancel_work_sync(&con->recovery_work); mutex_lock(&con->recovery_lock); @@ -2679,6 +2862,87 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); } +/* Query ras capablity via atomfirmware interface */ +static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev) +{ + /* mem_ecc cap */ + if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { + dev_info(adev->dev, "MEM ECC is active.\n"); + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else { + dev_info(adev->dev, "MEM ECC is not presented.\n"); + } + + /* sram_ecc cap */ + if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { + dev_info(adev->dev, "SRAM ECC is active.\n"); + if (!amdgpu_sriov_vf(adev)) + adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + else + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | + 1 << AMDGPU_RAS_BLOCK__SDMA | + 1 << AMDGPU_RAS_BLOCK__GFX); + + /* + * VCN/JPEG RAS can be supported on both bare metal and + * SRIOV environment + */ + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) || + amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) || + amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3)) + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | + 1 << AMDGPU_RAS_BLOCK__JPEG); + else + adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | + 1 << AMDGPU_RAS_BLOCK__JPEG); + + /* + * XGMI RAS is not supported if xgmi num physical nodes + * is zero + */ + if (!adev->gmc.xgmi.num_physical_nodes) + adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL); + } else { + dev_info(adev->dev, "SRAM ECC is not presented.\n"); + } +} + +/* Query poison mode from umc/df IP callbacks */ +static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + bool df_poison, umc_poison; + + /* poison setting is useless on SRIOV guest */ + if (amdgpu_sriov_vf(adev) || !con) + return; + + /* Init poison supported flag, the default value is false */ + if (adev->gmc.xgmi.connected_to_cpu || + 
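The retirement thread above is the consumer half of a plain request-counter-plus-waitqueue pattern; kthread_should_stop() appears both in the wait condition and after the wakeup so amdgpu_ras_recovery_fini() can stop a thread that is idle. The producer half is wired up later in this patch in amdgpu_umc_poison_handler(); distilled (the wrapper is illustrative only):

        static void my_post_page_retirement(struct amdgpu_ras *con)
        {
                atomic_inc(&con->page_retirement_req_cnt);
                wake_up(&con->page_retirement_wq);
        }
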
adev->gmc.is_app_apu) { + /* enabled by default when GPU is connected to CPU */ + con->poison_supported = true; + } else if (adev->df.funcs && + adev->df.funcs->query_ras_poison_mode && + adev->umc.ras && + adev->umc.ras->query_ras_poison_mode) { + df_poison = + adev->df.funcs->query_ras_poison_mode(adev); + umc_poison = + adev->umc.ras->query_ras_poison_mode(adev); + + /* Only poison is set in both DF and UMC, we can support it */ + if (df_poison && umc_poison) + con->poison_supported = true; + else if (df_poison != umc_poison) + dev_warn(adev->dev, + "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", + df_poison, umc_poison); + } +} + /* * check hardware's ras ability which will be saved in hw_supported. * if hardware does not support ras, we can skip some ras initializtion and @@ -2695,49 +2959,13 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) if (!amdgpu_ras_asic_supported(adev)) return; - if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { - if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { - dev_info(adev->dev, "MEM ECC is active.\n"); - adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | - 1 << AMDGPU_RAS_BLOCK__DF); - } else { - dev_info(adev->dev, "MEM ECC is not presented.\n"); - } - - if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { - dev_info(adev->dev, "SRAM ECC is active.\n"); - if (!amdgpu_sriov_vf(adev)) - adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | - 1 << AMDGPU_RAS_BLOCK__DF); - else - adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | - 1 << AMDGPU_RAS_BLOCK__SDMA | - 1 << AMDGPU_RAS_BLOCK__GFX); - - /* VCN/JPEG RAS can be supported on both bare metal and - * SRIOV environment - */ - if (amdgpu_ip_version(adev, VCN_HWIP, 0) == - IP_VERSION(2, 6, 0) || - amdgpu_ip_version(adev, VCN_HWIP, 0) == - IP_VERSION(4, 0, 0) || - amdgpu_ip_version(adev, VCN_HWIP, 0) == - IP_VERSION(4, 0, 3)) - adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | - 1 << AMDGPU_RAS_BLOCK__JPEG); - else - adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | - 1 << AMDGPU_RAS_BLOCK__JPEG); + /* query ras capability from psp */ + if (amdgpu_psp_get_ras_capability(&adev->psp)) + goto init_ras_enabled_flag; - /* - * XGMI RAS is not supported if xgmi num physical nodes - * is zero - */ - if (!adev->gmc.xgmi.num_physical_nodes) - adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL); - } else { - dev_info(adev->dev, "SRAM ECC is not presented.\n"); - } + /* query ras capablity from bios */ + if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { + amdgpu_ras_query_ras_capablity_from_vbios(adev); } else { /* driver only manages a few IP blocks RAS feature * when GPU is connected cpu through XGMI */ @@ -2746,13 +2974,21 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) 1 << AMDGPU_RAS_BLOCK__MMHUB); } + /* apply asic specific settings (vega20 only for now) */ amdgpu_ras_get_quirks(adev); + /* query poison mode from umc/df ip callback */ + amdgpu_ras_query_poison_mode(adev); + +init_ras_enabled_flag: /* hw_supported needs to be aligned with RAS block mask. */ adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; adev->ras_enabled = amdgpu_ras_enable == 0 ? 
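The relocated amdgpu_ras_query_poison_mode() boils down to a small predicate: parts connected to a CPU (or app APUs) get poison support unconditionally, discrete parts only when the DF and UMC callbacks agree. Distilled, with illustrative names:

        /* decision table distilled from amdgpu_ras_query_poison_mode() */
        static bool my_poison_supported(bool connected_to_cpu_or_apu,
                                        bool df_poison, bool umc_poison)
        {
                if (connected_to_cpu_or_apu)
                        return true;    /* default-on next to a CPU */

                return df_poison && umc_poison; /* both must agree, else warn */
        }
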
0 : adev->ras_hw_enabled & amdgpu_ras_mask; + + /* aca is disabled by default */ + adev->aca.is_enabled = false; } static void amdgpu_ras_counte_dw(struct work_struct *work) @@ -2780,39 +3016,6 @@ Out: pm_runtime_put_autosuspend(dev->dev); } -static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) -{ - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - bool df_poison, umc_poison; - - /* poison setting is useless on SRIOV guest */ - if (amdgpu_sriov_vf(adev) || !con) - return; - - /* Init poison supported flag, the default value is false */ - if (adev->gmc.xgmi.connected_to_cpu || - adev->gmc.is_app_apu) { - /* enabled by default when GPU is connected to CPU */ - con->poison_supported = true; - } else if (adev->df.funcs && - adev->df.funcs->query_ras_poison_mode && - adev->umc.ras && - adev->umc.ras->query_ras_poison_mode) { - df_poison = - adev->df.funcs->query_ras_poison_mode(adev); - umc_poison = - adev->umc.ras->query_ras_poison_mode(adev); - - /* Only poison is set in both DF and UMC, we can support it */ - if (df_poison && umc_poison) - con->poison_supported = true; - else if (df_poison != umc_poison) - dev_warn(adev->dev, - "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", - df_poison, umc_poison); - } -} - static int amdgpu_get_ras_schema(struct amdgpu_device *adev) { return amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0 | @@ -2917,12 +3120,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev) goto release_con; } - amdgpu_ras_query_poison_mode(adev); - /* Packed socket_id to ras feature mask bits[31:29] */ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) - con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29); + con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << + AMDGPU_RAS_FEATURES_SOCKETID_SHIFT); /* Get RAS schema for particular SOC */ con->schema = amdgpu_get_ras_schema(adev); @@ -3128,7 +3330,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev) amdgpu_ras_disable_all_features(adev, 0); /* Make sure all ras objects are disabled. 
*/ - if (con->features) + if (AMDGPU_RAS_GET_FEATURES(con->features)) amdgpu_ras_disable_all_features(adev, 1); } @@ -3142,15 +3344,29 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; - amdgpu_ras_set_mca_debug_mode(adev, false); + if (amdgpu_aca_is_enabled(adev)) { + if (amdgpu_in_reset(adev)) + r = amdgpu_aca_reset(adev); + else + r = amdgpu_aca_init(adev); + if (r) + return r; + + amdgpu_ras_set_aca_debug_mode(adev, false); + } else { + amdgpu_ras_set_mca_debug_mode(adev, false); + } list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { - if (!node->ras_obj) { + obj = node->ras_obj; + if (!obj) { dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); continue; } - obj = node->ras_obj; + if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block)) + continue; + if (obj->ras_late_init) { r = obj->ras_late_init(adev, &obj->ras_comm); if (r) { @@ -3175,7 +3391,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev) /* Need disable ras on all IPs here before ip [hw/sw]fini */ - if (con->features) + if (AMDGPU_RAS_GET_FEATURES(con->features)) amdgpu_ras_disable_all_features(adev, 0); amdgpu_ras_recovery_fini(adev); return 0; @@ -3208,10 +3424,13 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) amdgpu_ras_fs_fini(adev); amdgpu_ras_interrupt_remove_all(adev); - WARN(con->features, "Feature mask is not cleared"); + if (amdgpu_aca_is_enabled(adev)) + amdgpu_aca_fini(adev); - if (con->features) - amdgpu_ras_disable_all_features(adev, 1); + WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); + + if (AMDGPU_RAS_GET_FEATURES(con->features)) + amdgpu_ras_disable_all_features(adev, 0); cancel_delayed_work_sync(&con->ras_counte_delay_work); @@ -3425,22 +3644,41 @@ int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) if (con) { ret = amdgpu_mca_smu_set_debug_mode(adev, enable); if (!ret) - con->is_mca_debug_mode = enable; + con->is_aca_debug_mode = enable; + } + + return ret; +} + +int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret = 0; + + if (con) { + if (amdgpu_aca_is_enabled(adev)) + ret = amdgpu_aca_smu_set_debug_mode(adev, enable); + else + ret = amdgpu_mca_smu_set_debug_mode(adev, enable); + if (!ret) + con->is_aca_debug_mode = enable; } return ret; } -bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev) +bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; if (!con) return false; - if (mca_funcs && mca_funcs->mca_set_debug_mode) - return con->is_mca_debug_mode; + if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) || + (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode)) + return con->is_aca_debug_mode; else return true; } @@ -3450,15 +3688,16 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; + const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; if (!con) { *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY; return false; } - if (mca_funcs && mca_funcs->mca_set_debug_mode) + if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) *error_query_mode = - 
(con->is_mca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; + (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; else *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; @@ -3699,8 +3938,7 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct } static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, - struct ras_err_addr *err_addr) + struct amdgpu_smuio_mcm_config_info *mcm_info) { struct ras_err_node *err_node; @@ -3712,10 +3950,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d if (!err_node) return NULL; - memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info)); + INIT_LIST_HEAD(&err_node->err_info.err_addr_list); - if (err_addr) - memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr)); + memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info)); err_data->err_list_count++; list_add_tail(&err_node->node, &err_data->err_node_list); @@ -3724,6 +3961,29 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d return &err_node->err_info; } +void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr) +{ + struct ras_err_addr *mca_err_addr; + + mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL); + if (!mca_err_addr) + return; + + INIT_LIST_HEAD(&mca_err_addr->node); + + mca_err_addr->err_status = err_addr->err_status; + mca_err_addr->err_ipid = err_addr->err_ipid; + mca_err_addr->err_addr = err_addr->err_addr; + + list_add_tail(&mca_err_addr->node, &err_info->err_addr_list); +} + +void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr) +{ + list_del(&mca_err_addr->node); + kfree(mca_err_addr); +} + int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, struct amdgpu_smuio_mcm_config_info *mcm_info, struct ras_err_addr *err_addr, u64 count) @@ -3736,10 +3996,13 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, if (!count) return 0; - err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr); + err_info = amdgpu_ras_error_get_info(err_data, mcm_info); if (!err_info) return -EINVAL; + if (err_addr && err_addr->err_status) + amdgpu_ras_add_mca_err_addr(err_info, err_addr); + err_info->ue_count += count; err_data->ue_count += count; @@ -3758,7 +4021,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, if (!count) return 0; - err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr); + err_info = amdgpu_ras_error_get_info(err_data, mcm_info); if (!err_info) return -EINVAL; @@ -3767,3 +4030,135 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, return 0; } + +int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data, + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count) +{ + struct ras_err_info *err_info; + + if (!err_data || !mcm_info) + return -EINVAL; + + if (!count) + return 0; + + err_info = amdgpu_ras_error_get_info(err_data, mcm_info); + if (!err_info) + return -EINVAL; + + if (err_addr && err_addr->err_status) + amdgpu_ras_add_mca_err_addr(err_info, err_addr); + + err_info->de_count += count; + err_data->de_count += count; + + return 0; +} + +#define mmMP0_SMN_C2PMSG_92 0x1609C +#define mmMP0_SMN_C2PMSG_126 0x160BE +static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev, 
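Error addresses move from one embedded ras_err_addr per ras_err_info to a kzalloc'd list, managed by the amdgpu_ras_add_mca_err_addr()/amdgpu_ras_del_mca_err_addr() helpers above. The matching drain loop a consumer would run at teardown looks like this (a sketch; only the add/del helpers are part of this hunk):

        static void my_drain_err_addrs(struct ras_err_info *err_info)
        {
                struct ras_err_addr *addr, *tmp;

                /* _safe variant: each node is freed while walking */
                list_for_each_entry_safe(addr, tmp,
                                         &err_info->err_addr_list, node)
                        amdgpu_ras_del_mca_err_addr(err_info, addr);
        }
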
+ u32 instance, u32 boot_error) +{ + u32 socket_id, aid_id, hbm_id; + u32 reg_data; + u64 reg_addr; + + socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error); + aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error); + hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error); + + /* The pattern for smn addressing in other SOC could be different from + * the one for aqua_vanjaram. We should revisit the code if the pattern + * is changed. In such case, replace the aqua_vanjaram implementation + * with more common helper */ + reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) + + aqua_vanjaram_encode_ext_smn_addressing(instance); + + reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr); + dev_err(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n", + socket_id, aid_id, reg_data); + + if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error)) + dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n", + socket_id, aid_id, hbm_id); + + if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error)) + dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n", + socket_id, aid_id); + + if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error)) + dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n", + socket_id, aid_id); + + if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error)) + dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n", + socket_id, aid_id); + + if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error)) + dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n", + socket_id, aid_id); + + if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error)) + dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n", + socket_id, aid_id); + + if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error)) + dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n", + socket_id, aid_id, hbm_id); + + if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error)) + dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n", + socket_id, aid_id, hbm_id); +} + +static int amdgpu_ras_wait_for_boot_complete(struct amdgpu_device *adev, + u32 instance, u32 *boot_error) +{ + u32 reg_addr; + u32 reg_data; + int retry_loop; + + reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) + + aqua_vanjaram_encode_ext_smn_addressing(instance); + + for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) { + reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr); + if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) { + *boot_error = AMDGPU_RAS_BOOT_SUCEESS; + return 0; + } + msleep(1); + } + + /* The pattern for smn addressing in other SOC could be different from + * the one for aqua_vanjaram. We should revisit the code if the pattern + * is changed. 
In such case, replace the aqua_vanjaram implementation + * with more common helper */ + reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) + + aqua_vanjaram_encode_ext_smn_addressing(instance); + + for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) { + reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr); + if (AMDGPU_RAS_GPU_ERR_BOOT_STATUS(reg_data)) { + *boot_error = reg_data; + return 0; + } + msleep(1); + } + + *boot_error = reg_data; + return -ETIME; +} + +void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances) +{ + u32 boot_error = 0; + u32 i; + + for (i = 0; i < num_instances; i++) { + if (amdgpu_ras_wait_for_boot_complete(adev, i, &boot_error)) + amdgpu_ras_boot_time_error_reporting(adev, i, boot_error); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 76fb85628716..d10e5bb0e52f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -29,9 +29,28 @@ #include "ta_ras_if.h" #include "amdgpu_ras_eeprom.h" #include "amdgpu_smuio.h" +#include "amdgpu_aca.h" struct amdgpu_iv_entry; +#define AMDGPU_RAS_GPU_ERR_MEM_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 0, 0) +#define AMDGPU_RAS_GPU_ERR_FW_LOAD(x) AMDGPU_GET_REG_FIELD(x, 1, 1) +#define AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 2, 2) +#define AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 3, 3) +#define AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 4, 4) +#define AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 5, 5) +#define AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(x) AMDGPU_GET_REG_FIELD(x, 6, 6) +#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7) +#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8) +#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11) +#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 13, 13) +#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31) + +#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000 +#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA +#define AMDGPU_RAS_BOOT_STATUS_MASK 0xFF +#define AMDGPU_RAS_BOOT_SUCEESS 0x80000000 + #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0) /* position of instance value in sub_block_index of * ta_ras_trigger_error_input, the sub block uses lower 12 bits @@ -39,6 +58,12 @@ struct amdgpu_iv_entry; #define AMDGPU_RAS_INST_MASK 0xfffff000 #define AMDGPU_RAS_INST_SHIFT 0xc +#define AMDGPU_RAS_FEATURES_SOCKETID_SHIFT 29 +#define AMDGPU_RAS_FEATURES_SOCKETID_MASK 0xe0000000 + +/* The high three bits indicates socketid */ +#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK) + enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, AMDGPU_RAS_BLOCK__SDMA, @@ -57,6 +82,8 @@ enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__MCA, AMDGPU_RAS_BLOCK__VCN, AMDGPU_RAS_BLOCK__JPEG, + AMDGPU_RAS_BLOCK__IH, + AMDGPU_RAS_BLOCK__MPIO, AMDGPU_RAS_BLOCK__LAST }; @@ -441,10 +468,15 @@ struct amdgpu_ras { /* Indicates smu whether need update bad channel info */ bool update_channel_flag; /* Record status of smu mca debug mode */ - bool is_mca_debug_mode; + bool is_aca_debug_mode; /* Record special requirements of gpu reset caller */ uint32_t gpu_reset_flags; + + struct task_struct *page_retirement_thread; + wait_queue_head_t page_retirement_wq; + struct mutex page_retirement_lock; + atomic_t page_retirement_req_cnt; }; struct ras_fs_data { @@ -453,6 +485,7 @@ struct ras_fs_data { }; 
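The new header macros pack several fields into single 32-bit words: boot_error carries the failure bits plus socket/aid/hbm ids, and the RAS features word hides the socket id in bits [31:29]. A standalone restatement of the arithmetic, assuming AMDGPU_GET_REG_FIELD(x, h, l) extracts bits l..h (the input values below are made up):

        #include <stdint.h>
        #include <stdio.h>

        /* assumed semantics of AMDGPU_GET_REG_FIELD(x, h, l) */
        #define GET_FIELD(x, h, l) (((x) >> (l)) & ((1u << ((h) - (l) + 1)) - 1))

        #define SOCKETID_SHIFT 29          /* ..._FEATURES_SOCKETID_SHIFT */
        #define SOCKETID_MASK  0xe0000000u /* ..._FEATURES_SOCKETID_MASK */

        int main(void)
        {
                uint32_t boot_error = (5u << 8) | (1u << 1);    /* made up */
                uint32_t features = 0x3u | (5u << SOCKETID_SHIFT);

                printf("socket %u, aid %u, fw_load_failed %u\n",
                       GET_FIELD(boot_error, 10, 8),   /* ERR_SOCKET_ID */
                       GET_FIELD(boot_error, 12, 11),  /* ERR_AID_ID */
                       GET_FIELD(boot_error, 1, 1));   /* ERR_FW_LOAD */

                /* AMDGPU_RAS_GET_FEATURES strips the socket id again */
                printf("features 0x%08x -> 0x%08x\n",
                       features, features & ~SOCKETID_MASK);
                return 0;
        }
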
struct ras_err_addr { + struct list_head node; uint64_t err_status; uint64_t err_ipid; uint64_t err_addr; @@ -462,7 +495,8 @@ struct ras_err_info { struct amdgpu_smuio_mcm_config_info mcm_info; u64 ce_count; u64 ue_count; - struct ras_err_addr err_addr; + u64 de_count; + struct list_head err_addr_list; }; struct ras_err_node { @@ -473,6 +507,7 @@ struct ras_err_node { struct ras_err_data { unsigned long ue_count; unsigned long ce_count; + unsigned long de_count; unsigned long err_addr_cnt; struct eeprom_table_record *err_addr; u32 err_list_count; @@ -529,6 +564,8 @@ struct ras_manager { struct ras_ih_data ih_data; struct ras_err_data err_data; + + struct aca_handle aca_handle; }; struct ras_badpage { @@ -548,6 +585,7 @@ struct ras_query_if { struct ras_common_if head; unsigned long ue_count; unsigned long ce_count; + unsigned long de_count; }; struct ras_inject_if { @@ -781,7 +819,8 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev); int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con); int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable); -bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev); +int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable); +bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev); bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, unsigned int *mode); @@ -818,5 +857,20 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, struct amdgpu_smuio_mcm_config_info *mcm_info, struct ras_err_addr *err_addr, u64 count); +int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data, + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count); +void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances); +int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk, + const struct aca_info *aca_info, void *data); +int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk); + +ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr, + struct aca_handle *handle, char *buf, void *data); + +void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, + struct ras_err_addr *err_addr); +void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, + struct ras_err_addr *mca_err_addr); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 2fde93b00cab..b12808c0c331 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -735,6 +735,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD; control->tbl_rai.health_percent = 0; } + + /* ignore the -ENOTSUPP return value */ + amdgpu_dpm_send_rma_reason(adev); } if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index 2c3675d91614..db5791e1a7ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -241,7 +241,7 @@ void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev) table_size = le32_to_cpu(hdr->jt_size); } - for (i = 0; i < table_size; i ++) { + for (i = 0; i < table_size; i++) { dst_ptr[bo_offset + i] = 
cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index b591d33af264..5a17e0ff2ab8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -169,7 +169,7 @@ struct amdgpu_rlc_funcs { void (*stop)(struct amdgpu_device *adev); void (*reset)(struct amdgpu_device *adev); void (*start)(struct amdgpu_device *adev); - void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid); + void (*update_spm_vmid)(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid); bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c index 7a6a67275404..e22cb2b5cd92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c @@ -36,13 +36,24 @@ */ /** + * amdgpu_seq64_get_va_base - Get the seq64 va base address + * + * @adev: amdgpu_device pointer + * + * Returns: + * va base address on success + */ +static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev) +{ + return AMDGPU_VA_RESERVED_SEQ64_START(adev); +} + +/** * amdgpu_seq64_map - Map the seq64 memory to VM * * @adev: amdgpu_device pointer * @vm: vm pointer * @bo_va: bo_va pointer - * @seq64_addr: seq64 vaddr start address - * @size: seq64 pool size * * Map the seq64 memory to the given VM. * @@ -50,11 +61,11 @@ * 0 on success or a negative error code on failure */ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo_va **bo_va, u64 seq64_addr, - uint32_t size) + struct amdgpu_bo_va **bo_va) { struct amdgpu_bo *bo; struct drm_exec exec; + u64 seq64_addr; int r; bo = adev->seq64.sbo; @@ -77,9 +88,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error; } - r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); + seq64_addr = amdgpu_seq64_get_va_base(adev); + r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE, + AMDGPU_PTE_READABLE); if (r) { DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r); amdgpu_vm_bo_del(adev, *bo_va); @@ -144,31 +155,25 @@ error: * amdgpu_seq64_alloc - Allocate a 64 bit memory * * @adev: amdgpu_device pointer - * @gpu_addr: allocated gpu VA start address - * @cpu_addr: allocated cpu VA start address + * @va: VA to access the seq in process address space + * @cpu_addr: CPU address to access the seq * * Alloc a 64 bit memory from seq64 pool. 
* * Returns: * 0 on success or a negative error code on failure */ -int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, - u64 **cpu_addr) +int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr) { unsigned long bit_pos; - u32 offset; bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem); + if (bit_pos >= adev->seq64.num_sem) + return -ENOSPC; - if (bit_pos < adev->seq64.num_sem) { - __set_bit(bit_pos, adev->seq64.used); - offset = bit_pos << 6; /* convert to qw offset */ - } else { - return -EINVAL; - } - - *gpu_addr = offset + AMDGPU_SEQ64_VADDR_START; - *cpu_addr = offset + adev->seq64.cpu_base_addr; + __set_bit(bit_pos, adev->seq64.used); + *va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev); + *cpu_addr = bit_pos + adev->seq64.cpu_base_addr; return 0; } @@ -177,20 +182,17 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, * amdgpu_seq64_free - Free the given 64 bit memory * * @adev: amdgpu_device pointer - * @gpu_addr: gpu start address to be freed + * @va: gpu start address to be freed * * Free the given 64 bit memory from seq64 pool. - * */ -void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr) +void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va) { - u32 offset; - - offset = gpu_addr - AMDGPU_SEQ64_VADDR_START; + unsigned long bit_pos; - offset >>= 6; - if (offset < adev->seq64.num_sem) - __clear_bit(offset, adev->seq64.used); + bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64); + if (bit_pos < adev->seq64.num_sem) + __clear_bit(bit_pos, adev->seq64.used); } /** @@ -229,7 +231,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev) * AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS * 64bit slots */ - r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE, + r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->seq64.sbo, NULL, (void **)&adev->seq64.cpu_base_addr); @@ -238,7 +240,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev) return r; } - memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE); + memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE); adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS; memset(&adev->seq64.used, 0, sizeof(adev->seq64.used)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h index 2196e72be508..4203b2ab318d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h @@ -25,10 +25,9 @@ #ifndef __AMDGPU_SEQ64_H__ #define __AMDGPU_SEQ64_H__ -#define AMDGPU_SEQ64_SIZE (2ULL << 20) -#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8)) -#define AMDGPU_SEQ64_VADDR_OFFSET 0x50000 -#define AMDGPU_SEQ64_VADDR_START (AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET) +#include "amdgpu_vm.h" + +#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_VA_RESERVED_SEQ64_SIZE / sizeof(u64)) struct amdgpu_seq64 { struct amdgpu_bo *sbo; @@ -42,7 +41,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev); int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr); void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr); int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size); + struct amdgpu_bo_va **bo_va); void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c 
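After the seq64 rework above, each slot is a single u64: the bitmap index maps to the GPU VA as base + slot * 8, and on the CPU side plain pointer arithmetic on the u64 * base yields the same 8-byte stride. A standalone restatement of the math (base addresses are made up):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t va_base  = 0xffffffe00000ull;     /* made-up VA base */
                uint64_t cpu_base = 0xffff888000010000ull; /* made-up mapping */
                unsigned long slot = 3; /* as found by find_first_zero_bit */

                /* alloc: bit_pos * sizeof(u64) + base; "cpu_base_addr +
                 * bit_pos" on a u64 * advances 8 bytes per slot as well */
                uint64_t va  = va_base + slot * sizeof(uint64_t);
                uint64_t cpu = cpu_base + slot * sizeof(uint64_t);

                /* free: recover the slot index from the VA alone */
                unsigned long recovered = (va - va_base) / sizeof(uint64_t);

                printf("slot %lu -> va 0x%llx cpu 0x%llx (recovered %lu)\n",
                       slot, (unsigned long long)va,
                       (unsigned long long)cpu, recovered);
                return 0;
        }
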
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 3e12763e477a..0867fd9e15ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -556,6 +556,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) default: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; + else if (load_type == 3) + return AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO; else return AMDGPU_FW_LOAD_PSP; } @@ -678,6 +680,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id) return "UMSCH_MM_DATA"; case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: return "UMSCH_MM_CMD_BUFFER"; + case AMDGPU_UCODE_ID_JPEG_RAM: + return "JPEG"; default: return "UNKNOWN UCODE"; } @@ -1060,7 +1064,8 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, int amdgpu_ucode_create_bo(struct amdgpu_device *adev) { - if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) { + if ((adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) && + (adev->firmware.load_type != AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)) { amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE, (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 4244a13f9f22..619445760037 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -511,6 +511,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_UMSCH_MM_DATA, AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER, AMDGPU_UCODE_ID_P2S_TABLE, + AMDGPU_UCODE_ID_JPEG_RAM, AMDGPU_UCODE_ID_MAXIMUM, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index d65e21914d8c..20436f81856a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -23,6 +23,7 @@ #include "amdgpu.h" #include "umc_v6_7.h" +#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, uint64_t err_addr, @@ -85,18 +86,21 @@ out_fini_err_data: return ret; } -static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, - void *ras_error_status, - struct amdgpu_iv_entry *entry, - bool reset) +static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, + void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + unsigned int error_query_mode; int ret = 0; + unsigned long err_count; - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + amdgpu_ras_get_error_query_mode(adev, &error_query_mode); + + mutex_lock(&con->page_retirement_lock); ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc)); - if (ret == -EOPNOTSUPP) { + if (ret == -EOPNOTSUPP && + error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && adev->umc.ras->ras_block.hw_ops->query_ras_error_count) adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status); @@ -120,7 +124,8 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, */ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status); } - } else if (!ret) { + } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY || + (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) { if (adev->umc.ras && adev->umc.ras->ecc_info_query_ras_error_count) adev->umc.ras->ecc_info_query_ras_error_count(adev, 
ras_error_status); @@ -147,16 +152,13 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, } /* only uncorrectable error needs gpu reset */ - if (err_data->ue_count) { - dev_info(adev->dev, "%ld uncorrectable hardware errors " - "detected in UMC block\n", - err_data->ue_count); - + if (err_data->ue_count || err_data->de_count) { + err_count = err_data->ue_count + err_data->de_count; if ((amdgpu_bad_page_threshold != 0) && err_data->err_addr_cnt) { amdgpu_ras_add_bad_pages(adev, err_data->err_addr, err_data->err_addr_cnt); - amdgpu_ras_save_bad_pages(adev, &(err_data->ue_count)); + amdgpu_ras_save_bad_pages(adev, &err_count); amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); @@ -165,20 +167,87 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, con->update_channel_flag = false; } } - - if (reset) { - /* use mode-2 reset for poison consumption */ - if (!entry) - con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; - amdgpu_ras_reset_gpu(adev); - } } kfree(err_data->err_addr); + + mutex_unlock(&con->page_retirement_lock); +} + +static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, + void *ras_error_status, + struct amdgpu_iv_entry *entry, + bool reset) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + amdgpu_umc_handle_bad_pages(adev, ras_error_status); + + if (err_data->ue_count && reset) { + /* use mode-2 reset for poison consumption */ + if (!entry) + con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; + amdgpu_ras_reset_gpu(adev); + } + return AMDGPU_RAS_SUCCESS; } -int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset) +int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, + bool reset, uint32_t timeout_ms) +{ + struct ras_err_data err_data; + struct ras_common_if head = { + .block = AMDGPU_RAS_BLOCK__UMC, + }; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); + uint32_t timeout = timeout_ms; + + memset(&err_data, 0, sizeof(err_data)); + amdgpu_ras_error_data_init(&err_data); + + do { + + amdgpu_umc_handle_bad_pages(adev, &err_data); + + if (timeout && !err_data.de_count) { + msleep(1); + timeout--; + } + + } while (timeout && !err_data.de_count); + + if (!timeout) + dev_warn(adev->dev, "Can't find bad pages\n"); + + if (err_data.de_count) + dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count); + + if (obj) { + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + obj->err_data.de_count += err_data.de_count; + } + + amdgpu_ras_error_data_fini(&err_data); + + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + + if (reset) { + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + /* use mode-2 reset for poison consumption */ + con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; + amdgpu_ras_reset_gpu(adev); + } + + return 0; +} + +int amdgpu_umc_poison_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block, bool reset) { int ret = AMDGPU_RAS_SUCCESS; @@ -195,27 +264,41 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset) } if (!amdgpu_sriov_vf(adev)) { - struct ras_err_data err_data; - struct ras_common_if head = { - .block = AMDGPU_RAS_BLOCK__UMC, - }; - struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); + if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) { + struct ras_err_data err_data; + struct 
ras_common_if head = { + .block = AMDGPU_RAS_BLOCK__UMC, + }; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); + + ret = amdgpu_ras_error_data_init(&err_data); + if (ret) + return ret; - ret = amdgpu_ras_error_data_init(&err_data); - if (ret) - return ret; + ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset); - ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset); + if (ret == AMDGPU_RAS_SUCCESS && obj) { + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + obj->err_data.de_count += err_data.de_count; + } - if (ret == AMDGPU_RAS_SUCCESS && obj) { - obj->err_data.ue_count += err_data.ue_count; - obj->err_data.ce_count += err_data.ce_count; - } + amdgpu_ras_error_data_fini(&err_data); + } else { + if (reset) { + amdgpu_umc_bad_page_polling_timeout(adev, + reset, MAX_UMC_POISON_POLLING_TIME_SYNC); + } else { + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - amdgpu_ras_error_data_fini(&err_data); + atomic_inc(&con->page_retirement_req_cnt); + + wake_up(&con->page_retirement_wq); + } + } } else { if (adev->virt.ops && adev->virt.ops->ras_poison_handler) - adev->virt.ops->ras_poison_handler(adev); + adev->virt.ops->ras_poison_handler(adev, block); else dev_warn(adev->dev, "No ras_poison_handler interface in SRIOV!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 417a6726c71b..26d2ae498daf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -21,7 +21,7 @@ #ifndef __AMDGPU_UMC_H__ #define __AMDGPU_UMC_H__ #include "amdgpu_ras.h" - +#include "amdgpu_mca.h" /* * (addr / 256) * 4096, the higher 26 bits in ErrorAddr * is the index of 4KB block @@ -64,6 +64,8 @@ struct amdgpu_umc_ras { void *ras_error_status); void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); + bool (*check_ecc_err_status)(struct amdgpu_device *adev, + enum amdgpu_mca_error_type type, void *ras_error_status); /* support different eeprom table version for different asic */ void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr); }; @@ -100,7 +102,8 @@ struct amdgpu_umc { int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev); int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); -int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset); +int amdgpu_umc_poison_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block, bool reset); int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); @@ -118,4 +121,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev, int amdgpu_umc_loop_channels(struct amdgpu_device *adev, umc_func func, void *data); + +int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, + bool reset, uint32_t timeout_ms); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h index 107f9bb0e24f..5b27fc41ffbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h @@ -69,12 +69,12 @@ struct amdgpu_debugfs_gprwave_data { }; enum AMDGPU_DEBUGFS_REGS2_CMDS { - AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0, + AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE = 0, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2, }; enum AMDGPU_DEBUGFS_GPRWAVE_CMDS { - AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE=0, + AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE = 0, }; //reg2 interface diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index bfbf59326ee1..ab820cf52668 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -358,7 +358,7 @@ static int setup_umsch_mm_test(struct amdgpu_device *adev, memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data)); - test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE; + test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM; r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va, test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data)); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index f4963330c772..eb2a88991206 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -59,6 +59,7 @@ #define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin" #define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" #define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin" +#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); @@ -82,6 +83,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_2); MODULE_FIRMWARE(FIRMWARE_VCN4_0_3); MODULE_FIRMWARE(FIRMWARE_VCN4_0_4); MODULE_FIRMWARE(FIRMWARE_VCN4_0_5); +MODULE_FIRMWARE(FIRMWARE_VCN5_0_0); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -1189,7 +1191,7 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, amdgpu_ras_interrupt_dispatch(adev, &ih_data); } else { if (adev->virt.ops && adev->virt.ops->ras_poison_handler) - adev->virt.ops->ras_poison_handler(adev); + adev->virt.ops->ras_poison_handler(adev, ras_if->block); else dev_warn(adev->dev, "No ras_poison_handler interface in SRIOV for VCN!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 514c98ea144f..1985f71b4373 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -160,6 +160,48 @@ } \ } while (0) +#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \ + ({ \ + uint32_t internal_reg_offset, addr; \ + bool video_range, aon_range; \ + \ + addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ + addr <<= 2; \ + video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \ + ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \ + aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \ + ((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \ + if (video_range) \ + internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \ + (VCN_VID_IP_ADDRESS)); \ + else if (aon_range) \ + internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \ + (VCN_AON_IP_ADDRESS)); \ + else \ + internal_reg_offset = (0xFFFFF & addr); \ + \ + internal_reg_offset >>= 2; \ + }) + +#define WREG32_SOC24_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \ + do { \ + if (!indirect) { \ + WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \ + regUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15( \ + VCN, GET_INST(VCN, inst_idx), \ + regUVD_DPG_LMA_CTL, \ + (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + } else { \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \ + offset; \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \ + value; \ + } \ + } while (0) + #define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2) #define 
AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4) #define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 0dcff2889e25..6ff7d3fb2008 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -71,59 +71,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) amdgpu_num_kcq = 2; } -void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, - uint32_t reg0, uint32_t reg1, - uint32_t ref, uint32_t mask, - uint32_t xcc_inst) -{ - struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst]; - struct amdgpu_ring *ring = &kiq->ring; - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - - if (adev->mes.ring.sched.ready) { - amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1, - ref, mask); - return; - } - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, - ref, mask); - r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); - if (r) - goto failed_undo; - - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for IRQ context */ - if (r < 1 && in_interrupt()) - goto failed_kiq; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq; - - return; - -failed_undo: - amdgpu_ring_undo(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); -failed_kiq: - dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1); -} - /** * amdgpu_virt_request_full_gpu() - request full gpu access * @adev: amdgpu device. 
@@ -303,11 +250,11 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev) if (!*data) goto data_failure; - bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL); + bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL); if (!bps) goto bps_failure; - bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL); + bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL); if (!bps_bo) goto bps_bo_failure; @@ -340,8 +287,10 @@ static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev) for (i = data->last_reserved - 1; i >= 0; i--) { bo = data->bps_bo[i]; - amdgpu_bo_free_kernel(&bo, NULL, NULL); - data->bps_bo[i] = bo; + if (bo) { + amdgpu_bo_free_kernel(&bo, NULL, NULL); + data->bps_bo[i] = bo; + } data->last_reserved = i; } } @@ -381,6 +330,8 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev) { struct amdgpu_virt *virt = &adev->virt; struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data; + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct ttm_resource_manager *man = &mgr->manager; struct amdgpu_bo *bo = NULL; uint64_t bp; int i; @@ -396,12 +347,18 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev) * 2) a ras bad page has been reserved (duplicate error injection * for one page); */ - if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, - AMDGPU_GPU_PAGE_SIZE, - &bo, NULL)) - DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp); - - data->bps_bo[i] = bo; + if (ttm_resource_manager_used(man)) { + amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr, + bp << AMDGPU_GPU_PAGE_SHIFT, + AMDGPU_GPU_PAGE_SIZE); + data->bps_bo[i] = NULL; + } else { + if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, + AMDGPU_GPU_PAGE_SIZE, + &bo, NULL)) + DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp); + data->bps_bo[i] = bo; + } data->last_reserved = i + 1; bo = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index d4207e44141f..fa7be5f277b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -88,7 +88,8 @@ struct amdgpu_virt_ops { int (*wait_reset)(struct amdgpu_device *adev); void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, u32 data1, u32 data2, u32 data3); - void (*ras_poison_handler)(struct amdgpu_device *adev); + void (*ras_poison_handler)(struct amdgpu_device *adev, + enum amdgpu_ras_block block); }; /* @@ -332,10 +333,6 @@ static inline bool is_virtual_machine(void) ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); -void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, - uint32_t reg0, uint32_t rreg1, - uint32_t ref, uint32_t mask, - uint32_t xcc_inst); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 453a4b786cfc..8baa2e0935cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -660,8 +660,7 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { .set_powergating_state = amdgpu_vkms_set_powergating_state, }; -const struct 
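The kmalloc_array change in amdgpu_virt_init_ras_err_handler_data() above fixes the classic sizeof-of-pointer bug: sizeof((*data)->bps) is the size of the pointer member itself, not of the records it points to, so the old code under-allocated both arrays. A standalone illustration (struct rec stands in for the real element types):

        #include <stdio.h>
        #include <stdlib.h>

        struct rec { long a, b, c; };   /* stand-in element type */

        int main(void)
        {
                struct rec *bps;

                /* sizeof never evaluates its operand, so this is safe */
                printf("sizeof(bps)  = %zu (pointer: the buggy element size)\n",
                       sizeof(bps));
                printf("sizeof(*bps) = %zu (element: the fixed size)\n",
                       sizeof(*bps));

                bps = calloc(16, sizeof(*bps)); /* same idiom as the fix */
                free(bps);
                return 0;
        }
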
amdgpu_ip_block_version amdgpu_vkms_ip_block = -{ +const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 1, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b8fcb6c55698..ed4a8c5d26d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -234,6 +234,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) } /** + * amdgpu_vm_bo_evicted_user - vm_bo is evicted + * + * @vm_bo: vm_bo which is evicted + * + * State for BOs used by user mode queues which are not at the location they + * should be. + */ +static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo) +{ + vm_bo->moved = true; + spin_lock(&vm_bo->vm->status_lock); + list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); + spin_unlock(&vm_bo->vm->status_lock); +} + +/** * amdgpu_vm_bo_relocated - vm_bo is reloacted * * @vm_bo: vm_bo which is relocated @@ -427,21 +443,25 @@ uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm) } /** - * amdgpu_vm_validate_pt_bos - validate the page table BOs + * amdgpu_vm_validate - validate evicted BOs tracked in the VM * * @adev: amdgpu device pointer * @vm: vm providing the BOs + * @ticket: optional reservation ticket used to reserve the VM * @validate: callback to do the validation * @param: parameter for the validation callback * - * Validate the page table BOs on command submission if neccessary. + * Validate the page table BOs and per-VM BOs on command submission if + * necessary. If a ticket is given, also try to validate evicted user queue + * BOs. They must already be reserved with the given ticket. * * Returns: * Validation result. */ -int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int (*validate)(void *p, struct amdgpu_bo *bo), - void *param) +int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct ww_acquire_ctx *ticket, + int (*validate)(void *p, struct amdgpu_bo *bo), + void *param) { struct amdgpu_vm_bo_base *bo_base; struct amdgpu_bo *shadow; @@ -484,6 +504,28 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, } spin_lock(&vm->status_lock); } + while (ticket && !list_empty(&vm->evicted_user)) { + bo_base = list_first_entry(&vm->evicted_user, + struct amdgpu_vm_bo_base, + vm_status); + spin_unlock(&vm->status_lock); + + bo = bo_base->bo; + + if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) { + pr_warn_ratelimited("Evicted user BO is not reserved in pid %d\n", + vm->task_info.pid); + return -EINVAL; + } + + r = validate(param, bo); + if (r) + return r; + + amdgpu_vm_bo_invalidated(bo_base); + + spin_lock(&vm->status_lock); + } spin_unlock(&vm->status_lock); amdgpu_vm_eviction_lock(vm); @@ -651,7 +693,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid) - adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid); + adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid); if (!ring->is_mes_queue && ring->funcs->emit_gds_switch && gds_switch_needed) { @@ -1426,11 +1468,21 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, } r = amdgpu_vm_bo_update(adev, bo_va, clear); - if (r) - return r; if (unlock) dma_resv_unlock(resv); + if (r) + return r; + + /* Remember evicted DMABuf imports in compute VMs for later + * validation + */ + if 
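amdgpu_vm_validate() (previously amdgpu_vm_validate_pt_bos()) gains a ww_acquire_ctx parameter: when a ticket is passed, BOs on the new evicted_user list are validated too, but only after dma_resv_locking_ctx() confirms the caller already holds their reservations under that ticket. A hypothetical caller shape, assuming a drm_exec context and a placeholder callback:

        static int my_validate_cb(void *param, struct amdgpu_bo *bo)
        {
                return 0;       /* placeholder: real callbacks move/validate bo */
        }

        static int my_validate_vm(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm, struct drm_exec *exec)
        {
                /* &exec->ticket opts in to evicted user-queue BOs;
                 * NULL keeps the old PT/per-VM-BO-only behaviour */
                return amdgpu_vm_validate(adev, vm, &exec->ticket,
                                          my_validate_cb, NULL);
        }
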
@@ -1426,11 +1468,21 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, } r = amdgpu_vm_bo_update(adev, bo_va, clear); - if (r) - return r; if (unlock) dma_resv_unlock(resv); + if (r) + return r; + + /* Remember evicted DMABuf imports in compute VMs for later + * validation + */ + if (vm->is_compute_context && + bo_va->base.bo->tbo.base.import_attach && + (!bo_va->base.bo->tbo.resource || + bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM)) + amdgpu_vm_bo_evicted_user(&bo_va->base); + spin_lock(&vm->status_lock); } spin_unlock(&vm->status_lock); @@ -2196,6 +2248,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; INIT_LIST_HEAD(&vm->evicted); + INIT_LIST_HEAD(&vm->evicted_user); INIT_LIST_HEAD(&vm->relocated); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->idle); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 4740dd65b99d..42f6ddec50c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -135,8 +135,21 @@ struct amdgpu_mem_stats; #define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START) #define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS) -/* Reserve 2MB at top/bottom of address space for kernel use */ -#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) +/* Reserve space at top/bottom of address space for kernel use */ +#define AMDGPU_VA_RESERVED_CSA_SIZE (2ULL << 20) +#define AMDGPU_VA_RESERVED_CSA_START(adev) (((adev)->vm_manager.max_pfn \ + << AMDGPU_GPU_PAGE_SHIFT) \ + - AMDGPU_VA_RESERVED_CSA_SIZE) +#define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20) +#define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \ + - AMDGPU_VA_RESERVED_SEQ64_SIZE) +#define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12) +#define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \ + - AMDGPU_VA_RESERVED_TRAP_SIZE) +#define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16) +#define AMDGPU_VA_RESERVED_TOP (AMDGPU_VA_RESERVED_TRAP_SIZE + \ + AMDGPU_VA_RESERVED_SEQ64_SIZE + \ + AMDGPU_VA_RESERVED_CSA_SIZE) /* See vm_update_mode */ #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) @@ -288,9 +301,12 @@ struct amdgpu_vm { /* Lock to protect vm_bo add/del/move on all lists of vm */ spinlock_t status_lock; - /* BOs who needs a validation */ + /* Per-VM and PT BOs that need validation */ struct list_head evicted; + /* BOs for user mode queues that need validation */ + struct list_head evicted_user; + /* PT BOs which relocated and their parent need an update */ struct list_head relocated; @@ -434,9 +450,10 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, unsigned int num_fences); bool amdgpu_vm_ready(struct amdgpu_vm *vm); uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm); -int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int (*callback)(void *p, struct amdgpu_bo *bo), - void *param); +int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct ww_acquire_ctx *ticket, + int (*callback)(void *p, struct amdgpu_bo *bo), + void *param); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); int amdgpu_vm_update_pdes(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index a6c88f2fe6e5..20d51f6c9bb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1035,15 +1035,74 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) return 0; } +static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct 
aca_bank *bank, enum aca_error_type type, + struct aca_bank_report *report, void *data) +{ + struct amdgpu_device *adev = handle->adev; + const char *error_str; + u64 status; + int ret, ext_error_code; + + ret = aca_bank_info_decode(bank, &report->info); + if (ret) + return ret; + + status = bank->regs[ACA_REG_IDX_STATUS]; + ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status); + + error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ? + xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL; + if (error_str) + dev_info(adev->dev, "%s detected\n", error_str); + + if ((type == ACA_ERROR_TYPE_UE && ext_error_code == 0) || + (type == ACA_ERROR_TYPE_CE && ext_error_code == 6)) + report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]); + + return 0; +} + +static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = { + .aca_bank_generate_report = xgmi_v6_4_0_aca_bank_generate_report, +}; + +static const struct aca_info xgmi_v6_4_0_aca_info = { + .hwip = ACA_HWIP_TYPE_PCS_XGMI, + .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK, + .bank_ops = &xgmi_v6_4_0_aca_bank_ops, +}; + static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { + int r; + if (!adev->gmc.xgmi.supported || adev->gmc.xgmi.num_physical_nodes == 0) return 0; amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL); - return amdgpu_ras_block_late_init(adev, ras_block); + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL, + &xgmi_v6_4_0_aca_info, NULL); + if (r) + goto late_fini; + break; + default: + break; + } + + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + + return r; } uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, @@ -1099,7 +1158,7 @@ static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev) static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base) { - WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL); + WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL); } static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst) @@ -1277,12 +1336,12 @@ static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count += ce_cnt; } -static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status) +static enum aca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status) { const char *error_str; int ext_error_code; - ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status); + ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status); error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ? 
xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL; @@ -1291,9 +1350,9 @@ static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdg switch (ext_error_code) { case 0: - return AMDGPU_MCA_ERROR_TYPE_UE; + return ACA_ERROR_TYPE_UE; case 6: - return AMDGPU_MCA_ERROR_TYPE_CE; + return ACA_ERROR_TYPE_CE; default: return -EINVAL; } @@ -1307,22 +1366,22 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a int xgmi_inst = mcm_info->die_id; u64 status = 0; - status = RREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS); - if (!MCA_REG__STATUS__VAL(status)) + status = RREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS); + if (!ACA_REG__STATUS__VAL(status)) return; switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) { - case AMDGPU_MCA_ERROR_TYPE_UE: + case ACA_ERROR_TYPE_UE: amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL); break; - case AMDGPU_MCA_ERROR_TYPE_CE: + case ACA_ERROR_TYPE_CE: amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL); break; default: break; } - WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL); + WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL); } static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data) diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c new file mode 100644 index 000000000000..8a0773b80864 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c @@ -0,0 +1,122 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "athub_v4_1_0.h" +#include "athub/athub_4_1_0_offset.h" +#include "athub/athub_4_1_0_sh_mask.h" +#include "soc15_common.h" + +static uint32_t athub_v4_1_0_get_cg_cntl(struct amdgpu_device *adev) +{ + uint32_t data; + + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { + case IP_VERSION(4, 1, 0): + data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL); + break; + default: + data = 0; + break; + } + return data; +} + +static void athub_v4_1_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data) +{ + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { + case IP_VERSION(4, 1, 0): + WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data); + break; + default: + break; + } +} + +static void +athub_v4_1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = athub_v4_1_0_get_cg_cntl(adev); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG)) + data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK; + else + data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK; + + if (def != data) + athub_v4_1_0_set_cg_cntl(adev, data); +} + +static void +athub_v4_1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = athub_v4_1_0_get_cg_cntl(adev); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS)) + data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK; + else + data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK; + + if (def != data) + athub_v4_1_0_set_cg_cntl(adev, data); +} + +int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + if (amdgpu_sriov_vf(adev)) + return 0; + + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { + case IP_VERSION(4, 1, 0): + athub_v4_1_0_update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); + athub_v4_1_0_update_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE); + break; + default: + break; + } + + return 0; +} + +void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags) +{ + int data; + + /* AMD_CG_SUPPORT_ATHUB_MGCG */ + data = athub_v4_1_0_get_cg_cntl(adev); + if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_ATHUB_MGCG; + + /* AMD_CG_SUPPORT_ATHUB_LS */ + if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_ATHUB_LS; +} diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h new file mode 100644 index 000000000000..4d18d0998fa8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __ATHUB_V4_1_0_H__ +#define __ATHUB_V4_1_0_H__ + +int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev, + enum amd_clockgating_state state); +void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index a33e890c70d9..b888613f653f 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -62,6 +62,7 @@ typedef struct { struct atom_context *ctx; uint32_t *ps, *ws; + int ps_size, ws_size; int ps_shift; uint16_t start; unsigned last_jump; @@ -70,8 +71,8 @@ typedef struct { } atom_exec_context; int amdgpu_atom_debug; -static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params); -int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); +static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size); +int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size); static uint32_t atom_arg_mask[8] = { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, @@ -223,7 +224,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, (*ptr)++; /* get_unaligned_le32 avoids unaligned accesses from atombios * tables, noticed on a DEC Alpha. */ - val = get_unaligned_le32((u32 *)&ctx->ps[idx]); + if (idx < ctx->ps_size) + val = get_unaligned_le32((u32 *)&ctx->ps[idx]); + else + pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size); if (print) DEBUG("PS[0x%02X,0x%04X]", idx, val); break; @@ -261,7 +265,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, val = gctx->reg_block; break; default: - val = ctx->ws[idx]; + if (idx < ctx->ws_size) + val = ctx->ws[idx]; + else + pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size); } break; case ATOM_ARG_ID: @@ -495,6 +502,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, idx = U8(*ptr); (*ptr)++; DEBUG("PS[0x%02X]", idx); + if (idx >= ctx->ps_size) { + pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size); + return; + } ctx->ps[idx] = cpu_to_le32(val); break; case ATOM_ARG_WS: @@ -527,6 +538,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, gctx->reg_block = val; break; default: + if (idx >= ctx->ws_size) { + pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size); + return; + } ctx->ws[idx] = val; } break; @@ -624,7 +639,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) else SDEBUG(" table: %d\n", idx); if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) - r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); + r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift); if (r) { ctx->abort = true; } @@ -1203,7 +1218,7 @@ static struct { atom_op_div32, ATOM_ARG_WS}, }; -static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params) +static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size) { int base = CU16(ctx->cmd_table + 4 + 2 * index); int 
len, ws, ps, ptr; @@ -1225,12 +1240,16 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, ectx.ps_shift = ps / 4; ectx.start = base; ectx.ps = params; + ectx.ps_size = params_size; ectx.abort = false; ectx.last_jump = 0; - if (ws) + if (ws) { ectx.ws = kcalloc(4, ws, GFP_KERNEL); - else + ectx.ws_size = ws; + } else { ectx.ws = NULL; + ectx.ws_size = 0; + } debug_depth++; while (1) { @@ -1264,7 +1283,7 @@ free: return ret; } -int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params) +int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size) { int r; @@ -1280,7 +1299,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *par /* reset divmul */ ctx->divmul[0] = 0; ctx->divmul[1] = 0; - r = amdgpu_atom_execute_table_locked(ctx, index, params); + r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size); mutex_unlock(&ctx->mutex); return r; } @@ -1552,7 +1571,7 @@ int amdgpu_atom_asic_init(struct atom_context *ctx) if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) return 1; - ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps); + ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h index c11cf18a0f18..b807f6639a4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.h +++ b/drivers/gpu/drm/amd/amdgpu/atom.h @@ -156,7 +156,7 @@ struct atom_context { extern int amdgpu_atom_debug; struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios); -int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); +int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size); int amdgpu_atom_asic_init(struct atom_context *ctx); void amdgpu_atom_destroy(struct atom_context *ctx); bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 10098fdd33fc..3dfc28840a7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -77,7 +77,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc, args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border); break; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc) @@ -106,7 +106,7 @@ void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc) args.ucEnable = ATOM_SCALER_DISABLE; break; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock) @@ -123,7 +123,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock) args.ucCRTC = amdgpu_crtc->crtc_id; args.ucEnable = lock; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state) @@ -139,7 +139,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state) args.ucCRTC = amdgpu_crtc->crtc_id; args.ucEnable = 
state; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state) @@ -155,7 +155,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state) args.ucCRTC = amdgpu_crtc->crtc_id; args.ucBlanking = state; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) @@ -171,7 +171,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) args.ucDispPipeId = amdgpu_crtc->crtc_id; args.ucEnable = state; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) @@ -183,7 +183,7 @@ void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) args.ucEnable = ATOM_INIT; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, @@ -228,7 +228,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = amdgpu_crtc->crtc_id; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } union atom_enable_ss { @@ -293,7 +293,7 @@ static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev, args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); args.v3.ucEnable = enable; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } union adjust_pixel_clock { @@ -395,7 +395,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc, ADJUST_DISPLAY_CONFIG_SS_ENABLE; amdgpu_atom_execute_table(adev->mode_info.atom_context, - index, (uint32_t *)&args); + index, (uint32_t *)&args, sizeof(args)); adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; break; case 3: @@ -428,7 +428,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc, args.v3.sInput.ucExtTransmitterID = 0; amdgpu_atom_execute_table(adev->mode_info.atom_context, - index, (uint32_t *)&args); + index, (uint32_t *)&args, sizeof(args)); adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; if (args.v3.sOutput.ucRefDiv) { amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV; @@ -514,7 +514,7 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev, DRM_ERROR("Unknown table version %d %d\n", frev, crev); return; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } union set_dce_clock { @@ -544,7 +544,7 @@ u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev, args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */ args.v2_1.asParam.ucDCEClkType = clk_type; args.v2_1.asParam.ucDCEClkSrc = clk_src; - 
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10; break; default: @@ -740,7 +740,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc, return; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 87c41e0e9b7c..622634c08c7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -83,7 +83,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan, args.v2.ucDelay = delay / 10; args.v2.ucHPD_ID = chan->rec.hpd; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); *ack = args.v2.ucReplyStatus; @@ -301,7 +301,7 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev, args.ucLaneNum = lane_num; args.ucStatus = 0; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); return args.ucStatus; } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 7672abe6c140..25feab188dfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -335,7 +335,7 @@ amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action) args.ucDacStandard = ATOM_DAC1_PS2; args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } @@ -432,7 +432,7 @@ amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action) break; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) @@ -732,7 +732,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder, break; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } @@ -1136,7 +1136,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a break; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } bool @@ -1164,7 +1164,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector, args.v1.ucAction = action; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); /* wait for the panel to power up */ if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { @@ -1288,7 +1288,7 
@@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder, DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); return; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static void @@ -1633,7 +1633,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder) return; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } /* This only needs to be called once at startup */ @@ -1706,7 +1706,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder, args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; } - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); return true; } else diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c index af0335535f82..a6501114322f 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c @@ -86,7 +86,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan, args.ucSlaveAddr = slave_addr << 1; args.ucLineNumber = chan->rec.i2c_id; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); /* error */ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { @@ -172,5 +172,5 @@ void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr args.ucSlaveAddr = slave_addr; args.ucLineNumber = line_number; - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h index 567a904804bc..9c85ca6358c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h @@ -21,8 +21,7 @@ * */ -static const unsigned int gfx9_SECT_CONTEXT_def_1[] = -{ +static const unsigned int gfx9_SECT_CONTEXT_def_1[] = { 0x00000000, // DB_RENDER_CONTROL 0x00000000, // DB_COUNT_CONTROL 0x00000000, // DB_DEPTH_VIEW @@ -236,8 +235,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] = 0x00000000, // PA_SC_VPORT_ZMIN_15 0x3f800000, // PA_SC_VPORT_ZMAX_15 }; -static const unsigned int gfx9_SECT_CONTEXT_def_2[] = -{ +static const unsigned int gfx9_SECT_CONTEXT_def_2[] = { 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL 0x00000000, // PA_SC_TILE_STEERING_OVERRIDE 0x00000000, // CP_PERFMON_CNTX_CNTL @@ -521,15 +519,13 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] = 0x00000000, // CB_MRT6_EPITCH 0x00000000, // CB_MRT7_EPITCH }; -static const unsigned int gfx9_SECT_CONTEXT_def_3[] = -{ +static const unsigned int gfx9_SECT_CONTEXT_def_3[] = { 0x00000000, // PA_CL_POINT_X_RAD 0x00000000, // PA_CL_POINT_Y_RAD 0x00000000, // PA_CL_POINT_SIZE 0x00000000, // PA_CL_POINT_CULL_RAD }; -static const unsigned int gfx9_SECT_CONTEXT_def_4[] = -{ +static const unsigned int gfx9_SECT_CONTEXT_def_4[] = { 0x00000000, // DB_DEPTH_CONTROL 0x00000000, // DB_EQAA 0x00000000, // CB_COLOR_CONTROL @@ -688,17 +684,14 @@ static const unsigned int 
gfx9_SECT_CONTEXT_def_4[] = 0x00000000, // VGT_GS_OUT_PRIM_TYPE 0x00000000, // IA_ENHANCE }; -static const unsigned int gfx9_SECT_CONTEXT_def_5[] = -{ +static const unsigned int gfx9_SECT_CONTEXT_def_5[] = { 0x00000000, // WD_ENHANCE 0x00000000, // VGT_PRIMITIVEID_EN }; -static const unsigned int gfx9_SECT_CONTEXT_def_6[] = -{ +static const unsigned int gfx9_SECT_CONTEXT_def_6[] = { 0x00000000, // VGT_PRIMITIVEID_RESET }; -static const unsigned int gfx9_SECT_CONTEXT_def_7[] = -{ +static const unsigned int gfx9_SECT_CONTEXT_def_7[] = { 0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP 0x00000000, // VGT_DRAW_PAYLOAD_CNTL 0, // HOLE @@ -766,8 +759,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] = 0x00000000, // VGT_STRMOUT_CONFIG 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG }; -static const unsigned int gfx9_SECT_CONTEXT_def_8[] = -{ +static const unsigned int gfx9_SECT_CONTEXT_def_8[] = { 0x00000000, // PA_SC_CENTROID_PRIORITY_0 0x00000000, // PA_SC_CENTROID_PRIORITY_1 0x00001000, // PA_SC_LINE_CNTL @@ -924,8 +916,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_8[] = 0x00000000, // CB_COLOR7_DCC_BASE 0x00000000, // CB_COLOR7_DCC_BASE_EXT }; -static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] = -{ +static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] = { {gfx9_SECT_CONTEXT_def_1, 0x0000a000, 212 }, {gfx9_SECT_CONTEXT_def_2, 0x0000a0d6, 282 }, {gfx9_SECT_CONTEXT_def_3, 0x0000a1f5, 4 }, diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_si.h b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h index 66e39cdb5cb0..5fd96ddd7f0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/clearstate_si.h +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h @@ -21,8 +21,7 @@ * */ -static const u32 si_SECT_CONTEXT_def_1[] = -{ +static const u32 si_SECT_CONTEXT_def_1[] = { 0x00000000, // DB_RENDER_CONTROL 0x00000000, // DB_COUNT_CONTROL 0x00000000, // DB_DEPTH_VIEW @@ -236,8 +235,7 @@ static const u32 si_SECT_CONTEXT_def_1[] = 0x00000000, // PA_SC_VPORT_ZMIN_15 0x3f800000, // PA_SC_VPORT_ZMAX_15 }; -static const u32 si_SECT_CONTEXT_def_2[] = -{ +static const u32 si_SECT_CONTEXT_def_2[] = { 0x00000000, // CP_PERFMON_CNTX_CNTL 0x00000000, // CP_RINGID 0x00000000, // CP_VMID @@ -511,8 +509,7 @@ static const u32 si_SECT_CONTEXT_def_2[] = 0x00000000, // CB_BLEND6_CONTROL 0x00000000, // CB_BLEND7_CONTROL }; -static const u32 si_SECT_CONTEXT_def_3[] = -{ +static const u32 si_SECT_CONTEXT_def_3[] = { 0x00000000, // PA_CL_POINT_X_RAD 0x00000000, // PA_CL_POINT_Y_RAD 0x00000000, // PA_CL_POINT_SIZE @@ -520,8 +517,7 @@ static const u32 si_SECT_CONTEXT_def_3[] = 0x00000000, // VGT_DMA_BASE_HI 0x00000000, // VGT_DMA_BASE }; -static const u32 si_SECT_CONTEXT_def_4[] = -{ +static const u32 si_SECT_CONTEXT_def_4[] = { 0x00000000, // DB_DEPTH_CONTROL 0x00000000, // DB_EQAA 0x00000000, // CB_COLOR_CONTROL @@ -680,16 +676,13 @@ static const u32 si_SECT_CONTEXT_def_4[] = 0x00000000, // VGT_GS_OUT_PRIM_TYPE 0x00000000, // IA_ENHANCE }; -static const u32 si_SECT_CONTEXT_def_5[] = -{ +static const u32 si_SECT_CONTEXT_def_5[] = { 0x00000000, // VGT_PRIMITIVEID_EN }; -static const u32 si_SECT_CONTEXT_def_6[] = -{ +static const u32 si_SECT_CONTEXT_def_6[] = { 0x00000000, // VGT_PRIMITIVEID_RESET }; -static const u32 si_SECT_CONTEXT_def_7[] = -{ +static const u32 si_SECT_CONTEXT_def_7[] = { 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN 0, // HOLE 0, // HOLE @@ -924,8 +917,7 @@ static const u32 si_SECT_CONTEXT_def_7[] = 0x00000000, // CB_COLOR7_CLEAR_WORD0 0x00000000, // CB_COLOR7_CLEAR_WORD1 }; -static const struct cs_extent_def 
si_SECT_CONTEXT_defs[] = -{ +static const struct cs_extent_def si_SECT_CONTEXT_defs[] = { {si_SECT_CONTEXT_def_1, 0x0000a000, 212 }, {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 }, {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 }, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 587ee632a3b8..221af054d874 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -52,6 +52,7 @@ static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev); static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev); +static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, int hpd); static const u32 crtc_offsets[] = { CRTC0_REGISTER_OFFSET, @@ -364,6 +365,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); + dce_v10_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd); dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f22ec27365bd..69e8b0db6cf7 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -52,6 +52,7 @@ static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); +static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd); static const u32 crtc_offsets[] = { @@ -388,6 +389,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); + dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd); dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 4dbe9b3259b5..60d40201fdd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -273,6 +273,21 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev, WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); } +static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev, + int hpd) +{ + u32 tmp; + + if (hpd >= adev->mode_info.num_hpd) { + DRM_DEBUG("invalid hpd %d\n", hpd); + return; + } + + tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); + tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); +} + /** * dce_v6_0_hpd_init - hpd setup callback. *
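
The same dce_vX_0_hpd_int_ack() helper is added to each DCE variant in this series and called from hpd_init() before the polarity is programmed, so a hotplug interrupt latched while the pin was unconfigured is cleared before the IRQ is enabled and cannot fire as a stale event. The common bring-up order, sketched with placeholder dce_vX names (not a real function in the driver):

    static void dce_vX_hpd_bringup(struct amdgpu_device *adev, int hpd)
    {
            dce_vX_hpd_int_ack(adev, hpd);      /* clear any latched interrupt */
            dce_vX_hpd_set_polarity(adev, hpd); /* arm for the current line level */
            amdgpu_irq_get(adev, &adev->hpd_irq, hpd); /* enable the source */
    }

The hpd_irq handlers below are reduced to the same helper instead of open-coding the read-modify-write ack.
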
@@ -312,6 +327,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) continue; } + dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd); dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } @@ -3089,7 +3105,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t disp_int, mask, tmp; + uint32_t disp_int, mask; unsigned hpd; if (entry->src_data[0] >= adev->mode_info.num_hpd) { @@ -3102,9 +3118,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, mask = interrupt_status_offsets[hpd].hpd; if (disp_int & mask) { - tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); - tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; - WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); + dce_v6_0_hpd_int_ack(adev, hpd); schedule_delayed_work(&adev->hotplug_work, 0); DRM_DEBUG("IH: HPD%d\n", hpd + 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 05bcce23385e..5a5fcc45e452 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -265,6 +265,21 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev, WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); } +static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev, + int hpd) +{ + u32 tmp; + + if (hpd >= adev->mode_info.num_hpd) { + DRM_DEBUG("invalid hpd %d\n", hpd); + return; + } + + tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); + tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); +} + /** * dce_v8_0_hpd_init - hpd setup callback. * @@ -304,6 +319,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) continue; } + dce_v8_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd); dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } @@ -3177,7 +3193,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t disp_int, mask, tmp; + uint32_t disp_int, mask; unsigned hpd; if (entry->src_data[0] >= adev->mode_info.num_hpd) { @@ -3190,9 +3206,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, mask = interrupt_status_offsets[hpd].hpd; if (disp_int & mask) { - tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); - tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; - WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); + dce_v8_0_hpd_int_ack(adev, hpd); schedule_delayed_work(&adev->hotplug_work, 0); DRM_DEBUG("IH: HPD%d\n", hpd + 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index dcdecb18b230..b02d63328f1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7947,7 +7947,7 @@ static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); } -static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid) +static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid) { amdgpu_gfx_off_ctrl(adev, false);
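
Every update_spm_vmid() implementation changes signature here to take a struct amdgpu_ring * as well. Callers with no ring in hand (early RLC init) pass NULL and the RLC_SPM_MC_CNTL update goes out as a plain MMIO write; amdgpu_vm_flush() passes its ring, and gfx v11 below then also emits the write on that ring under SR-IOV one-VF mode so it lands in submission order (the real code further restricts this to GFX and compute rings). A hedged sketch of the two paths, illustrative rather than actual driver code:

    /* data holds the new RLC_SPM_MC_CNTL value with the vmid field set */
    WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); /* immediate MMIO */

    if (ring && amdgpu_sriov_is_pp_one_vf(adev))
            /* also queue the write on the ring so it is re-applied in
             * submission order under SR-IOV one-VF mode */
            amdgpu_ring_emit_wreg(ring,
                                  SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL),
                                  data);

This is also why the gfx and compute emit_frame_size budgets below grow by 5 dwords for update_spm_vmid.
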
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 4f3bfdc75b37..2fb1342d5bd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -727,7 +727,7 @@ static int gfx_v11_0_rlc_init(struct amdgpu_device *adev) /* init spm vmid with 0xf */ if (adev->gfx.rlc.funcs->update_spm_vmid) - adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); return 0; } @@ -5027,7 +5027,7 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } -static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid) { u32 data; @@ -5041,6 +5041,14 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); amdgpu_gfx_off_ctrl(adev, true); + + if (ring + && amdgpu_sriov_is_pp_one_vf(adev) + && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) + || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) { + uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); + amdgpu_ring_emit_wreg(ring, reg, data); + } } static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = { @@ -6104,7 +6112,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .get_rptr = gfx_v11_0_ring_get_rptr_gfx, .get_wptr = gfx_v11_0_ring_get_wptr_gfx, .set_wptr = gfx_v11_0_ring_set_wptr_gfx, - .emit_frame_size = /* totally 242 maximum if 16 IBs */ + .emit_frame_size = /* totally 247 maximum if 16 IBs */ + 5 + /* update_spm_vmid */ 5 + /* COND_EXEC */ 9 + /* SET_Q_PREEMPTION_MODE */ 7 + /* PIPELINE_SYNC */ @@ -6154,6 +6163,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { .get_wptr = gfx_v11_0_ring_get_wptr_compute, .set_wptr = gfx_v11_0_ring_set_wptr_compute, .emit_frame_size = + 5 + /* update_spm_vmid */ 20 + /* gfx_v11_0_ring_emit_gds_switch */ 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 5 + /* hdp invalidate */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c index 26d6286d86c9..9e7ce1e6bc06 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c @@ -69,7 +69,7 @@ static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev, amdgpu_ras_interrupt_dispatch(adev, &ih_data); } else { if (adev->virt.ops && adev->virt.ops->ras_poison_handler) - adev->virt.ops->ras_poison_handler(adev); + adev->virt.ops->ras_poison_handler(adev, ras_if->block); else dev_warn(adev->dev, "No ras_poison_handler interface in SRIOV for %s!\n", ras_if->name); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index c2faf6b4c2fc..86a4865b1ae5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3274,7 +3274,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) /* init spm vmid with 0xf */ if (adev->gfx.rlc.funcs->update_spm_vmid) - adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); return 0; } @@ -3500,7 +3500,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) return 0; } -static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid) { u32 data; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 1943beb135c4..ea174b76ee70 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1288,7 +1288,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device 
*adev) /* init spm vmid with 0xf */ if (adev->gfx.rlc.funcs->update_spm_vmid) - adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); return 0; } @@ -5579,7 +5579,7 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) } } -static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid) { u32 data; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 69c500910746..169d45268ef6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v9_0_cp_gfx_enable(adev, true); + /* Only apply this quirk on the gfx9 APUs; the gfx10/gfx11 APUs are + * confirmed not to need this update. + */ + if (adev->flags & AMD_IS_APU && + adev->in_s3 && !adev->suspend_complete) { + DRM_INFO("Will skip the CSB packet resubmit\n"); + return 0; + } r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); @@ -4894,7 +4902,7 @@ static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); } -static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid) +static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid) { amdgpu_gfx_off_ctrl(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index bc8416afb62c..f53b379d8971 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -970,8 +970,9 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); } -static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs = - { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 }; +static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs = { + SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 +}; static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 131cddbdda0d..aace4594a603 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -38,6 +38,7 @@ #include "gfx_v9_4_3.h" #include "amdgpu_xcp.h" +#include "amdgpu_aca.h" MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); @@ -48,6 +49,10 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); #define GOLDEN_GB_ADDR_CONFIG 0x2a114042 #define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301 +#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */ +#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */ +#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */ + struct amdgpu_gfx_ras gfx_v9_4_3_ras; static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev); @@ -675,6 +680,66 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, }; +static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle, + struct aca_bank *bank, enum aca_error_type type, + struct aca_bank_report *report, void *data) +{ + u64 status, misc0; + u32 instlo; + int ret; + + status = 
bank->regs[ACA_REG_IDX_STATUS]; + if ((type == ACA_ERROR_TYPE_UE && + ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || + (type == ACA_ERROR_TYPE_CE && + ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { + + ret = aca_bank_info_decode(bank, &report->info); + if (ret) + return ret; + + /* NOTE: overwrite info.die_id with xcd id for gfx */ + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1; + + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + } + + return 0; +} + +static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, + enum aca_error_type type, void *data) +{ + u32 instlo; + + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + switch (instlo) { + case mmSMNAID_XCD0_MCA_SMU: + case mmSMNAID_XCD1_MCA_SMU: + case mmSMNXCD_XCD0_MCA_SMU: + return true; + default: + break; + } + + return false; +} + +static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = { + .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report, + .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid, +}; + +static const struct aca_info gfx_v9_4_3_aca_info = { + .hwip = ACA_HWIP_TYPE_SMU, + .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK, + .bank_ops = &gfx_v9_4_3_aca_bank_ops, +}; + static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; @@ -1109,7 +1174,7 @@ static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) { /* init spm vmid with 0xf */ if (adev->gfx.rlc.funcs->update_spm_vmid) - adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); return 0; } @@ -1320,7 +1385,7 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) return 0; } -static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, +static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid) { u32 reg, data; @@ -4242,9 +4307,32 @@ struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = { .reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count, }; +static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX, + &gfx_v9_4_3_aca_info, + NULL); + if (r) + goto late_fini; + + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + + return r; +} + struct amdgpu_gfx_ras gfx_v9_4_3_ras = { .ras_block = { .hw_ops = &gfx_v9_4_3_ras_ops, + .ras_late_init = &gfx_v9_4_3_ras_late_init, }, .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 6c5185608854..db89d13bd80d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -262,16 +262,17 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* flush hdp cache */ adev->hdp.funcs->flush_hdp(adev, NULL); - /* For SRIOV run time, driver shouldn't access the register through MMIO - * Directly use kiq to do the vm invalidation instead + /* This is necessary for SRIOV as well as for GFXOFF to function + * properly under bare metal */ if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes && (amdgpu_sriov_runtime(adev) || 
!amdgpu_sriov_vf(adev))) { - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid, GET_INST(GC, 0)); + amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req, + 1 << vmid, GET_INST(GC, 0)); return; } + /* This path is needed before KIQ/MES/GFXOFF are set up */ hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index c9c653cfc765..998daa702b44 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -223,16 +223,17 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* flush hdp cache */ adev->hdp.funcs->flush_hdp(adev, NULL); - /* For SRIOV run time, driver shouldn't access the register through MMIO - * Directly use kiq to do the vm invalidation instead + /* This is necessary for SRIOV as well as for GFXOFF to function + * properly under bare metal */ if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid, GET_INST(GC, 0)); + amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req, + 1 << vmid, GET_INST(GC, 0)); return; } + /* This path is needed before KIQ/MES/GFXOFF are set up */ hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); @@ -570,6 +571,7 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev) adev->mmhub.funcs = &mmhub_v3_0_2_funcs; break; case IP_VERSION(3, 3, 0): + case IP_VERSION(3, 3, 1): adev->mmhub.funcs = &mmhub_v3_3_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 59d9215e5556..23b478639921 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -435,9 +435,10 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) WREG32(mmVM_PRT_CNTL, tmp); if (enable) { - uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; + uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >> + AMDGPU_GPU_PAGE_SHIFT; uint32_t high = adev->vm_manager.max_pfn - - (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT); + (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT); WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 45a2f8e031a2..3da7b6a2b00d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -563,9 +563,10 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) WREG32(mmVM_PRT_CNTL, tmp); if (enable) { - uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; + uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >> + AMDGPU_GPU_PAGE_SHIFT; uint32_t high = adev->vm_manager.max_pfn - - (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT); + (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT); WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 4422b27a3cc2..969a9e867170 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -777,9 +777,10 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) WREG32(mmVM_PRT_CNTL, tmp); if (enable) { - uint32_t low = 
AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; + uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >> + AMDGPU_GPU_PAGE_SHIFT; uint32_t high = adev->vm_manager.max_pfn - - (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT); + (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT); WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 40a00ea0009f..d442ae85162d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -829,23 +829,25 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, req = hub->vm_inv_eng0_req + hub->eng_distance * eng; ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; - /* This is necessary for a HW workaround under SRIOV as well - * as GFXOFF under bare metal - */ if (vmhub >= AMDGPU_MMHUB0(0)) inst = GET_INST(GC, 0); else inst = vmhub; + + /* This is necessary for SRIOV as well as for GFXOFF to function + * properly under bare metal + */ if (adev->gfx.kiq[inst].ring.sched.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, - 1 << vmid, inst); + amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req, + 1 << vmid, inst); return; } + /* This path is needed before KIQ/MES/GFXOFF are set up */ spin_lock(&adev->gmc.invalidate_lock); /* @@ -1947,14 +1949,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) { - static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; - u32 vram_info; - - /* Only for dGPU, vendor informaton is reliable */ - if (!amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)) { - vram_info = RREG32(regBIF_BIOS_SCRATCH_4); - adev->gmc.vram_vendor = vram_info & 0xF; - } adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; adev->gmc.vram_width = 128 * 64; } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c new file mode 100644 index 000000000000..8d7d0813e331 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c @@ -0,0 +1,142 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "hdp_v7_0.h" + +#include "hdp/hdp_7_0_0_offset.h" +#include "hdp/hdp_7_0_0_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> + +static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + else + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); +} + +static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t hdp_clk_cntl, hdp_clk_cntl1; + uint32_t hdp_mem_pwr_cntl; + + if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD))) + return; + + hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL); + hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL); + + /* Before doing the clock/power mode switch, + * force on the IPH & RC clocks */ + hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, + RC_MEM_CLK_SOFT_OVERRIDE, 1); + WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl); + + /* disable clock and power gating before making any changes */ + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + ATOMIC_MEM_POWER_CTRL_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + ATOMIC_MEM_POWER_LS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + ATOMIC_MEM_POWER_DS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + ATOMIC_MEM_POWER_SD_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_CTRL_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_LS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_DS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_SD_EN, 0); + WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); + + /* Already disabled above. 
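+ * (As a sketch of the pattern used throughout this function:
+ * REG_SET_FIELD() is the usual SOC15 mask/shift helper, so a statement
+ * like the ones above boils down to
+ *   v = (v & ~REG__FIELD_MASK) | (val << REG__FIELD__SHIFT);
+ * with the composed mask/shift names, and nothing reaches the hardware
+ * until the following WREG32_SOC15() call.)
+ *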
The actions below are for "enabled" only */ + if (enable) { + /* only one clock gating mode (LS/DS/SD) can be enabled */ + if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + ATOMIC_MEM_POWER_SD_EN, 1); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_SD_EN, 1); + } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + ATOMIC_MEM_POWER_LS_EN, 1); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_LS_EN, 1); + } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + ATOMIC_MEM_POWER_DS_EN, 1); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_DS_EN, 1); + } + + /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to + * be set for SRAM LS/DS/SD */ + if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD)) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + ATOMIC_MEM_POWER_CTRL_EN, 1); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_CTRL_EN, 1); + WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); + } + } + + /* disable IPH & RC clock override after clock/power mode changing */ + hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, + RC_MEM_CLK_SOFT_OVERRIDE, 0); + WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl); +} + +static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev, + u64 *flags) +{ + uint32_t tmp; + + /* AMD_CG_SUPPORT_HDP_LS/DS/SD */ + tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL); + if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_LS; + else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_DS; + else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_SD; +} + +const struct amdgpu_hdp_funcs hdp_v7_0_funcs = { + .flush_hdp = hdp_v7_0_flush_hdp, + .update_clock_gating = hdp_v7_0_update_clock_gating, + .get_clock_gating_state = hdp_v7_0_get_clockgating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h new file mode 100644 index 000000000000..25b69201402d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __HDP_V7_0_H__ +#define __HDP_V7_0_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_hdp_funcs hdp_v7_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c new file mode 100644 index 000000000000..16fe428c0722 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c @@ -0,0 +1,767 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/pci.h> + +#include "amdgpu.h" +#include "amdgpu_ih.h" + +#include "oss/osssys_7_0_0_offset.h" +#include "oss/osssys_7_0_0_sh_mask.h" + +#include "soc15_common.h" +#include "ih_v7_0.h" + +#define MAX_REARM_RETRY 10 + +static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev); + +/** + * ih_v7_0_init_register_offset - Initialize register offset for ih rings + * + * @adev: amdgpu_device pointer + * + * Initialize register offset ih rings (IH_V7_0). 
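+ *
+ * Each IH ring keeps its own amdgpu_ih_regs table, so the helpers below
+ * can address ring 0 and ring 1 uniformly. A minimal sketch of the
+ * resulting access pattern:
+ *
+ *   struct amdgpu_ih_regs *ih_regs = &ih->ih_regs;
+ *   WREG32(ih_regs->ih_rb_rptr, 0);   // same code path for either ring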
+ */ +static void ih_v7_0_init_register_offset(struct amdgpu_device *adev) +{ + struct amdgpu_ih_regs *ih_regs; + + /* ih ring 2 has been removed; + * only the main ih ring and ih ring 1 are available */ + if (adev->irq.ih.ring_size) { + ih_regs = &adev->irq.ih.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR); + ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO); + ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL; + } + + if (adev->irq.ih1.ring_size) { + ih_regs = &adev->irq.ih1.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1; + } +} + +/** + * force_update_wptr_for_self_int - Force update the wptr for self interrupt + * + * @adev: amdgpu_device pointer + * @threshold: threshold to trigger the wptr reporting + * @timeout: timeout to trigger the wptr reporting + * @enabled: Enable/disable timeout flush mechanism + * + * threshold input range: 0 ~ 15, default 0, + * real_threshold = 2^threshold + * timeout input range: 0 ~ 20, default 8, + * real_timeout = (2^timeout) * 1024 / (socclk_freq) + * + * Force update wptr for self interrupt ( >= SIENNA_CICHLID). + */ +static void +force_update_wptr_for_self_int(struct amdgpu_device *adev, + u32 threshold, u32 timeout, bool enabled) +{ + u32 ih_cntl, ih_rb_cntl; + + ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2); + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1); + + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2, + SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout); + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2, + SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, + RB_USED_INT_THRESHOLD, threshold); + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl)) + return; + } else { + WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl); + } + + WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl); +} + +/** + * ih_v7_0_toggle_ring_interrupts - toggle the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * @enable: true - enable the interrupts, false - disable the interrupts + * + * Toggle the interrupt ring buffer (IH_V7_0) + */ +static int ih_v7_0_toggle_ring_interrupts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + bool enable) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 
1 : 0)); + /* enable_intr field is only valid in ring0 */ + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (enable) { + ih->enabled = true; + } else { + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_rptr, 0); + WREG32(ih_regs->ih_rb_wptr, 0); + ih->enabled = false; + ih->rptr = 0; + } + + return 0; +} + +/** + * ih_v7_0_toggle_interrupts - Toggle all the available interrupt ring buffers + * + * @adev: amdgpu_device pointer + * @enable: enable or disable interrupt ring buffers + * + * Toggle all the available interrupt ring buffers (IH_V7_0). + */ +static int ih_v7_0_toggle_interrupts(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1}; + int i; + int r; + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + r = ih_v7_0_toggle_ring_interrupts(adev, ih[i], enable); + if (r) + return r; + } + } + + return 0; +} + +static uint32_t ih_v7_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) +{ + int rb_bufsz = order_base_2(ih->ring_size / 4); + + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + MC_SPACE, ih->use_bus_addr ? 2 : 4); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_CLEAR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); + /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register + * value is written to memory + */ + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_WRITEBACK_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0); + + return ih_rb_cntl; +} + +static uint32_t ih_v7_0_doorbell_rptr(struct amdgpu_ih_ring *ih) +{ + u32 ih_doorbell_rtpr = 0; + + if (ih->use_doorbell) { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, OFFSET, + ih->doorbell_index); + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 1); + } else { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 0); + } + return ih_doorbell_rtpr; +} + +/** + * ih_v7_0_enable_ring - enable an ih ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Enable an ih ring buffer (IH_V7_0) + */ +static int ih_v7_0_enable_ring(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + /* Ring Buffer base. 
[39:8] of 40-bit address of the beginning of the ring buffer */ + WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8); + WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff); + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = ih_v7_0_rb_cntl(ih, tmp); + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); + if (ih == &adev->irq.ih1) { + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); + } + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (ih == &adev->irq.ih) { + /* set the ih ring 0 writeback address whether it's enabled or not */ + WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr)); + WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF); + } + + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_wptr, 0); + WREG32(ih_regs->ih_rb_rptr, 0); + + WREG32(ih_regs->ih_doorbell_rptr, ih_v7_0_doorbell_rptr(ih)); + + return 0; +} + +/** + * ih_v7_0_irq_init - init and enable the interrupt ring + * + * @adev: amdgpu_device pointer + * + * Allocate a ring buffer for the interrupt controller, + * enable the RLC, disable interrupts, then program the IH + * ring buffer and enable it. + * Called at device load and resume. + * Returns 0 for success, errors for failure. + */ +static int ih_v7_0_irq_init(struct amdgpu_device *adev) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1}; + u32 ih_chicken; + u32 tmp; + int ret; + int i; + + /* disable irqs */ + ret = ih_v7_0_toggle_interrupts(adev, false); + if (ret) + return ret; + + adev->nbio.funcs->ih_control(adev); + + if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || + (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) { + if (ih[0]->use_bus_addr) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN); + ih_chicken = REG_SET_FIELD(ih_chicken, + IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); + WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken); + } + } + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + ret = ih_v7_0_enable_ring(adev, ih[i]); + if (ret) + return ret; + } + } + + /* update doorbell range for ih ring 0 */ + adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, + ih[0]->doorbell_index); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL); + tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, + CLIENT18_IS_STORM_CLIENT, 1); + WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL); + tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1); + WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp); + + /* GC/MMHUB UTCL2 page fault interrupts are configured as + * MSI storm capable interrupts by default. 
The delay is + * used to avoid the ISR being called too frequently + * when page faults happen on several contiguous pages, + * and thus to avoid an MSI storm */ + tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL); + tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL, + DELAY, 3); + WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); + + pci_set_master(adev->pdev); + + /* enable interrupts */ + ret = ih_v7_0_toggle_interrupts(adev, true); + if (ret) + return ret; + /* enable wptr force update for self int */ + force_update_wptr_for_self_int(adev, 0, 8, true); + + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + + return 0; +} + +/** + * ih_v7_0_irq_disable - disable interrupts + * + * @adev: amdgpu_device pointer + * + * Disable interrupts on the hw. + */ +static void ih_v7_0_irq_disable(struct amdgpu_device *adev) +{ + force_update_wptr_for_self_int(adev, 0, 8, false); + ih_v7_0_toggle_interrupts(adev, false); + + /* Wait and acknowledge irq */ + mdelay(1); +} + +/** + * ih_v7_0_get_wptr() - get the IH ring buffer wptr + * + * @adev: amdgpu_device pointer + * @ih: IH ring buffer to fetch wptr + * + * Get the IH ring buffer wptr from either the register + * or the writeback memory buffer. Also check for + * ring buffer overflow and deal with it. + * Returns the value of the wptr. + */ +static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + u32 wptr, tmp; + struct amdgpu_ih_regs *ih_regs; + + wptr = le32_to_cpu(*ih->wptr_cpu); + ih_regs = &ih->ih_regs; + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happens, start parsing interrupts + * from the last not-overwritten vector (wptr + 32). Hopefully + * this should allow us to catch up. 
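+ *
+ * Worked example (hypothetical 4 KiB ring, so ptr_mask == 0xFFF): if
+ * wptr wrapped to 0x010 while rptr still points at 0xFF0, parsing
+ * resumes at (0x010 + 32) & 0xFFF = 0x030, skipping the vector that
+ * is currently being overwritten.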
+ */ + tmp = (wptr + 32) & ih->ptr_mask; + dev_warn(adev->dev, "IH ring buffer overflow " + "(0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, tmp); + ih->rptr = tmp; + + tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); +out: + return (wptr & ih->ptr_mask); +} + +/** + * ih_v7_0_irq_rearm - rearm IRQ if lost + * + * @adev: amdgpu_device pointer + * @ih: IH ring to match + * + */ +static void ih_v7_0_irq_rearm(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + uint32_t v = 0; + uint32_t i = 0; + struct amdgpu_ih_regs *ih_regs; + + ih_regs = &ih->ih_regs; + + /* Rearm IRQ / re-write doorbell if doorbell write is lost */ + for (i = 0; i < MAX_REARM_RETRY; i++) { + v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr); + if ((v < ih->ring_size) && (v != ih->rptr)) + WDOORBELL32(ih->doorbell_index, ih->rptr); + else + break; + } +} + +/** + * ih_v7_0_set_rptr - set the IH ring buffer rptr + * + * @adev: amdgpu_device pointer + * @ih: IH ring buffer to set rptr + */ +static void ih_v7_0_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + + if (ih->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + *ih->rptr_cpu = ih->rptr; + WDOORBELL32(ih->doorbell_index, ih->rptr); + + if (amdgpu_sriov_vf(adev)) + ih_v7_0_irq_rearm(adev, ih); + } else { + ih_regs = &ih->ih_regs; + WREG32(ih_regs->ih_rb_rptr, ih->rptr); + } +} + +/** + * ih_v7_0_self_irq - dispatch work for ring 1 + * + * @adev: amdgpu_device pointer + * @source: irq source + * @entry: IV with WPTR update + * + * Update the WPTR from the IV and schedule work to handle the entries. + */ +static int ih_v7_0_self_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t wptr = cpu_to_le32(entry->src_data[0]); + + switch (entry->ring_id) { + case 1: + *adev->irq.ih1.wptr_cpu = wptr; + schedule_work(&adev->irq.ih1_work); + break; + default: break; + } + return 0; +} + +static const struct amdgpu_irq_src_funcs ih_v7_0_self_irq_funcs = { + .process = ih_v7_0_self_irq, +}; + +static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev) +{ + adev->irq.self_irq.num_types = 0; + adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs; +} + +static int ih_v7_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ih_v7_0_set_interrupt_funcs(adev); + ih_v7_0_set_self_irq_funcs(adev); + return 0; +} + +static int ih_v7_0_sw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool use_bus_addr; + + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, + &adev->irq.self_irq); + + if (r) + return r; + + /* use the gpu virtual address for the ih ring + * until ih_chicken is programmed to allow + * use of the bus address for the ih ring by the psp bl */ + use_bus_addr = + (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? 
false : true; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); + if (r) + return r; + + adev->irq.ih.use_doorbell = true; + adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; + + adev->irq.ih1.ring_size = 0; + adev->irq.ih2.ring_size = 0; + + /* initialize ih control register offset */ + ih_v7_0_init_register_offset(adev); + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + if (r) + return r; + + r = amdgpu_irq_init(adev); + + return r; +} + +static int ih_v7_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_irq_fini_sw(adev); + + return 0; +} + +static int ih_v7_0_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = ih_v7_0_irq_init(adev); + if (r) + return r; + + return 0; +} + +static int ih_v7_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ih_v7_0_irq_disable(adev); + + return 0; +} + +static int ih_v7_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return ih_v7_0_hw_fini(adev); +} + +static int ih_v7_0_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return ih_v7_0_hw_init(adev); +} + +static bool ih_v7_0_is_idle(void *handle) +{ + /* todo */ + return true; +} + +static int ih_v7_0_wait_for_idle(void *handle) +{ + /* todo */ + return -ETIMEDOUT; +} + +static int ih_v7_0_soft_reset(void *handle) +{ + /* todo */ + return 0; +} + +static void ih_v7_0_update_clockgating_state(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def, field_val; + + if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) { + def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL); + field_val = enable ? 
0 : 1; + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DBUS_MUX_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DYN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + REG_CLK_SOFT_OVERRIDE, field_val); + if (def != data) + WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data); + } + + return; +} + +static int ih_v7_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ih_v7_0_update_clockgating_state(adev, + state == AMD_CG_STATE_GATE); + return 0; +} + +static void ih_v7_0_update_ih_mem_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t ih_mem_pwr_cntl; + + /* Disable ih sram power cntl before switching powergating mode */ + ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_CTRL_EN, 0); + WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl); + + /* It is recommended to set the mem powergating mode to DS mode */ + if (enable) { + /* mem power mode */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_LS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_DS_EN, 1); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_SD_EN, 0); + /* cam mem power mode */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0); + /* re-enable power cntl */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_CTRL_EN, 1); + } else { + /* mem power mode */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_LS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_DS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_SD_EN, 0); + /* cam mem power mode */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0); + /* re-enable power cntl */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_CTRL_EN, 1); + } + + WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl); +} + +static int ih_v7_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_PG_STATE_GATE); + + if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG) + ih_v7_0_update_ih_mem_power_gating(adev, enable); + + return 0; +} + +static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL)) + *flags |= AMD_CG_SUPPORT_IH_CG; + + 
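+ /* Reading zero here means none of the soft overrides toggled in
+ * ih_v7_0_update_clockgating_state() are set, i.e. clock gating is
+ * effectively enabled. */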
return; +} + +static const struct amd_ip_funcs ih_v7_0_ip_funcs = { + .name = "ih_v7_0", + .early_init = ih_v7_0_early_init, + .late_init = NULL, + .sw_init = ih_v7_0_sw_init, + .sw_fini = ih_v7_0_sw_fini, + .hw_init = ih_v7_0_hw_init, + .hw_fini = ih_v7_0_hw_fini, + .suspend = ih_v7_0_suspend, + .resume = ih_v7_0_resume, + .is_idle = ih_v7_0_is_idle, + .wait_for_idle = ih_v7_0_wait_for_idle, + .soft_reset = ih_v7_0_soft_reset, + .set_clockgating_state = ih_v7_0_set_clockgating_state, + .set_powergating_state = ih_v7_0_set_powergating_state, + .get_clockgating_state = ih_v7_0_get_clockgating_state, +}; + +static const struct amdgpu_ih_funcs ih_v7_0_funcs = { + .get_wptr = ih_v7_0_get_wptr, + .decode_iv = amdgpu_ih_decode_iv_helper, + .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper, + .set_rptr = ih_v7_0_set_rptr +}; + +static void ih_v7_0_set_interrupt_funcs(struct amdgpu_device *adev) +{ + adev->irq.ih_funcs = &ih_v7_0_funcs; +} + +const struct amdgpu_ip_block_version ih_v7_0_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &ih_v7_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h new file mode 100644 index 000000000000..af9dcbc451fd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.h @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __IH_V7_0_IH_H__ +#define __IH_V7_0_IH_H__ + +extern const struct amdgpu_ip_block_version ih_v7_0_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index e67a337457ed..99cd49ee8ef6 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -551,7 +551,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret; - if(state == adev->jpeg.cur_state) + if (state == adev->jpeg.cur_state) return 0; if (state == AMD_PG_STATE_GATE) @@ -559,7 +559,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle, else ret = jpeg_v2_5_start(adev); - if(!ret) + if (!ret) adev->jpeg.cur_state = state; return ret; @@ -754,8 +754,7 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) } } -const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = -{ +const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = { .type = AMD_IP_BLOCK_TYPE_JPEG, .major = 2, .minor = 5, @@ -763,8 +762,7 @@ const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = .funcs = &jpeg_v2_5_ip_funcs, }; -const struct amdgpu_ip_block_version jpeg_v2_6_ip_block = -{ +const struct amdgpu_ip_block_version jpeg_v2_6_ip_block = { .type = AMD_IP_BLOCK_TYPE_JPEG, .major = 2, .minor = 6, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index bc38b90f8cf8..88ea58d5c4ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, @@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { - .set = jpeg_v4_0_set_interrupt_state, .process = jpeg_v4_0_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 82b6b62c170b..32caeb37cef9 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -652,7 +652,7 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring) * * Write a start command to the ring. */ -static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) +void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) { if (!amdgpu_sriov_vf(ring->adev)) { amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, @@ -672,7 +672,7 @@ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) * * Write a end command to the ring. */ -static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) +void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) { if (!amdgpu_sriov_vf(ring->adev)) { amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, @@ -695,7 +695,7 @@ static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) * * Write a fence and a trap command to the ring. 
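 *
 * (For orientation: the fence write publishes @seq at @addr so the
 * scheduler can observe job completion; the trap raises the interrupt
 * that kicks off fence processing on the CPU side.)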
*/ -static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, +void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned int flags) { WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); @@ -764,7 +764,7 @@ static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, * * Write ring commands to execute the indirect buffer. */ -static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring, +void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags) @@ -815,7 +815,7 @@ static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, 0x2); } -static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, +void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) { uint32_t reg_offset = (reg << 2); @@ -842,7 +842,7 @@ static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_ amdgpu_ring_write(ring, mask); } -static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, +void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; @@ -857,7 +857,7 @@ static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask); } -static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) +void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) { uint32_t reg_offset = (reg << 2); @@ -875,7 +875,7 @@ static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t re amdgpu_ring_write(ring, val); } -static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count) +void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count) { int i; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h index 22483dc66351..747a3e5f6856 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h @@ -48,4 +48,19 @@ extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block; +void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags); +void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned int flags); +void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned int vmid, uint64_t pd_addr); +void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count); +void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring); +void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring); +void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask); + #endif /* __JPEG_V4_0_3_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 6ede85b28cc8..8d1754e35605 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -34,7 +34,17 @@ #include "vcn/vcn_4_0_5_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_4_0.h" -#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f +#define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL +#define 
mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX +#define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA +#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX + +#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f +#define regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET 0x4026 +#define regJPEG_SYS_INT_EN_INTERNAL_OFFSET 0x4141 +#define regJPEG_CGC_CTRL_INTERNAL_OFFSET 0x4161 +#define regJPEG_CGC_GATE_INTERNAL_OFFSET 0x4160 +#define regUVD_NO_OP_INTERNAL_OFFSET 0x0029 static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev); static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev); @@ -155,11 +165,18 @@ static int jpeg_v4_0_5_hw_init(void *handle) struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; + // TODO: Enable ring test with DPG support + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) { + DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully under DPG Mode"); + return 0; + } + r = amdgpu_ring_test_helper(ring); if (r) return r; - DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n"); + if (!r) + DRM_INFO("JPEG decode initialized successfully under SPG Mode\n"); return 0; } @@ -181,7 +198,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); return 0; } @@ -228,11 +244,11 @@ static int jpeg_v4_0_5_resume(void *handle) return r; } -static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev) +static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst) { uint32_t data = 0; - data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); + data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) { data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK); @@ -242,21 +258,21 @@ static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev) data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); + WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data); - data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); + data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE); data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK | JPEG_CGC_GATE__JPEG2_DEC_MASK | JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); + WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data); } -static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev) +static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst) { uint32_t data = 0; - data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); + data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) { data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK; @@ -266,47 +282,66 @@ static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev) data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); + WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data); - data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); + data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE); data |= (JPEG_CGC_GATE__JPEG_DEC_MASK |JPEG_CGC_GATE__JPEG2_DEC_MASK |JPEG_CGC_GATE__JMCIF_MASK |JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); + WREG32_SOC15(JPEG, inst, 
regJPEG_CGC_GATE, data); +} + +static void jpeg_engine_4_0_5_dpg_clock_gating_mode(struct amdgpu_device *adev, + int inst_idx, uint8_t indirect) +{ + uint32_t data = 0; + + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_CTRL_INTERNAL_OFFSET, data, indirect); + + data = 0; + WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_GATE_INTERNAL_OFFSET, + data, indirect); } -static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev) +static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst) { if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { - WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG), + WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG), 1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT); - SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, + SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS, 0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK); } /* disable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0, + WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); /* keep the JPEG in static PG mode */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0, + WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0, ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK); return 0; } -static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev) +static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst) { /* enable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), + WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { - WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG), + WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG), 2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT); - SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, + SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS, 1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK); } @@ -315,61 +350,149 @@ static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev) } /** - * jpeg_v4_0_5_start - start JPEG block + * jpeg_v4_0_5_start_dpg_mode - Jpeg start with dpg mode * * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * @indirect: indirectly write sram * - * Setup and start the JPEG block + * Start JPEG block with dpg mode */ -static int jpeg_v4_0_5_start(struct amdgpu_device *adev) +static void jpeg_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { - struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; - int r; + struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec; + uint32_t reg_data = 0; - if (adev->pm.dpm_enabled) - amdgpu_dpm_enable_jpeg(adev, true); + /* enable anti hang mechanism */ + reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS); + reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; + reg_data |= 0x1; + WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data); - /* doorbell programming is done for 
every playback */ - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + WREG32(SOC15_REG_OFFSET(JPEG, inst_idx, regUVD_IPX_DLDO_CONFIG), + 2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(JPEG, inst_idx, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK); + } - WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, - ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | - VCN_JPEG_DB_CTRL__EN_MASK); + reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS); + reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK; + WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data); - /* disable power gating */ - r = jpeg_v4_0_5_disable_static_power_gating(adev); - if (r) - return r; + if (indirect) + adev->jpeg.inst[inst_idx].dpg_sram_curr_addr = + (uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr; - /* JPEG disable CGC */ - jpeg_v4_0_5_disable_clock_gating(adev); + jpeg_engine_4_0_5_dpg_clock_gating_mode(adev, inst_idx, indirect); /* MJPEG global tiling registers */ - WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - + WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET, + adev->gfx.config.gb_addr_config, indirect); + /* enable System Interrupt for JRBC */ + WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_SYS_INT_EN_INTERNAL_OFFSET, + JPEG_SYS_INT_EN__DJRBC_MASK, indirect); - /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); + /* add nop to workaround PSP size check */ + WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regUVD_NO_OP_INTERNAL_OFFSET, 0, indirect); - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC_MASK, - ~JPEG_SYS_INT_EN__DJRBC_MASK); + if (indirect) + amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0); - WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v4_0_5_stop_dpg_mode - Jpeg stop with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * + * Stop JPEG block with dpg mode + */ +static void jpeg_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) +{ + uint32_t reg_data = 0; + + reg_data = 
RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS); + reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK; + WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data); + +} + +/** + * jpeg_v4_0_5_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v4_0_5_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; + int r, i; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, true); + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + /* doorbell programming is done for every playback */ + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i); + + WREG32_SOC15(VCN, i, regVCN_JPEG_DB_CTRL, + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); + + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) { + jpeg_v4_0_5_start_dpg_mode(adev, i, adev->jpeg.indirect_sram); + continue; + } + + /* disable power gating */ + r = jpeg_v4_0_5_disable_static_power_gating(adev, i); + if (r) + return r; + + /* JPEG disable CGC */ + jpeg_v4_0_5_disable_clock_gating(adev, i); + + /* MJPEG global tiling registers */ + WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR); + } return 0; } @@ -383,19 +506,26 @@ static int jpeg_v4_0_5_start(struct amdgpu_device *adev) */ static int jpeg_v4_0_5_stop(struct amdgpu_device *adev) { - int r; + int r, i; - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) { - jpeg_v4_0_5_enable_clock_gating(adev); + jpeg_v4_0_5_stop_dpg_mode(adev, i); + continue; + } + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); - /* enable power gating */ - r = jpeg_v4_0_5_enable_static_power_gating(adev); - if (r) - return r; + jpeg_v4_0_5_enable_clock_gating(adev, i); + + /* enable power gating */ + r = jpeg_v4_0_5_enable_static_power_gating(adev, i); + if (r) + return r; + } if (adev->pm.dpm_enabled) amdgpu_dpm_enable_jpeg(adev, false); @@ -479,13 +609,20 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + int i; - if (enable) { - if (!jpeg_v4_0_5_is_idle(handle)) - return -EBUSY; - jpeg_v4_0_5_enable_clock_gating(adev); - } else { - jpeg_v4_0_5_disable_clock_gating(adev); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + if (enable) { + if (!jpeg_v4_0_5_is_idle(handle)) + return -EBUSY; + + jpeg_v4_0_5_enable_clock_gating(adev, i); + } else { + jpeg_v4_0_5_disable_clock_gating(adev, i); + } } return 0; @@ -516,14 +653,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -598,19 +727,32 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs; - DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs; + DRM_DEV_INFO(adev->dev, "JPEG%d decode is enabled in VM mode\n", i); + } } static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = { - .set = jpeg_v4_0_5_set_interrupt_state, .process = jpeg_v4_0_5_process_interrupt, }; static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev) { - adev->jpeg.inst->irq.num_types = 1; - adev->jpeg.inst->irq.funcs = &jpeg_v4_0_5_irq_funcs; + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + adev->jpeg.inst[i].irq.num_types = 1; + adev->jpeg.inst[i].irq.funcs = &jpeg_v4_0_5_irq_funcs; + } } const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c new file mode 100644 index 000000000000..e70200f97555 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -0,0 +1,570 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" +#include "jpeg_v4_0_3.h" + +#include "vcn/vcn_5_0_0_offset.h" +#include "vcn/vcn_5_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h" + +static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev); +static int jpeg_v5_0_0_set_powergating_state(void *handle, + enum amd_powergating_state state); + +/** + * jpeg_v5_0_0_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int jpeg_v5_0_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->jpeg.num_jpeg_inst = 1; + adev->jpeg.num_jpeg_rings = 1; + + jpeg_v5_0_0_set_dec_ring_funcs(adev); + jpeg_v5_0_0_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v5_0_0_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int jpeg_v5_0_0_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int r; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq); + if (r) + return r; + + r = amdgpu_jpeg_sw_init(adev); + if (r) + return r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + ring = adev->jpeg.inst->ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; + ring->vm_hub = AMDGPU_MMHUB0(0); + + sprintf(ring->name, "jpeg_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + + return 0; +} + +/** + * jpeg_v5_0_0_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG suspend and free up sw allocation + */ +static int jpeg_v5_0_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_jpeg_suspend(adev); + if (r) + return r; + + r = amdgpu_jpeg_sw_fini(adev); + + return r; +} + +/** + * jpeg_v5_0_0_hw_init - start and test JPEG block + * + * @handle: amdgpu_device pointer + * + */ +static int jpeg_v5_0_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; + int r; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + + WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + + DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n"); + + return 0; +} + +/** + * jpeg_v5_0_0_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the JPEG block, mark ring as not ready any more + */ +static int jpeg_v5_0_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) + jpeg_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + + return 
0; +} + +/** + * jpeg_v5_0_0_suspend - suspend JPEG block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend JPEG block + */ +static int jpeg_v5_0_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = jpeg_v5_0_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_jpeg_suspend(adev); + + return r; +} + +/** + * jpeg_v5_0_0_resume - resume JPEG block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init JPEG block + */ +static int jpeg_v5_0_0_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + r = jpeg_v5_0_0_hw_init(adev); + + return r; +} + +static void jpeg_v5_0_0_disable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + + WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); + + data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); + data &= ~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK + | JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK); + WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); +} + +static void jpeg_v5_0_0_enable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + + data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL); + + data |= 1 << JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT; + WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE); + data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK + |JPEG_CGC_GATE__JPEG_ENC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); +} + +static int jpeg_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + + data = 1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT; + WREG32_SOC15(JPEG, 0, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, 0, + UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK); + + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + /* keep the JPEG in static PG mode */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK); + + return 0; +} + +static int jpeg_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev) +{ + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG), + 2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK); + } + + return 0; +} + +/** + * jpeg_v5_0_0_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v5_0_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; + int r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, true); + + /* disable power gating */ + r = jpeg_v5_0_0_disable_static_power_gating(adev); + if (r) + return r; + + /* JPEG disable CGC */ + jpeg_v5_0_0_disable_clock_gating(adev); + + /* MJPEG global tiling registers */ + WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, 
regUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC0_MASK, + ~JPEG_SYS_INT_EN__DJRBC0_MASK); + + WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR); + + return 0; +} + +/** + * jpeg_v5_0_0_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v5_0_0_stop(struct amdgpu_device *adev) +{ + int r; + + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + jpeg_v5_0_0_enable_clock_gating(adev); + + /* enable power gating */ + r = jpeg_v5_0_0_enable_static_power_gating(adev); + if (r) + return r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, false); + + return 0; +} + +/** + * jpeg_v5_0_0_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v5_0_0_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v5_0_0_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v5_0_0_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return *ring->wptr_cpu_addr; + else + return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v5_0_0_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v5_0_0_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +static bool jpeg_v5_0_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 1; + + ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) & + UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC_STATUS__RB_JOB_DONE_MASK)); + + return ret; +} + +static int jpeg_v5_0_0_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK); +} + +static int jpeg_v5_0_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + + if (enable) { + if (!jpeg_v5_0_0_is_idle(handle)) + return -EBUSY; + jpeg_v5_0_0_enable_clock_gating(adev); + } else { + jpeg_v5_0_0_disable_clock_gating(adev); + } + + return 0; +} + +static int jpeg_v5_0_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (state == adev->jpeg.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = jpeg_v5_0_0_stop(adev); + else + ret = jpeg_v5_0_0_start(adev); + + if (!ret) + adev->jpeg.cur_state = state; + + return ret; +} + +static int jpeg_v5_0_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: JPEG TRAP\n"); + + switch (entry->src_id) { + case VCN_4_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(adev->jpeg.inst->ring_dec); + break; + default: + DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { + .name = "jpeg_v5_0_0", + .early_init = jpeg_v5_0_0_early_init, + .late_init = NULL, + .sw_init = jpeg_v5_0_0_sw_init, + .sw_fini = jpeg_v5_0_0_sw_fini, + .hw_init = jpeg_v5_0_0_hw_init, + .hw_fini = jpeg_v5_0_0_hw_fini, + .suspend = jpeg_v5_0_0_suspend, + .resume = jpeg_v5_0_0_resume, + .is_idle = jpeg_v5_0_0_is_idle, + .wait_for_idle = jpeg_v5_0_0_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = jpeg_v5_0_0_set_clockgating_state, + .set_powergating_state = jpeg_v5_0_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr, + .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr, + .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v5_0_0_dec_ring_emit_vm_flush */ + 22 + 22 + /* jpeg_v5_0_0_dec_ring_emit_fence x2 vm fence */ + 8 + 16, + .emit_ib_size = 22, /* jpeg_v5_0_0_dec_ring_emit_ib */ + .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib, + .emit_fence = jpeg_v4_0_3_dec_ring_emit_fence, + .emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v4_0_3_dec_ring_nop, + .insert_start = jpeg_v4_0_3_dec_ring_insert_start, + .insert_end = jpeg_v4_0_3_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->ring_dec->funcs = &jpeg_v5_0_0_dec_ring_vm_funcs; + DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); +} + +static const struct amdgpu_irq_src_funcs jpeg_v5_0_0_irq_funcs = { + .set = jpeg_v5_0_0_set_interrupt_state, + .process = jpeg_v5_0_0_process_interrupt, +}; + +static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev) +{ + 
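/* single irq type: the decode trap (VCN_4_0__SRCID__JPEG_DECODE) registered in sw_init and dispatched by jpeg_v5_0_0_process_interrupt() */ +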
adev->jpeg.inst->irq.num_types = 1; + adev->jpeg.inst->irq.funcs = &jpeg_v5_0_0_irq_funcs; +} + +const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 5, + .minor = 0, + .rev = 0, + .funcs = &jpeg_v5_0_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h new file mode 100644 index 000000000000..bd348336b215 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __JPEG_V5_0_0_H__ +#define __JPEG_V5_0_0_H__ + +extern const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block; + +#endif /* __JPEG_V5_0_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c new file mode 100644 index 000000000000..396262044ea8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c @@ -0,0 +1,121 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/delay.h> +#include "amdgpu.h" +#include "lsdma_v7_0.h" +#include "amdgpu_lsdma.h" + +#include "lsdma/lsdma_7_0_0_offset.h" +#include "lsdma/lsdma_7_0_0_sh_mask.h" + +static int lsdma_v7_0_wait_pio_status(struct amdgpu_device *adev) +{ + return amdgpu_lsdma_wait_for(adev, SOC15_REG_OFFSET(LSDMA, 0, regLSDMA_PIO_STATUS), + LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK, + LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK); +} + +static int lsdma_v7_0_copy_mem(struct amdgpu_device *adev, + uint64_t src_addr, + uint64_t dst_addr, + uint64_t size) +{ + int ret; + uint32_t tmp; + + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_LO, lower_32_bits(src_addr)); + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_HI, upper_32_bits(src_addr)); + + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr)); + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr)); + + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0); + + tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 0); + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp); + + ret = lsdma_v7_0_wait_pio_status(adev); + if (ret) + dev_err(adev->dev, "LSDMA PIO failed to copy memory!\n"); + + return ret; +} + +static int lsdma_v7_0_fill_mem(struct amdgpu_device *adev, + uint64_t dst_addr, + uint32_t data, + uint64_t size) +{ + int ret; + uint32_t tmp; + + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONSTFILL_DATA, data); + + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr)); + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr)); + + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0); + + tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0); + tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 1); + WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp); + + ret = lsdma_v7_0_wait_pio_status(adev); + if (ret) + dev_err(adev->dev, "LSDMA PIO failed to fill memory!\n"); + + return ret; +} + +static void lsdma_v7_0_update_memory_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t tmp; + + tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL); + tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, 0); + WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp); + + tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, enable); + WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp); +} + +const struct amdgpu_lsdma_funcs lsdma_v7_0_funcs = { + .copy_mem = lsdma_v7_0_copy_mem, + .fill_mem = lsdma_v7_0_fill_mem, + .update_memory_power_gating = lsdma_v7_0_update_memory_power_gating +}; diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h 
b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h new file mode 100644 index 000000000000..52b4485cdd98 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __LSDMA_V7_0_H__ +#define __LSDMA_V7_0_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_lsdma_funcs lsdma_v7_0_funcs; + +#endif /* __LSDMA_V7_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index fb53aacdcba2..c0fc44cdd658 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -33,6 +33,7 @@ #define regVM_L2_CNTL3_DEFAULT 0x80100007 #define regVM_L2_CNTL4_DEFAULT 0x000000c1 +#define mmSMNAID_AID0_MCA_SMU 0x03b30400 static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev) { @@ -705,8 +706,94 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = { .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count, }; +static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle, + struct aca_bank *bank, enum aca_error_type type, + struct aca_bank_report *report, void *data) +{ + u64 status, misc0; + int ret; + + status = bank->regs[ACA_REG_IDX_STATUS]; + if ((type == ACA_ERROR_TYPE_UE && + ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || + (type == ACA_ERROR_TYPE_CE && + ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { + + ret = aca_bank_info_decode(bank, &report->info); + if (ret) + return ret; + + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + } + + return 0; +} + +/* reference to smu driver if header file */ +static int mmhub_v1_8_err_codes[] = { + 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */ + 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */ + 10, /* CODE_UTCL2_ROUTER */ + 11, /* CODE_VML2 */ + 12, /* CODE_VML2_WALKER */ + 13, /* CODE_MMCANE */ +}; + +static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, + enum aca_error_type type, void *data) +{ + u32 instlo; + + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + if (aca_bank_check_error_codes(handle->adev, bank, + mmhub_v1_8_err_codes, + ARRAY_SIZE(mmhub_v1_8_err_codes))) + return false; + + return true; +} + +static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = { + 
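/* presumably the ACA core uses is_valid to filter SMU banks (AID0 instance plus the MMHUB error-code list above) before generate_report decodes the MISC0 error count */ +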
.aca_bank_generate_report = mmhub_v1_8_aca_bank_generate_report, + .aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid, +}; + +static const struct aca_info mmhub_v1_8_aca_info = { + .hwip = ACA_HWIP_TYPE_SMU, + .mask = ACA_ERROR_UE_MASK, + .bank_ops = &mmhub_v1_8_aca_bank_ops, +}; + +static int mmhub_v1_8_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__MMHUB, + &mmhub_v1_8_aca_info, NULL); + if (r) + goto late_fini; + + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + + return r; +} + struct amdgpu_mmhub_ras mmhub_v1_8_ras = { .ras_block = { .hw_ops = &mmhub_v1_8_ras_hw_ops, + .ras_late_init = mmhub_v1_8_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c index dc4812ecc98d..b3961968c10c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c @@ -98,6 +98,7 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev, switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(3, 3, 0): + case IP_VERSION(3, 3, 1): mmhub_cid = mmhub_client_ids_v3_3[cid][rw]; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 63725b2ebc03..a2bd2c3b1ef9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -404,7 +404,8 @@ static int xgpu_ai_request_init_data(struct amdgpu_device *adev) return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); } -static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev) +static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block) { xgpu_ai_send_access_requests(adev, IDH_RAS_POISON); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 6a68ee946f1c..77f5b55decf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -152,14 +152,14 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, xgpu_nv_mailbox_set_valid(adev, false); } -static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, - enum idh_request req) +static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev, + enum idh_request req, u32 data1, u32 data2, u32 data3) { int r, retry = 1; enum idh_event event = -1; send_request: - xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0); + xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3); switch (req) { case IDH_REQ_GPU_INIT_ACCESS: @@ -170,6 +170,10 @@ send_request: case IDH_REQ_GPU_INIT_DATA: event = IDH_REQ_GPU_INIT_DATA_READY; break; + case IDH_RAS_POISON: + if (data1 != 0) + event = IDH_RAS_POISON_READY; + break; default: break; } @@ -206,6 +210,13 @@ send_request: return 0; } +static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, + enum idh_request req) +{ + return xgpu_nv_send_access_requests_with_param(adev, + req, 0, 0, 0); +} + static int xgpu_nv_request_reset(struct amdgpu_device *adev) { int ret, i = 0; @@ -424,9 +435,17 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); } -static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev) +static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block) { - xgpu_nv_send_access_requests(adev, IDH_RAS_POISON); + if 
(amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) { + xgpu_nv_send_access_requests(adev, IDH_RAS_POISON); + } else { + amdgpu_virt_fini_data_exchange(adev); + xgpu_nv_send_access_requests_with_param(adev, + IDH_RAS_POISON, block, 0, 0); + amdgpu_virt_init_data_exchange(adev); + } } const struct amdgpu_virt_ops xgpu_nv_virt_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index d0221ce08769..1e8fd90cab43 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -51,6 +51,7 @@ enum idh_event { IDH_FAIL, IDH_QUERY_ALIVE, IDH_REQ_GPU_INIT_DATA_READY, + IDH_RAS_POISON_READY, IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index de93614726c9..4178f4e5dad7 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -728,8 +728,7 @@ static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev) adev->irq.ih_funcs = &navi10_ih_funcs; } -const struct amdgpu_ip_block_version navi10_ih_ip_block = -{ +const struct amdgpu_ip_block_version navi10_ih_ip_block = { .type = AMD_IP_BLOCK_TYPE_IH, .major = 5, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c index 1f52b4b1db03..05020141c0ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c @@ -89,7 +89,9 @@ static void nbio_v7_11_vpe_doorbell_range(struct amdgpu_device *adev, int instan bool use_doorbell, int doorbell_index, int doorbell_size) { - u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE); + u32 reg = instance == 0 ? + SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE_DOORBELL_RANGE) : + SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VPE1_DOORBELL_RANGE); u32 doorbell_range = RREG32_PCIE_PORT(reg); if (use_doorbell) { @@ -112,7 +114,10 @@ static void nbio_v7_11_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, int doorbell_index, int instance) { - u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE); + u32 reg = instance == 0 ? 
+ SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE): + SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN1_DOORBELL_RANGE); + u32 doorbell_range = RREG32_PCIE_PORT(reg); if (use_doorbell) { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index e90f33780803..b4723d68eab0 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev) u32 inst_mask; int i; + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = + SOC15_REG_OFFSET( + NBIO, 0, + regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) + << 2; WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE, 0xff & ~(adev->gfx.xcc_mask)); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 4bb5e10217bb..7566973ed8f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -296,6 +296,7 @@ enum psp_gfx_fw_type { GFX_FW_TYPE_VPEC_FW1 = 100, /* VPEC FW1 To Save VPE */ GFX_FW_TYPE_VPEC_FW2 = 101, /* VPEC FW2 To Save VPE */ GFX_FW_TYPE_VPE = 102, + GFX_FW_TYPE_JPEG_RAM = 128, /**< JPEG Command buffer */ GFX_FW_TYPE_P2S_TABLE = 129, GFX_FW_TYPE_MAX }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index efa37e3b7931..2395f1856962 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -506,7 +506,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) * before training, and restore it after training to avoid * VRAM corruption. */ - sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE; + sz = BIST_MEM_TRAINING_ENCROACHED_SIZE; if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) { DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n", diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index df1844d0800f..0da50ea46eaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -27,6 +27,7 @@ #include "amdgpu_ucode.h" #include "soc15_common.h" #include "psp_v13_0.h" +#include "amdgpu_ras.h" #include "mp/mp_13_0_2_offset.h" #include "mp/mp_13_0_2_sh_mask.h" @@ -52,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_1_toc.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_1_ta.bin"); /* For large FW files the time to complete can be very long */ #define USBC_PD_POLLING_LIMIT_S 240 @@ -100,6 +103,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 11): case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 1): err = psp_init_toc_microcode(psp, ucode_prefix); if (err) return err; @@ -187,11 +191,18 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp) static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; + int ret; if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { - psp_v13_0_wait_for_vmbx_ready(psp); + ret = psp_v13_0_wait_for_vmbx_ready(psp); + if (ret) + amdgpu_ras_query_boot_status(adev, 4); + + ret = psp_v13_0_wait_for_bootloader(psp); + if (ret) + amdgpu_ras_query_boot_status(adev, 4); - return psp_v13_0_wait_for_bootloader(psp); + return ret; } return 0; @@ 
-553,7 +564,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops) * before training, and restore it after training to avoid * VRAM corruption. */ - sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE; + sz = BIST_MEM_TRAINING_ENCROACHED_SIZE; if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) { dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n", @@ -763,81 +774,28 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp) return 0; } - -static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev, - uint32_t inst, - uint32_t boot_error) -{ - uint32_t socket_id; - uint32_t aid_id; - uint32_t hbm_id; - uint32_t reg_data; - - socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID); - aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID); - hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID); - - reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109); - dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n", - socket_id, aid_id, reg_data); - - if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING)) - dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n", - socket_id, aid_id, hbm_id); - - if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD)) - dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n", - socket_id, aid_id); - - if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING)) - dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n", - socket_id, aid_id); - - if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING)) - dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n", - socket_id, aid_id); - - if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING)) - dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n", - socket_id, aid_id); - - if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING)) - dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n", - socket_id, aid_id); - - if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST)) - dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n", - socket_id, aid_id, hbm_id); - - if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST)) - dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n", - socket_id, aid_id, hbm_id); -} - -static int psp_v13_0_query_boot_status(struct psp_context *psp) +static bool psp_v13_0_get_ras_capability(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - int inst_mask = adev->aid_mask; - uint32_t reg_data; - uint32_t i; - int ret = 0; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + u32 reg_data; - if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) - return 0; + /* query ras cap should be done from host side */ + if (amdgpu_sriov_vf(adev)) + return false; - if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10109) - return 0; + if (!con) + return false; - for_each_inst(i, inst_mask) { - reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126); - if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) { - psp_v13_0_boot_error_reporting(adev, i, reg_data); - ret = -EINVAL; - break; - } + if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) && + (!(adev->flags & AMD_IS_APU))) { + reg_data = 
RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127); + adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0)); + con->poison_supported = ((reg_data & GENMASK_ULL(24, 24)) >> 24) ? true : false; + return true; + } else { + return false; } - - return ret; } static const struct psp_funcs psp_v13_0_funcs = { @@ -862,7 +820,7 @@ static const struct psp_funcs psp_v13_0_funcs = { .update_spirom = psp_v13_0_update_spirom, .vbflash_stat = psp_v13_0_vbflash_status, .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, - .query_boot_status = psp_v13_0_query_boot_status, + .get_ras_capability = psp_v13_0_get_ras_capability, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c new file mode 100644 index 000000000000..78a95f8f370b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -0,0 +1,672 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <drm/drm_drv.h> +#include <linux/vmalloc.h> +#include "amdgpu.h" +#include "amdgpu_psp.h" +#include "amdgpu_ucode.h" +#include "soc15_common.h" +#include "psp_v14_0.h" + +#include "mp/mp_14_0_2_offset.h" +#include "mp/mp_14_0_2_sh_mask.h" + +MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin"); + +/* For large FW files the time to complete can be very long */ +#define USBC_PD_POLLING_LIMIT_S 240 + +/* Read USB-PD from LFB */ +#define GFX_CMD_USB_PD_USE_LFB 0x480 + +/* VBIOS gfl defines */ +#define MBOX_READY_MASK 0x80000000 +#define MBOX_STATUS_MASK 0x0000FFFF +#define MBOX_COMMAND_MASK 0x00FF0000 +#define MBOX_READY_FLAG 0x80000000 +#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2 +#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3 +#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4 + +/* memory training timeout define */ +#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 + +static int psp_v14_0_init_microcode(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + char ucode_prefix[30]; + int err = 0; + + amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); + + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + err = psp_init_sos_microcode(psp, ucode_prefix); + if (err) + return err; + break; + default: + BUG(); + } + + return 0; +} + +static bool psp_v14_0_is_sos_alive(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81); + + return sol_reg != 0x0; +} + +static int psp_v14_0_wait_for_bootloader(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + int ret; + int retry_loop; + + for (retry_loop = 0; retry_loop < 10; retry_loop++) { + /* Wait for bootloader to signify that is + ready having bit 31 of C2PMSG_35 set to 1 */ + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, + 0x80000000, + false); + + if (ret == 0) + return 0; + } + + return ret; +} + +static int psp_v14_0_bootloader_load_component(struct psp_context *psp, + struct psp_bin_desc *bin_desc, + enum psp_bootloader_cmd bl_cmd) +{ + int ret; + uint32_t psp_gfxdrv_command_reg = 0; + struct amdgpu_device *adev = psp->adev; + + /* Check tOS sign of life register to confirm sys driver and sOS + * are already been loaded. 
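+ * If so (non-zero sign-of-life), the component load is skipped below.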
+ */ + if (psp_v14_0_is_sos_alive(psp)) + return 0; + + ret = psp_v14_0_wait_for_bootloader(psp); + if (ret) + return ret; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + + /* Copy PSP KDB binary to memory */ + memcpy(psp->fw_pri_buf, bin_desc->start_addr, bin_desc->size_bytes); + + /* Provide the PSP KDB to bootloader */ + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, + (uint32_t)(psp->fw_pri_mc_addr >> 20)); + psp_gfxdrv_command_reg = bl_cmd; + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, + psp_gfxdrv_command_reg); + + ret = psp_v14_0_wait_for_bootloader(psp); + + return ret; +} + +static int psp_v14_0_bootloader_load_kdb(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE); +} + +static int psp_v14_0_bootloader_load_spl(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_TOS_SPL_TABLE); +} + +static int psp_v14_0_bootloader_load_sysdrv(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV); +} + +static int psp_v14_0_bootloader_load_soc_drv(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->soc_drv, PSP_BL__LOAD_SOCDRV); +} + +static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->intf_drv, PSP_BL__LOAD_INTFDRV); +} + +static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV); +} + +static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV); +} + + +static int psp_v14_0_bootloader_load_sos(struct psp_context *psp) +{ + int ret; + unsigned int psp_gfxdrv_command_reg = 0; + struct amdgpu_device *adev = psp->adev; + + /* Check sOS sign of life register to confirm sys driver and sOS + * are already been loaded. 
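+ * psp_v14_0_is_sos_alive() reads MPASP_SMN_C2PMSG_81 for the sign of life.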
+ */ + if (psp_v14_0_is_sos_alive(psp)) + return 0; + + ret = psp_v14_0_wait_for_bootloader(psp); + if (ret) + return ret; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + + /* Copy Secure OS binary to PSP memory */ + memcpy(psp->fw_pri_buf, psp->sos.start_addr, psp->sos.size_bytes); + + /* Provide the PSP secure OS to bootloader */ + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, + (uint32_t)(psp->fw_pri_mc_addr >> 20)); + psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV; + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, + psp_gfxdrv_command_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81), + RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), + 0, true); + + return ret; +} + +static int psp_v14_0_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + /* Write the ring destroy command*/ + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + /* Wait for response flag (bit 31) */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + } else { + /* Write the ring destroy command*/ + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + /* Wait for response flag (bit 31) */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + } + + return ret; +} + +static int psp_v14_0_ring_create(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + unsigned int psp_ring_reg = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + ret = psp_v14_0_ring_stop(psp, ring_type); + if (ret) { + DRM_ERROR("psp_v14_0_ring_stop_sriov failed!\n"); + return ret; + } + + /* Write low address of the ring to C2PMSG_102 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102, psp_ring_reg); + /* Write high address of the ring to C2PMSG_103 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_103, psp_ring_reg); + + /* Write the ring initialization command to C2PMSG_101 */ + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_INIT_GPCOM_RING); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_101 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), + 0x80000000, 0x8000FFFF, false); + + } else { + /* Wait for sOS ready for ring creation */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + if (ret) { + DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); + return ret; + } + + /* Write low address of the ring to C2PMSG_69 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_69, psp_ring_reg); + /* Write high address of the ring to C2PMSG_70 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_70, psp_ring_reg); + /* Write size of ring to C2PMSG_71 */ + psp_ring_reg = ring->ring_size; + WREG32_SOC15(MP0, 
0, regMPASP_SMN_C2PMSG_71, psp_ring_reg); + /* Write the ring initialization command to C2PMSG_64 */ + psp_ring_reg = ring_type; + psp_ring_reg = psp_ring_reg << 16; + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64, psp_ring_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, false); + } + + return ret; +} + +static int psp_v14_0_ring_destroy(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + ret = psp_v14_0_ring_stop(psp, ring_type); + if (ret) + DRM_ERROR("Fail to stop psp ring\n"); + + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + + return ret; +} + +static uint32_t psp_v14_0_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) + data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67); + + return data; +} + +static void psp_v14_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67, value); +} + +static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg) +{ + int ret; + int i; + uint32_t data_32; + int max_wait; + struct amdgpu_device *adev = psp->adev; + + data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20); + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, data_32); + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, msg); + + max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; + for (i = 0; i < max_wait; i++) { + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret == 0) + break; + } + if (i < max_wait) + ret = 0; + else + ret = -ETIME; + + dev_dbg(adev->dev, "training %s %s, cost %d @ %d ms\n", + (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long", + (ret == 0) ? 
"succeed" : "failed", + i, adev->usec_timeout/1000); + return ret; +} + + +static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops) +{ + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + uint32_t *pcache = (uint32_t *)ctx->sys_cache; + struct amdgpu_device *adev = psp->adev; + uint32_t p2c_header[4]; + uint32_t sz; + void *buf; + int ret, idx; + + if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { + dev_dbg(adev->dev, "Memory training is not supported.\n"); + return 0; + } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) { + dev_err(adev->dev, "Memory training initialization failure.\n"); + return -EINVAL; + } + + if (psp_v14_0_is_sos_alive(psp)) { + dev_dbg(adev->dev, "SOS is alive, skip memory training.\n"); + return 0; + } + + amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); + dev_dbg(adev->dev, "sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", + pcache[0], pcache[1], pcache[2], pcache[3], + p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); + + if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + dev_dbg(adev->dev, "Short training depends on restore.\n"); + ops |= PSP_MEM_TRAIN_RESTORE; + } + + if ((ops & PSP_MEM_TRAIN_RESTORE) && + pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) { + dev_dbg(adev->dev, "sys_cache[0] is invalid, restore depends on save.\n"); + ops |= PSP_MEM_TRAIN_SAVE; + } + + if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE && + !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE && + pcache[3] == p2c_header[3])) { + dev_dbg(adev->dev, "sys_cache is invalid or out-of-date, need save training data to sys_cache.\n"); + ops |= PSP_MEM_TRAIN_SAVE; + } + + if ((ops & PSP_MEM_TRAIN_SAVE) && + p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) { + dev_dbg(adev->dev, "p2c_header[0] is invalid, save depends on long training.\n"); + ops |= PSP_MEM_TRAIN_SEND_LONG_MSG; + } + + if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG; + ops |= PSP_MEM_TRAIN_SAVE; + } + + dev_dbg(adev->dev, "Memory training ops:%x.\n", ops); + + if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + /* + * Long training will encroach a certain amount on the bottom of VRAM; + * save the content from the bottom of VRAM to system memory + * before training, and restore it after training to avoid + * VRAM corruption. 
+ */ + sz = BIST_MEM_TRAINING_ENCROACHED_SIZE; + + if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) { + dev_err(adev->dev, "visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n", + adev->gmc.visible_vram_size, + adev->mman.aper_base_kaddr); + return -EINVAL; + } + + buf = vmalloc(sz); + if (!buf) { + dev_err(adev->dev, "failed to allocate system memory.\n"); + return -ENOMEM; + } + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); + ret = psp_v14_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); + if (ret) { + DRM_ERROR("Send long training msg failed.\n"); + vfree(buf); + drm_dev_exit(idx); + return ret; + } + + memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); + adev->hdp.funcs->flush_hdp(adev, NULL); + vfree(buf); + drm_dev_exit(idx); + } else { + vfree(buf); + return -ENODEV; + } + } + + if (ops & PSP_MEM_TRAIN_SAVE) { + amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false); + } + + if (ops & PSP_MEM_TRAIN_RESTORE) { + amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true); + } + + if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + ret = psp_v14_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ? + PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN); + if (ret) { + dev_err(adev->dev, "send training msg failed.\n"); + return ret; + } + } + ctx->training_cnt++; + return 0; +} + +static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t reg_status; + int ret, i = 0; + + /* + * LFB address which is aligned to 1MB address and has to be + * right-shifted by 20 so that LFB address can be passed on a 32-bit C2P + * register + */ + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fireup interrupt so PSP can pick up the address */ + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16)); + + /* FW load takes very long time */ + do { + msleep(1000); + reg_status = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35); + + if (reg_status & 0x80000000) + goto done; + + } while (++i < USBC_PD_POLLING_LIMIT_S); + + return -ETIME; +done: + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %04x\n", + reg_status & 0xFFFF); + return -EIO; + } + + return 0; +} + +static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (!ret) + *fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36); + + return ret; +} + +static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd) +{ + uint32_t reg_status = 0, reg_val = 0; + struct amdgpu_device *adev = psp->adev; + int ret; + + /* clear MBX ready (MBOX_READY_MASK bit is 0) and set update command */ + reg_val |= (cmd << 16); + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115, reg_val); + + /* Ring the doorbell */ + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_73, 1); + + if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE) + ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, 
regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT); + else + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, false); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, false); + if (ret) { + dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret); + return ret; + } + + reg_status = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115); + if ((reg_status & 0xFFFF) != 0) { + dev_err(adev->dev, "SPI cmd %x failed, fail status = %04x\n", + cmd, reg_status & 0xFFFF); + return -EIO; + } + + return 0; +} + +static int psp_v14_0_update_spirom(struct psp_context *psp, + uint64_t fw_pri_mc_addr) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + /* Confirm PSP is ready to start */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, false); + if (ret) { + dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); + return ret; + } + + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr)); + + ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO); + if (ret) + return ret; + + WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr)); + + ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI); + if (ret) + return ret; + + psp->vbflash_done = true; + + ret = psp_v14_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE); + if (ret) + return ret; + + return 0; +} + +static int psp_v14_0_vbflash_status(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + return RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_115); +} + +static const struct psp_funcs psp_v14_0_funcs = { + .init_microcode = psp_v14_0_init_microcode, + .bootloader_load_kdb = psp_v14_0_bootloader_load_kdb, + .bootloader_load_spl = psp_v14_0_bootloader_load_spl, + .bootloader_load_sysdrv = psp_v14_0_bootloader_load_sysdrv, + .bootloader_load_soc_drv = psp_v14_0_bootloader_load_soc_drv, + .bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv, + .bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv, + .bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv, + .bootloader_load_sos = psp_v14_0_bootloader_load_sos, + .ring_create = psp_v14_0_ring_create, + .ring_stop = psp_v14_0_ring_stop, + .ring_destroy = psp_v14_0_ring_destroy, + .ring_get_wptr = psp_v14_0_ring_get_wptr, + .ring_set_wptr = psp_v14_0_ring_set_wptr, + .mem_training = psp_v14_0_memory_training, + .load_usbc_pd_fw = psp_v14_0_load_usbc_pd_fw, + .read_usbc_pd_fw = psp_v14_0_read_usbc_pd_fw, + .update_spirom = psp_v14_0_update_spirom, + .vbflash_stat = psp_v14_0_vbflash_status +}; + +void psp_v14_0_set_psp_funcs(struct psp_context *psp) +{ + psp->funcs = &psp_v14_0_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h new file mode 100644 index 000000000000..dd18ba2cfad5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.h @@ -0,0 +1,32 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __PSP_V14_0_H__ +#define __PSP_V14_0_H__ + +#include "amdgpu_psp.h" + +#define PSP_SPIROM_UPDATE_TIMEOUT 60000 /* 60s */ + +void psp_v14_0_set_psp_funcs(struct psp_context *psp); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 8d5d86675a7f..07e19caf2bc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -57,22 +57,19 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev); MODULE_FIRMWARE("amdgpu/topaz_sdma.bin"); MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin"); -static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = -{ +static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = { SDMA0_REGISTER_OFFSET, SDMA1_REGISTER_OFFSET }; -static const u32 golden_settings_iceland_a11[] = -{ +static const u32 golden_settings_iceland_a11[] = { mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, }; -static const u32 iceland_mgcg_cgcg_init[] = -{ +static const u32 iceland_mgcg_cgcg_init[] = { mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 }; @@ -142,7 +139,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) case CHIP_TOPAZ: chip_name = "topaz"; break; - default: BUG(); + default: + BUG(); } for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1258,8 +1256,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } -const struct amdgpu_ip_block_version sdma_v2_4_ip_block = -{ +const struct amdgpu_ip_block_version sdma_v2_4_ip_block = { .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 2, .minor = 4, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 2d688dca26be..fec5a3d1c4bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin"); +#define mmSMNAID_AID0_MCA_SMU 0x03b30400 + #define WREG32_SDMA(instance, offset, value) \ WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value) #define RREG32_SDMA(instance, offset) \ @@ -2204,9 +2206,79 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = { .reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count, }; +static int 
sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle, + struct aca_bank *bank, enum aca_error_type type, + struct aca_bank_report *report, void *data) +{ + u64 status, misc0; + int ret; + + status = bank->regs[ACA_REG_IDX_STATUS]; + if ((type == ACA_ERROR_TYPE_UE && + ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || + (type == ACA_ERROR_TYPE_CE && + ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { + + ret = aca_bank_info_decode(bank, &report->info); + if (ret) + return ret; + + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + } + + return 0; +} + +/* CODE_SDMA0 - CODE_SDMA3, see the SMU driver interface (i/f) header file */ +static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 }; + +static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, + enum aca_error_type type, void *data) +{ + u32 instlo; + + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + if (aca_bank_check_error_codes(handle->adev, bank, + sdma_v4_4_2_err_codes, + ARRAY_SIZE(sdma_v4_4_2_err_codes))) + return false; + + return true; +} + +static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = { + .aca_bank_generate_report = sdma_v4_4_2_aca_bank_generate_report, + .aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid, +}; + +static const struct aca_info sdma_v4_4_2_aca_info = { + .hwip = ACA_HWIP_TYPE_SMU, + .mask = ACA_ERROR_UE_MASK, + .bank_ops = &sdma_v4_4_2_aca_bank_ops, +}; + +static int sdma_v4_4_2_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r; + + r = amdgpu_sdma_ras_late_init(adev, ras_block); + if (r) + return r; + + return amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__SDMA, + &sdma_v4_4_2_aca_info, NULL); +} + static struct amdgpu_sdma_ras sdma_v4_4_2_ras = { .ras_block = { .hw_ops = &sdma_v4_4_2_ras_hw_ops, + .ras_late_init = sdma_v4_4_2_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 3c7ddd219de8..4874ded45653 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin"); MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin"); MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin"); MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin"); +MODULE_FIRMWARE("amdgpu/sdma_6_1_1.bin"); #define SDMA1_REG_OFFSET 0x600 #define SDMA0_HYP_DEC_REG_START 0x5880 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 15033efec2ba..c64c01e2944a 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1298,10 +1298,32 @@ static int soc15_common_suspend(void *handle) return soc15_common_hw_fini(adev); } +static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + /* Reset on resume in the following suspend-abort cases: * 1) APUs only for now; the dGPU side has not been verified yet. * 2) The S3 suspend was aborted and the TOS is already launched.
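 * A non-zero sign-of-life value in MP0_SMN_C2PMSG_81 (sol_reg) confirms the sys driver and the TOS are already loaded.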
+ */ + if (adev->flags & AMD_IS_APU && adev->in_s3 && + !adev->suspend_complete && + sol_reg) + return true; + + return false; +} + static int soc15_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc15_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); + soc15_asic_reset(adev); + } return soc15_common_hw_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 48c6efcdeac9..5f81c264e310 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = { @@ -711,6 +711,7 @@ static int soc21_common_early_init(void *handle) AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG_DPG | AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_GFX_PG; @@ -865,6 +866,7 @@ static int soc21_common_set_clockgating_state(void *handle, case IP_VERSION(7, 7, 0): case IP_VERSION(7, 7, 1): case IP_VERSION(7, 11, 0): + case IP_VERSION(7, 11, 1): adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 879bb7af297c..056d4df8fa1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -36,6 +36,9 @@ enum ras_command { TA_RAS_COMMAND__ENABLE_FEATURES = 0, TA_RAS_COMMAND__DISABLE_FEATURES, TA_RAS_COMMAND__TRIGGER_ERROR, + TA_RAS_COMMAND__QUERY_BLOCK_INFO, + TA_RAS_COMMAND__QUERY_SUB_BLOCK_INFO, + TA_RAS_COMMAND__QUERY_ADDRESS, }; enum ta_ras_status { @@ -105,6 +108,11 @@ enum ta_ras_error_type { TA_RAS_ERROR__POISON = 8, }; +enum ta_ras_address_type { + TA_RAS_MCA_TO_PA, + TA_RAS_PA_TO_MCA, +}; + /* Input/output structures for RAS commands */ /**********************************************************/ @@ -133,12 +141,38 @@ struct ta_ras_init_flags { uint8_t channel_dis_num; }; +struct ta_ras_mca_addr { + uint64_t err_addr; + uint32_t ch_inst; + uint32_t umc_inst; + uint32_t node_inst; +}; + +struct ta_ras_phy_addr { + uint64_t pa; + uint32_t bank; + uint32_t channel_idx; +}; + +struct ta_ras_query_address_input { + enum ta_ras_address_type addr_type; + struct ta_ras_mca_addr ma; + struct ta_ras_phy_addr pa; +}; + struct ta_ras_output_flags { uint8_t ras_init_success_flag; uint8_t err_inject_switch_disable_flag; uint8_t reg_access_failure_flag; }; +struct ta_ras_query_address_output { + /* don't use the flags here */ + struct ta_ras_output_flags flags; + struct ta_ras_mca_addr ma; + struct 
ta_ras_phy_addr pa; +}; + /* Common input structure for RAS callbacks */ /**********************************************************/ union ta_ras_cmd_input { @@ -146,12 +180,14 @@ union ta_ras_cmd_input { struct ta_ras_enable_features_input enable_features; struct ta_ras_disable_features_input disable_features; struct ta_ras_trigger_error_input trigger_error; + struct ta_ras_query_address_input address; uint32_t reserve_pad[256]; }; union ta_ras_cmd_output { struct ta_ras_output_flags flags; + struct ta_ras_query_address_output address; uint32_t reserve_pad[256]; }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 7458a218e89d..14ef7a24be7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -89,12 +89,28 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev) umc_v12_0_reset_error_count_per_channel, NULL); } +bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status) +{ + dev_info(adev->dev, + "MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n", + mc_umc_status, + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val), + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison), + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred), + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC), + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC), + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) + ); + + return (amdgpu_ras_is_poison_mode_supported(adev) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)); +} + bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status) { - if (amdgpu_ras_is_poison_mode_supported(adev) && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)) - return true; + if (umc_v12_0_is_deferred_error(adev, mc_umc_status)) + return false; return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || @@ -104,9 +120,7 @@ bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_um bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status) { - if (amdgpu_ras_is_poison_mode_supported(adev) && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)) + if (umc_v12_0_is_deferred_error(adev, mc_umc_status)) return false; return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && @@ -119,9 +133,10 @@ bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_ !(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))))); } -static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, +static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev, uint64_t umc_reg_offset, - unsigned long *error_count) + unsigned long *error_count, + check_error_type_func error_type_func) { uint64_t mc_umc_status; uint64_t mc_umc_status_addr; @@ -129,31 +144,11 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); - /* 
Rely on MCUMC_STATUS for correctable error counter - * MCUMC_STATUS is a 64 bit register - */ + /* Check MCUMC_STATUS */ mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if (umc_v12_0_is_correctable_error(adev, mc_umc_status)) - *error_count += 1; -} - -static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev, - uint64_t umc_reg_offset, - unsigned long *error_count) -{ - uint64_t mc_umc_status; - uint64_t mc_umc_status_addr; - - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); - - /* Check the MCUMC_STATUS. */ - mc_umc_status = - RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - - if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) + if (error_type_func(adev, mc_umc_status)) *error_count += 1; } @@ -162,7 +157,7 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev, uint32_t ch_inst, void *data) { struct ras_err_data *err_data = (struct ras_err_data *)data; - unsigned long ue_count = 0, ce_count = 0; + unsigned long ue_count = 0, ce_count = 0, de_count = 0; /* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3], * which can be used as die ID directly */ @@ -174,11 +169,16 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev, uint64_t umc_reg_offset = get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst); - umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count); - umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count); + umc_v12_0_query_error_count_per_type(adev, umc_reg_offset, + &ce_count, umc_v12_0_is_correctable_error); + umc_v12_0_query_error_count_per_type(adev, umc_reg_offset, + &ue_count, umc_v12_0_is_uncorrectable_error); + umc_v12_0_query_error_count_per_type(adev, umc_reg_offset, + &de_count, umc_v12_0_is_deferred_error); amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); + amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count); return 0; } @@ -203,14 +203,14 @@ static bool umc_v12_0_bit_wise_xor(uint32_t val) return result; } -static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, - struct ras_err_data *err_data, uint64_t err_addr, - uint32_t ch_inst, uint32_t umc_inst, - uint32_t node_inst) +static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev, + uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst, + uint32_t node_inst, + struct ta_ras_query_address_output *addr_out) { uint32_t channel_index, i; - uint64_t soc_pa, na, retired_page, column; - uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row, row_xor; + uint64_t na, soc_pa; + uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row; uint32_t bank0, bank1, bank2, bank3, bank; bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL; @@ -260,12 +260,44 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, /* the umc channel bits are not original values, they are hashed */ UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa); + addr_out->pa.pa = soc_pa; + addr_out->pa.bank = bank; + addr_out->pa.channel_idx = channel_index; +} + +static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, + struct ras_err_data *err_data, uint64_t err_addr, + uint32_t ch_inst, uint32_t umc_inst, + uint32_t node_inst) +{ + uint32_t col, row, row_xor, bank, channel_index; + uint64_t soc_pa, retired_page, column; + struct ta_ras_query_address_input 
addr_in; + struct ta_ras_query_address_output addr_out; + + addr_in.addr_type = TA_RAS_MCA_TO_PA; + addr_in.ma.err_addr = err_addr; + addr_in.ma.ch_inst = ch_inst; + addr_in.ma.umc_inst = umc_inst; + addr_in.ma.node_inst = node_inst; + + if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) + /* fallback to old path if fail to get pa from psp */ + umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst, + node_inst, &addr_out); + + soc_pa = addr_out.pa.pa; + bank = addr_out.pa.bank; + channel_index = addr_out.pa.channel_idx; + + col = (err_addr >> 1) & 0x1fULL; + row = (err_addr >> 10) & 0x3fffULL; + row_xor = row ^ (0x1ULL << 13); /* clear [C3 C2] in soc physical address */ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT); /* clear [C4] in soc physical address */ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT); - row_xor = row ^ (0x1ULL << 13); /* loop for all possibilities of [C4 C3 C2] */ for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) { retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT); @@ -316,10 +348,7 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev, } /* calculate error address if ue error is detected */ - if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 && - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1) { - + if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) { mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0); @@ -385,45 +414,69 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade { struct ras_err_node *err_node; uint64_t mc_umc_status; + struct ras_err_info *err_info; + struct ras_err_addr *mca_err_addr, *tmp; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; for_each_ras_error(err_node, err_data) { - mc_umc_status = err_node->err_info.err_addr.err_status; - if (!mc_umc_status) + err_info = &err_node->err_info; + if (list_empty(&err_info->err_addr_list)) continue; - if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) { - uint64_t mca_addr, err_addr, mca_ipid; - uint32_t InstanceIdLo; - struct amdgpu_smuio_mcm_config_info *mcm_info; - - mcm_info = &err_node->err_info.mcm_info; - mca_addr = err_node->err_info.err_addr.err_addr; - mca_ipid = err_node->err_info.err_addr.err_ipid; - - err_addr = REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); - InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo); - - dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n", - mca_ipid, - mcm_info->die_id, - MCA_IPID_LO_2_UMC_INST(InstanceIdLo), - MCA_IPID_LO_2_UMC_CH(InstanceIdLo), - err_addr); - - umc_v12_0_convert_error_address(adev, - err_data, err_addr, - MCA_IPID_LO_2_UMC_CH(InstanceIdLo), - MCA_IPID_LO_2_UMC_INST(InstanceIdLo), - mcm_info->die_id); - - /* Clear umc error address content */ - memset(&err_node->err_info.err_addr, - 0, sizeof(err_node->err_info.err_addr)); + list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) { + mc_umc_status = mca_err_addr->err_status; + if (mc_umc_status && + (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) || + umc_v12_0_is_deferred_error(adev, mc_umc_status))) { + uint64_t mca_addr, err_addr, mca_ipid; + uint32_t InstanceIdLo; + + mca_addr = mca_err_addr->err_addr; + mca_ipid = mca_err_addr->err_ipid; + + err_addr = REG_GET_FIELD(mca_addr, + MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + InstanceIdLo = REG_GET_FIELD(mca_ipid, 
MCMP1_IPIDT0, InstanceIdLo); + + dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n", + mca_ipid, + err_info->mcm_info.die_id, + MCA_IPID_LO_2_UMC_INST(InstanceIdLo), + MCA_IPID_LO_2_UMC_CH(InstanceIdLo), + err_addr); + + umc_v12_0_convert_error_address(adev, + err_data, err_addr, + MCA_IPID_LO_2_UMC_CH(InstanceIdLo), + MCA_IPID_LO_2_UMC_INST(InstanceIdLo), + err_info->mcm_info.die_id); + } + + /* Delete error address node from list and free memory */ + amdgpu_ras_del_mca_err_addr(err_info, mca_err_addr); } } } +static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev, + enum amdgpu_mca_error_type type, void *ras_error_status) +{ + uint64_t mc_umc_status = *(uint64_t *)ras_error_status; + + switch (type) { + case AMDGPU_MCA_ERROR_TYPE_UE: + return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status); + case AMDGPU_MCA_ERROR_TYPE_CE: + return umc_v12_0_is_correctable_error(adev, mc_umc_status); + case AMDGPU_MCA_ERROR_TYPE_DE: + return umc_v12_0_is_deferred_error(adev, mc_umc_status); + default: + return false; + } + + return false; +} + static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev) { amdgpu_umc_loop_channels(adev, @@ -444,12 +497,71 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = { .query_ras_error_address = umc_v12_0_query_ras_error_address, }; +static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, + struct aca_bank_report *report, void *data) +{ + struct amdgpu_device *adev = handle->adev; + u64 status; + int ret; + + ret = aca_bank_info_decode(bank, &report->info); + if (ret) + return ret; + + status = bank->regs[ACA_REG_IDX_STATUS]; + switch (type) { + case ACA_ERROR_TYPE_UE: + if (umc_v12_0_is_uncorrectable_error(adev, status)) { + report->count[type] = 1; + } + break; + case ACA_ERROR_TYPE_CE: + if (umc_v12_0_is_correctable_error(adev, status)) { + report->count[type] = 1; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct aca_bank_ops umc_v12_0_aca_bank_ops = { + .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report, +}; + +const struct aca_info umc_v12_0_aca_info = { + .hwip = ACA_HWIP_TYPE_UMC, + .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK, + .bank_ops = &umc_v12_0_aca_bank_ops, +}; + +static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int ret; + + ret = amdgpu_umc_ras_late_init(adev, ras_block); + if (ret) + return ret; + + ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC, + &umc_v12_0_aca_info, NULL); + if (ret) + return ret; + + return 0; +} + struct amdgpu_umc_ras umc_v12_0_ras = { .ras_block = { .hw_ops = &umc_v12_0_ras_hw_ops, + .ras_late_init = umc_v12_0_ras_late_init, }, .err_cnt_init = umc_v12_0_err_cnt_init, .query_ras_poison_mode = umc_v12_0_query_ras_poison_mode, .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count, .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address, + .check_ecc_err_status = umc_v12_0_check_ecc_err_status, }; + diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h index e8de3a92251a..5973bfb14fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -121,9 +121,12 @@ (((_ipid_lo) >> 12) & 0xF)) #define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7) +bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status); bool 
umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); +typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status); + extern const uint32_t umc_v12_0_channel_idx_tbl[] [UMC_V12_0_UMC_INSTANCE_NUM] diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c index 0d6b50528d76..97fa88ed770c 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c @@ -25,7 +25,7 @@ static void umc_v6_0_init_registers(struct amdgpu_device *adev) { - unsigned i,j; + unsigned i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c new file mode 100644 index 000000000000..d6ee9958ba5f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -0,0 +1,1339 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_vcn.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" +#include "soc15_hw_ip.h" +#include "vcn_v2_0.h" + +#include "vcn/vcn_5_0_0_offset.h" +#include "vcn/vcn_5_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h" +#include "vcn_v5_0_0.h" + +#include <drm/drm_drv.h> + +static int amdgpu_ih_clientid_vcns[] = { + SOC15_IH_CLIENTID_VCN, + SOC15_IH_CLIENTID_VCN1 +}; + +static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev); +static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev); +static int vcn_v5_0_0_set_powergating_state(void *handle, + enum amd_powergating_state state); +static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state); +static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring); + +/** + * vcn_v5_0_0_early_init - set function pointers and load microcode + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + * Load microcode from filesystem + */ +static int vcn_v5_0_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* re-use enc ring as unified ring */ + adev->vcn.num_enc_rings = 1; + + vcn_v5_0_0_set_unified_ring_funcs(adev); + vcn_v5_0_0_set_irq_funcs(adev); + + return amdgpu_vcn_early_init(adev); +} + +/** + * vcn_v5_0_0_sw_init - sw init for VCN block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int vcn_v5_0_0_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, r; + + r = amdgpu_vcn_sw_init(adev); + if (r) + return r; + + amdgpu_vcn_setup_ucode(adev); + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + + if (adev->vcn.harvest_config & (1 << i)) + continue; + + atomic_set(&adev->vcn.inst[i].sched_score, 0); + + /* VCN UNIFIED TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], + VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); + if (r) + return r; + + /* VCN POISON TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], + VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq); + if (r) + return r; + + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i; + + ring->vm_hub = AMDGPU_MMHUB0(0); + sprintf(ring->name, "vcn_unified_%d", i); + + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, + AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score); + if (r) + return r; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); + fw_shared->sq.is_enabled = 1; + + if (amdgpu_vcnfw_log) + amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); + } + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode; + + return 0; +} + +/** + * vcn_v5_0_0_sw_fini - sw fini for VCN block + * + * @handle: amdgpu_device pointer + * + * VCN suspend and free up sw allocation + */ +static int vcn_v5_0_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, r, idx; + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + 
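+ /* Clear the unified-queue flags in each instance's fw_shared buffer so the firmware stops referencing it before the VCN suspend and teardown below. */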
+ if (adev->vcn.harvest_config & (1 << i)) + continue; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->present_flag_0 = 0; + fw_shared->sq.is_enabled = 0; + } + + drm_dev_exit(idx); + } + + r = amdgpu_vcn_suspend(adev); + if (r) + return r; + + r = amdgpu_vcn_sw_fini(adev); + + return r; +} + +/** + * vcn_v5_0_0_hw_init - start and test VCN block + * + * @handle: amdgpu_device pointer + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vcn_v5_0_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int i, r; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ring = &adev->vcn.inst[i].ring_enc[0]; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i); + + r = amdgpu_ring_test_helper(ring); + if (r) + goto done; + } + +done: + if (!r) + DRM_INFO("VCN decode and encode initialized successfully (under %s).\n", + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode"); + + return r; +} + +/** + * vcn_v5_0_0_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the VCN block, mark ring as not ready any more + */ +static int vcn_v5_0_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); + } + + return 0; +} + +/** + * vcn_v5_0_0_suspend - suspend VCN block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend VCN block + */ +static int vcn_v5_0_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = vcn_v5_0_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_vcn_suspend(adev); + + return r; +} + +/** + * vcn_v5_0_0_resume - resume VCN block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init VCN block + */ +static int vcn_v5_0_0_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + r = vcn_v5_0_0_hw_init(adev); + + return r; +} + +/** + * vcn_v5_0_0_mc_resume - memory controller programming + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Let the VCN memory controller know its offsets + */ +static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst) +{ + uint32_t offset, size; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi)); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr)); + offset = size; + WREG32_SOC15(VCN,
inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size); + + /* cache window 1: stack */ + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + + /* cache window 2: context */ + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); +} + +/** + * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * @indirect: indirectly write sram + * + * Let the VCN memory controller know its offsets with dpg mode + */ +static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + uint32_t offset, size; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (!indirect) { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } + offset = 0; + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + offset = size; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), +
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); + } + + if (!indirect) + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + else + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + + /* cache window 1: stack */ + if (!indirect) { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + + /* cache window 2: context */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + + /* non-cache window */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + + return; +} + +/** + * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Disable static power gating for VCN block + */ +static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data = 0; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0, + 
UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK); + + data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK); + + data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK); + + data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK); + } else { + data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0, + UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK); + + data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0, + UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK); + + data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0, + UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK); + + data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0, + UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK); + } + + data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS); + data &= ~0x103; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) + data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | + UVD_POWER_STATUS__UVD_PG_EN_MASK; + + WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data); + return; +} + +/** + * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Enable static power gating for VCN block + */ +static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + /* Before power off, this indicator has to be turned on */ + data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS); + data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; + data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; + WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data); + + data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK); + + data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK); + + data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT; + WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK); + + data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT; + 
WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, + 1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT, + UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK); + } + return; +} + +/** + * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Disable clock gating for VCN block + */ +static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst) +{ + return; +} + +#if 0 +/** + * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode + * + * @adev: amdgpu_device pointer + * @sram_sel: sram select + * @inst_idx: instance number index + * @indirect: indirectly write sram + * + * Disable clock gating for VCN block with dpg mode + */ +static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel, + int inst_idx, uint8_t indirect) +{ + return; +} +#endif + +/** + * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Enable clock gating for VCN block + */ +static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst) +{ + return; +} + +/** + * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * @indirect: indirectly write sram + * + * Start VCN block with dpg mode + */ +static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_ring *ring; + uint32_t tmp; + + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* enable dynamic power gating mode */ + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS); + tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; + tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; + WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp); + + if (indirect) + adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; + + /* enable VCPU clock */ + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* disable master interrupt */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect); + + /* setup regUVD_LMI_CTRL */ + tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + 0x00100000L); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect); + + vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect); + + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* enable LMI MC and UMC channels */ + tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, 
indirect); + + /* enable master interrupt */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, inst_idx, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); + + if (indirect) + amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); + + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + + WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4); + + tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); + WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0); + + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR); + WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp); + ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); + + tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB1_EN_MASK; + WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + + WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + + return 0; +} + +/** + * vcn_v5_0_0_start - VCN start + * + * @adev: amdgpu_device pointer + * + * Start VCN block + */ +static int vcn_v5_0_0_start(struct amdgpu_device *adev) +{ + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_ring *ring; + uint32_t tmp; + int i, j, k, r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + continue; + } + + /* disable VCN power gating */ + vcn_v5_0_0_disable_static_power_gating(adev, i); + + /* set VCN status busy */ + tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, i, regUVD_STATUS, tmp); + + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + + /* setup regUVD_LMI_CTRL */ + tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL); + WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + + vcn_v5_0_0_mc_resume(adev, i); + + /* VCN global tiling registers */ + WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* release VCPU reset to boot */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + for (j = 0; j < 10; ++j) { + uint32_t status; 
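+ /* Poll UVD_STATUS for the VCPU boot report in bit 1, up to 100 x 10 ms per attempt; on timeout, pulse BLK_RST to reset the VCPU and retry, 10 attempts in total. */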
+ + for (k = 0; k < 100; ++k) { + status = RREG32_SOC15(VCN, i, regUVD_STATUS); + if (status & 2) + break; + mdelay(10); + if (amdgpu_emu_mode == 1) + msleep(1); + } + + if (amdgpu_emu_mode == 1) { + r = -1; + if (status & 2) { + r = 0; + break; + } + } else { + r = 0; + if (status & 2) + break; + + dev_err(adev->dev, + "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i); + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + mdelay(10); + r = -1; + } + } + + if (r) { + dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i); + return r; + } + + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + ring = &adev->vcn.inst[i].ring_enc[0]; + WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + + WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4); + + tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); + WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0); + + tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR); + WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp); + ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR); + + tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB1_EN_MASK; + WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + } + + return 0; +} + +/** + * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * + * Stop VCN block with dpg mode + */ +static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) +{ + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; + uint32_t tmp; + + vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state); + + /* Wait for power status to be 1 */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + return; +} + +/** + * vcn_v5_0_0_stop - VCN stop + * + * @adev: amdgpu_device pointer + * + * Stop VCN block + */ +static int vcn_v5_0_0_stop(struct amdgpu_device *adev) +{ + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + uint32_t tmp; + int i, r = 0; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + vcn_v5_0_0_stop_dpg_mode(adev, i); + continue; + } + + /* wait for vcn idle */ + r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 
0x7); + if (r) + return r; + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* disable LMI UMC channel */ + tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2); + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; + WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp); + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* block VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* reset VCPU */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + /* disable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + + /* apply soft reset */ + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + + /* clear status */ + WREG32_SOC15(VCN, i, regUVD_STATUS, 0); + + /* enable VCN power gating */ + vcn_v5_0_0_enable_static_power_gating(adev, i); + } + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + + return 0; +} + +/** + * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * @new_state: pause state + * + * Pause dpg mode for VCN block + */ +static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, + struct dpg_pause_state *new_state) +{ + uint32_t reg_data = 0; + int ret_code; + + /* pause/unpause if state is changed */ + if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { + DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d", + adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); + reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + if (!ret_code) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + } + } else { + /* unpause dpg, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data); + } + adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + +/** + * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware unified read pointer + */ +static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR); +} + +/** + * 
vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware unified write pointer + */ +static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + if (ring->use_doorbell) + return *ring->wptr_cpu_addr; + else + return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR); +} + +/** + * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the enc write pointer to the hardware + */ +static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + if (ring->use_doorbell) { + *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_ENC, + .align_mask = 0x3f, + .nop = VCN_ENC_CMD_NO_OP, + .get_rptr = vcn_v5_0_0_unified_ring_get_rptr, + .get_wptr = vcn_v5_0_0_unified_ring_get_wptr, + .set_wptr = vcn_v5_0_0_unified_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + + 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ + 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ + 1, /* vcn_v2_0_enc_ring_insert_end */ + .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ + .emit_ib = vcn_v2_0_enc_ring_emit_ib, + .emit_fence = vcn_v2_0_enc_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_enc_ring_test_ring, + .test_ib = amdgpu_vcn_unified_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .insert_end = vcn_v2_0_enc_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +/** + * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions + * + * @adev: amdgpu_device pointer + * + * Set unified ring functions + */ +static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs; + adev->vcn.inst[i].ring_enc[0].me = i; + + DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i); + } +} + +/** + * vcn_v5_0_0_is_idle - check VCN block is idle + * + * @handle: amdgpu_device pointer + * + * Check whether VCN block is idle + */ +static bool vcn_v5_0_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 1; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE); + } + + return ret; +} + +/** + * vcn_v5_0_0_wait_for_idle - wait for VCN block idle + * + * @handle: amdgpu_device pointer + * + * Wait for VCN block idle + */ +static int vcn_v5_0_0_wait_for_idle(void 
*handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 0; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); + if (ret) + return ret; + } + + return ret; +} + +/** + * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state + * + * @handle: amdgpu_device pointer + * @state: clock gating state + * + * Set VCN block clockgating state + */ +static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + if (enable) { + if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE) + return -EBUSY; + vcn_v5_0_0_enable_clock_gating(adev, i); + } else { + vcn_v5_0_0_disable_clock_gating(adev, i); + } + } + + return 0; +} + +/** + * vcn_v5_0_0_set_powergating_state - set VCN block powergating state + * + * @handle: amdgpu_device pointer + * @state: power gating state + * + * Set VCN block powergating state + */ +static int vcn_v5_0_0_set_powergating_state(void *handle, enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (state == adev->vcn.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = vcn_v5_0_0_stop(adev); + else + ret = vcn_v5_0_0_start(adev); + + if (!ret) + adev->vcn.cur_state = state; + + return ret; +} + +/** + * vcn_v5_0_0_set_interrupt_state - set VCN block interrupt state + * + * @adev: amdgpu_device pointer + * @source: interrupt sources + * @type: interrupt types + * @state: interrupt states + * + * Set VCN block interrupt state + */ +static int vcn_v5_0_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, + unsigned type, enum amdgpu_interrupt_state state) +{ + return 0; +} + +/** + * vcn_v5_0_0_process_interrupt - process VCN block interrupt + * + * @adev: amdgpu_device pointer + * @source: interrupt sources + * @entry: interrupt entry from clients and sources + * + * Process VCN block interrupt + */ +static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t ip_instance; + + switch (entry->client_id) { + case SOC15_IH_CLIENTID_VCN: + ip_instance = 0; + break; + case SOC15_IH_CLIENTID_VCN1: + ip_instance = 1; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + + DRM_DEBUG("IH: VCN TRAP\n"); + + switch (entry->src_id) { + case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: + amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); + break; + case VCN_4_0__SRCID_UVD_POISON: + amdgpu_vcn_process_poison_irq(adev, source, entry); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = { + .set = vcn_v5_0_0_set_interrupt_state, + .process = vcn_v5_0_0_process_interrupt, +}; + +/** + * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions + * + * @adev: amdgpu_device pointer + * + * Set VCN block interrupt irq functions + */ +static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < 
adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs; + } +} + +static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { + .name = "vcn_v5_0_0", + .early_init = vcn_v5_0_0_early_init, + .late_init = NULL, + .sw_init = vcn_v5_0_0_sw_init, + .sw_fini = vcn_v5_0_0_sw_fini, + .hw_init = vcn_v5_0_0_hw_init, + .hw_fini = vcn_v5_0_0_hw_fini, + .suspend = vcn_v5_0_0_suspend, + .resume = vcn_v5_0_0_resume, + .is_idle = vcn_v5_0_0_is_idle, + .wait_for_idle = vcn_v5_0_0_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = vcn_v5_0_0_set_clockgating_state, + .set_powergating_state = vcn_v5_0_0_set_powergating_state, +}; + +const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_VCN, + .major = 5, + .minor = 0, + .rev = 0, + .funcs = &vcn_v5_0_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h new file mode 100644 index 000000000000..51bbccd4360f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h @@ -0,0 +1,37 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __VCN_V5_0_0_H__ +#define __VCN_V5_0_0_H__ + +#define VCN_VID_SOC_ADDRESS 0x1FC00 +#define VCN_AON_SOC_ADDRESS 0x1F800 +#define VCN1_VID_SOC_ADDRESS 0x48300 +#define VCN1_AON_SOC_ADDRESS 0x48000 + +#define VCN_VID_IP_ADDRESS 0x0 +#define VCN_AON_IP_ADDRESS 0x30000 + +extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block; + +#endif /* __VCN_V5_0_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index d1caaf0e6a7c..2e9b64edb8d2 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -2518,7 +2518,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { 0x8b6eff7b, 0x00000400, 0xbfa20045, 0xbf830010, 0xb8fbf803, 0xbfa0fffa, - 0x8b6eff7b, 0x00000900, + 0x8b6eff7b, 0x00160900, 0xbfa20015, 0x8b6eff7b, 0x000071ff, 0xbfa10008, 0x8b6fff7b, 0x00007080, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 71b3dc0c7363..7568ff3af978 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -81,6 +81,11 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21 var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800 var SQ_WAVE_TRAPSTS_EXCP_HI_MASK = 0x7000 +#if ASIC_FAMILY >= CHIP_PLUM_BONITO +var SQ_WAVE_TRAPSTS_WAVE_START_MASK = 0x20000 +var SQ_WAVE_TRAPSTS_WAVE_END_MASK = 0x40000 +var SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK = 0x100000 +#endif var SQ_WAVE_MODE_EXCP_EN_SHIFT = 12 var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT = 19 @@ -92,6 +97,16 @@ var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000 var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800 +#if ASIC_FAMILY < CHIP_PLUM_BONITO +var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK +#else +var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_TRAPSTS_MEM_VIOL_MASK |\ + SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK |\ + SQ_WAVE_TRAPSTS_WAVE_START_MASK |\ + SQ_WAVE_TRAPSTS_WAVE_END_MASK |\ + SQ_WAVE_TRAPSTS_TRAP_AFTER_INST_MASK +#endif + // bits [31:24] unused by SPI debug data var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31 var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000 @@ -224,7 +239,7 @@ L_NOT_HALTED: // Check non-maskable exceptions. memory_violation, illegal_instruction // and xnack_error exceptions always cause the wave to enter the trap // handler. - s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK + s_and_b32 ttmp2, s_save_trapsts, S_TRAPSTS_NON_MASKABLE_EXCP_MASK s_cbranch_scc1 L_FETCH_2ND_TRAP // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi. 
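To make the mask arithmetic in the cwsr_trap_handler_gfx10.asm hunk above concrete, here is a minimal standalone C model of how S_TRAPSTS_NON_MASKABLE_EXCP_MASK is composed and tested. This is an illustrative sketch, not driver code: the mask values are copied from the .asm hunk, except SQ_WAVE_TRAPSTS_MEM_VIOL_MASK, whose definition lies outside this hunk and is assumed here.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TRAPSTS_MEM_VIOL_MASK        0x100    /* assumed; defined outside the hunk */
    #define TRAPSTS_ILLEGAL_INST_MASK    0x800
    #define TRAPSTS_WAVE_START_MASK      0x20000
    #define TRAPSTS_WAVE_END_MASK        0x40000
    #define TRAPSTS_TRAP_AFTER_INST_MASK 0x100000

    /* Mirrors the #if ASIC_FAMILY < CHIP_PLUM_BONITO split in the .asm. */
    static uint32_t non_maskable_excp_mask(bool plum_bonito_or_later)
    {
        uint32_t mask = TRAPSTS_MEM_VIOL_MASK | TRAPSTS_ILLEGAL_INST_MASK;

        if (plum_bonito_or_later)
            mask |= TRAPSTS_WAVE_START_MASK |
                    TRAPSTS_WAVE_END_MASK |
                    TRAPSTS_TRAP_AFTER_INST_MASK;
        return mask;
    }

    int main(void)
    {
        uint32_t trapsts = TRAPSTS_WAVE_END_MASK; /* example status word */

        /* Equivalent of: s_and_b32 ttmp2, s_save_trapsts, MASK;
         * s_cbranch_scc1 L_FETCH_2ND_TRAP */
        if (trapsts & non_maskable_excp_mask(true))
            puts("non-maskable exception: fetch second-level trap handler");
        else
            puts("maskable: check trapsts.excp / trapsts.excp_hi next");
        return 0;
    }

The effect of the patch is visible here: on pre-Plum Bonito parts a wave_end bit falls through to the maskable path, while on newer parts it now always enters the trap handler.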
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index cd8e459201f1..002b08fa632f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -55,6 +55,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -64,6 +65,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = { /* Scalar L1 Instruction Cache (in SQC module) per bank */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -73,6 +75,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = { /* Scalar L1 Data Cache (in SQC module) per bank */ .cache_size = 8, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -88,6 +91,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -95,8 +99,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { }, { /* Scalar L1 Instruction Cache (in SQC module) per bank */ - .cache_size = 8, + .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -104,8 +109,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { }, { /* Scalar L1 Data Cache (in SQC module) per bank. */ - .cache_size = 4, + .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -135,6 +141,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -144,6 +151,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -153,6 +161,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -162,6 +171,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 4096, .cache_level = 2, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -174,6 +184,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -183,6 +194,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | 
CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -192,6 +204,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -201,6 +214,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 1024, .cache_level = 2, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -213,6 +227,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -222,6 +237,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -231,6 +247,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -240,6 +257,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 1024, .cache_level = 2, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -252,6 +270,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -261,6 +280,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -270,6 +290,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -279,6 +300,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 2048, .cache_level = 2, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -291,6 +313,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -300,6 +323,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -309,6 +333,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | 
CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -318,6 +343,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 8192, .cache_level = 2, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -330,6 +356,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -339,6 +366,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -348,6 +376,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -357,6 +386,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 8192, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -369,6 +399,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -378,6 +409,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -387,6 +419,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -396,6 +429,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -405,6 +439,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 4096, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -417,6 +452,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -426,6 +462,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -435,6 +472,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE 
| CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -444,6 +482,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -453,6 +492,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 1024, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -465,6 +505,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -474,6 +515,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -483,6 +525,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -492,6 +535,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -501,6 +545,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 2048, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -513,6 +558,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -522,6 +568,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -531,6 +578,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -540,6 +588,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -549,6 +598,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 4096, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -558,6 +608,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = { /* L3 Data Cache per GPU */ .cache_size = 128*1024, .cache_level = 3, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED 
| CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -570,6 +621,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -579,6 +631,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -588,6 +641,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -597,6 +651,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -606,6 +661,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 3072, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -615,6 +671,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = { /* L3 Data Cache per GPU */ .cache_size = 96*1024, .cache_level = 3, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -627,6 +684,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -636,6 +694,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -645,6 +704,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -654,6 +714,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -663,6 +724,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 2048, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -672,6 +734,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = { /* L3 Data Cache per GPU */ .cache_size = 32*1024, .cache_level = 3, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -684,6 +747,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level 
= 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -693,6 +757,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -702,6 +767,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -711,6 +777,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -720,6 +787,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 1024, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -729,6 +797,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = { /* L3 Data Cache per GPU */ .cache_size = 16*1024, .cache_level = 3, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -741,6 +810,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -750,6 +820,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -759,6 +830,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -768,6 +840,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -777,6 +850,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 2048, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -789,6 +863,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -798,6 +873,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -807,6 +883,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = { /* Scalar L1 Data Cache per SQC */ 
.cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -816,6 +893,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -825,6 +903,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 256, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -837,6 +916,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -846,6 +926,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -855,6 +936,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -864,6 +946,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -873,6 +956,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 256, .cache_level = 2, + .cache_line_size = 128, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -885,6 +969,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = { /* TCP L1 Cache per CU */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -894,6 +979,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = { /* Scalar L1 Instruction Cache per SQC */ .cache_size = 32, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_INST_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -903,6 +989,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = { /* Scalar L1 Data Cache per SQC */ .cache_size = 16, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -912,6 +999,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = { /* GL1 Data Cache per SA */ .cache_size = 128, .cache_level = 1, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), @@ -921,6 +1009,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = { /* L2 Data Cache per GPU (Total Tex Cache) */ .cache_size = 2048, .cache_level = 2, + .cache_line_size = 64, .flags = (CRAT_CACHE_FLAGS_ENABLED | CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_SIMD_CACHE), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index 74c2d7a0d628..300634b9f668 
100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -303,6 +303,7 @@ struct kfd_node; struct kfd_gpu_cache_info { uint32_t cache_size; uint32_t cache_level; + uint32_t cache_line_size; uint32_t flags; /* Indicates how many Compute Units share this cache * within a SA. Value = 1 indicates the cache is not shared diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 9ec750666382..d889e3545120 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -1018,12 +1018,14 @@ int kfd_dbg_trap_device_snapshot(struct kfd_process *target, uint32_t *entry_size) { struct kfd_dbg_device_info_entry device_info; - uint32_t tmp_entry_size = *entry_size, tmp_num_devices; + uint32_t tmp_entry_size, tmp_num_devices; int i, r = 0; if (!(target && user_info && number_of_device_infos && entry_size)) return -EINVAL; + tmp_entry_size = *entry_size; + tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds); *number_of_device_infos = target->n_pdds; *entry_size = min_t(size_t, *entry_size, sizeof(device_info)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 739721254a5d..9b33d9d2c9ad 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -1285,8 +1285,10 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid) uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID; int user_gpu_id; - if (!p) + if (!p) { + dev_warn(dev->adev->dev, "Cannot find process with pasid:%d\n", pasid); return; /* Presumably process exited. */ + } user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); if (unlikely(user_gpu_id == -EINVAL)) { @@ -1322,6 +1324,8 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid) } } + dev_warn(dev->adev->dev, "Send SIGBUS to process %s (pasid:%d)\n", + p->lead_thread->comm, pasid); rcu_read_unlock(); /* user application will handle SIGBUS signal */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 6604a3f99c5e..4a64307bc438 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -36,6 +36,7 @@ #include <linux/mm.h> #include <linux/mman.h> #include <linux/processor.h> +#include "amdgpu_vm.h" /* * The primary memory I/O features being added for revisions of gfxip @@ -326,10 +327,16 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) * with small reserved space for kernel. * Set them to CANONICAL addresses. */ - pdd->gpuvm_base = SVM_USER_BASE; + pdd->gpuvm_base = max(SVM_USER_BASE, AMDGPU_VA_RESERVED_BOTTOM); pdd->gpuvm_limit = pdd->dev->kfd->shared_resources.gpuvm_size - 1; + /* dGPUs: the reserved space for kernel + * before SVM + */ + pdd->qpd.cwsr_base = SVM_CWSR_BASE; + pdd->qpd.ib_base = SVM_IB_BASE; + pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); } @@ -339,18 +346,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) { pdd->lds_base = MAKE_LDS_APP_BASE_V9(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - /* Raven needs SVM to support graphic handle, etc. Leave the small - * reserved space before SVM on Raven as well, even though we don't - * have to. - * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they - * are used in Thunk to reserve SVM.
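The kfd_dbg_trap_device_snapshot() change above is a classic ordering fix: the old code dereferenced *entry_size in a declaration initializer, before the NULL checks on the caller-supplied pointers had run. Below is a minimal userspace sketch of the corrected pattern; the names and the entry type are illustrative, not the KFD API.

    #include <errno.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the snapshot entry type. */
    struct entry { uint32_t data[8]; };

    static int snapshot(const uint32_t *number_of_entries, uint32_t *entry_size)
    {
        uint32_t tmp_entry_size;

        /* Validate every caller-supplied pointer first ... */
        if (!number_of_entries || !entry_size)
            return -EINVAL;

        /* ... and only then read through them, as the patch does by
         * moving the assignment below the check. */
        tmp_entry_size = *entry_size;
        if (tmp_entry_size > sizeof(struct entry))
            tmp_entry_size = sizeof(struct entry);

        printf("copying %" PRIu32 " entries of %" PRIu32 " bytes each\n",
               *number_of_entries, tmp_entry_size);
        return 0;
    }

    int main(void)
    {
        uint32_t n = 2, sz = 64;

        printf("ret=%d\n", snapshot(&n, &sz));   /* ret=0 */
        printf("ret=%d\n", snapshot(NULL, &sz)); /* ret=-EINVAL, no crash */
        return 0;
    }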
- */ - pdd->gpuvm_base = SVM_USER_BASE; + pdd->gpuvm_base = AMDGPU_VA_RESERVED_BOTTOM; pdd->gpuvm_limit = pdd->dev->kfd->shared_resources.gpuvm_size - 1; pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); + + /* + * Place TBA/TMA on opposite side of VM hole to prevent + * stray faults from triggering SVM on these pages. + */ + pdd->qpd.cwsr_base = AMDGPU_VA_RESERVED_TRAP_START(pdd->dev->adev); } int kfd_init_apertures(struct kfd_process *process) @@ -407,12 +414,6 @@ int kfd_init_apertures(struct kfd_process *process) return -EINVAL; } } - - /* dGPUs: the reserved space for kernel - * before SVM - */ - pdd->qpd.cwsr_base = SVM_CWSR_BASE; - pdd->qpd.ib_base = SVM_IB_BASE; } dev_dbg(kfd_device, "node id %u\n", id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index a7697ec8188e..9a06c6fb6605 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -132,6 +132,7 @@ enum SQ_INTERRUPT_ERROR_TYPE { static void event_interrupt_poison_consumption(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { + enum amdgpu_ras_block block = 0; int old_poison, ret = -EINVAL; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); @@ -151,12 +152,14 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, case SOC15_IH_CLIENTID_SE3SH: case SOC15_IH_CLIENTID_UTCL2: ret = kfd_dqm_evict_pasid(dev->dqm, pasid); + block = AMDGPU_RAS_BLOCK__GFX; break; case SOC15_IH_CLIENTID_SDMA0: case SOC15_IH_CLIENTID_SDMA1: case SOC15_IH_CLIENTID_SDMA2: case SOC15_IH_CLIENTID_SDMA3: case SOC15_IH_CLIENTID_SDMA4: + block = AMDGPU_RAS_BLOCK__SDMA; break; default: break; @@ -171,12 +174,12 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, dev_warn(dev->adev->dev, "RAS poison consumption, unmap queue flow succeeded: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); } else { dev_warn(dev->adev->dev, "RAS poison consumption, fall back to gpu reset flow: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index 2a65792fd116..7e2859736a55 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -191,6 +191,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1) static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, uint16_t pasid, uint16_t source_id) { + enum amdgpu_ras_block block = 0; int ret = -EINVAL; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); @@ -210,9 +211,11 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, case SOC15_INTSRC_SQ_INTERRUPT_MSG: if (dev->dqm->ops.reset_queues) ret = dev->dqm->ops.reset_queues(dev->dqm, pasid); + block = AMDGPU_RAS_BLOCK__GFX; break; case SOC21_INTSRC_SDMA_ECC: default: + block = AMDGPU_RAS_BLOCK__GFX; break; } @@ -221,9 +224,9 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, /* resetting queue passes, do page retirement without gpu reset resetting queue fails, fallback to gpu reset solution */ if (!ret) - 
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); else - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); } static bool event_interrupt_isr_v11(struct kfd_node *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 27cdaea40501..91dd5e045b51 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -143,6 +143,7 @@ enum SQ_INTERRUPT_ERROR_TYPE { static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { + enum amdgpu_ras_block block = 0; int old_poison, ret = -EINVAL; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); @@ -162,12 +163,14 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_SE3SH: case SOC15_IH_CLIENTID_UTCL2: ret = kfd_dqm_evict_pasid(dev->dqm, pasid); + block = AMDGPU_RAS_BLOCK__GFX; break; case SOC15_IH_CLIENTID_SDMA0: case SOC15_IH_CLIENTID_SDMA1: case SOC15_IH_CLIENTID_SDMA2: case SOC15_IH_CLIENTID_SDMA3: case SOC15_IH_CLIENTID_SDMA4: + block = AMDGPU_RAS_BLOCK__SDMA; break; default: break; @@ -182,12 +185,12 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, dev_warn(dev->adev->dev, "RAS poison consumption, unmap queue flow succeeded: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); } else { dev_warn(dev->adev->dev, "RAS poison consumption, fall back to gpu reset flow: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 57bf5e513f4d..e5cc697a3ca8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -128,6 +128,31 @@ struct mqd_manager { uint32_t mqd_size; }; +struct mqd_user_context_save_area_header { + /* Byte offset from start of user context + * save area to the last saved top (lowest + * address) of control stack data. Must be + * 4 byte aligned. + */ + uint32_t control_stack_offset; + + /* Byte size of the last saved control stack + * data. Must be 4 byte aligned. + */ + uint32_t control_stack_size; + + /* Byte offset from start of user context save + * area to the last saved base (lowest address) + * of wave state data. Must be 4 byte aligned. + */ + uint32_t wave_state_offset; + + /* Byte size of the last saved wave state data. + * Must be 4 byte aligned. + */ + uint32_t wave_state_size; +}; + struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index d722cbd31783..826bc4f6c8a7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); if (has_wa_flag) { - uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ? - 0xffff : 0xffffffff; + uint32_t wa_mask = + (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 
0xffff : 0xffffffff; m->compute_static_thread_mgmt_se0 = wa_mask; m->compute_static_thread_mgmt_se1 = wa_mask; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 42d881809dc7..697b6d530d12 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, update_cu_mask(mm, mqd, minfo, 0); set_priority(m, q); + if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) { + if (minfo->update_flag & UPDATE_FLAG_IS_GWS) + m->compute_resource_limits |= + COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + else + m->compute_resource_limits &= + ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + } + q->is_active = QUEUE_IS_ACTIVE(*q); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 677281c0793e..80320b8603fc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -532,6 +532,7 @@ struct queue_properties { enum mqd_update_flag { UPDATE_FLAG_DBG_WA_ENABLE = 1, UPDATE_FLAG_DBG_WA_DISABLE = 2, + UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */ }; struct mqd_update_info { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 43eff221eae5..4858112f9a53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws) { + struct mqd_update_info minfo = {0}; struct kfd_node *dev = NULL; struct process_queue_node *pqn; struct kfd_process_device *pdd; @@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, } pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0; + minfo.update_flag = gws ? 
UPDATE_FLAG_IS_GWS : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q, NULL); + pqn->q, &minfo); } void kfd_process_dequeue_from_all_devices(struct kfd_process *p) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c50a0dc9c9c0..f0f7f48af413 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1515,9 +1515,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr) goto unreserve_out; } - r = amdgpu_vm_validate_pt_bos(pdd->dev->adev, - drm_priv_to_vm(pdd->drm_priv), - svm_range_bo_validate, NULL); + r = amdgpu_vm_validate(pdd->dev->adev, + drm_priv_to_vm(pdd->drm_priv), NULL, + svm_range_bo_validate, NULL); if (r) { pr_debug("failed %d validate pt bos\n", r); goto unreserve_out; @@ -1641,7 +1641,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm, goto free_ctx; } - svm_range_reserve_bos(ctx, intr); + r = svm_range_reserve_bos(ctx, intr); + if (r) + goto free_ctx; p = container_of(prange->svms, struct kfd_process, svms); owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e5f7c92eebcb..bc9eb847ecfe 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1564,6 +1564,7 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext, pcache->processor_id_low = cu_processor_id + (first_active_cu - 1); pcache->cache_level = pcache_info[cache_type].cache_level; pcache->cache_size = pcache_info[cache_type].cache_size; + pcache->cacheline_size = pcache_info[cache_type].cache_line_size; if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) pcache->cache_type |= HSA_CACHE_TYPE_DATA; @@ -1632,18 +1633,17 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, pcache->processor_id_low = cu_processor_id + (first_active_cu - 1); pcache->cache_level = pcache_info[cache_type].cache_level; + pcache->cacheline_size = pcache_info[cache_type].cache_line_size; if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3)) mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); else mode = UNKNOWN_MEMORY_PARTITION_MODE; - if (pcache->cache_level == 2) - pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc; - else if (mode) - pcache->cache_size = pcache_info[cache_type].cache_size / mode; - else - pcache->cache_size = pcache_info[cache_type].cache_size; + pcache->cache_size = pcache_info[cache_type].cache_size; + /* Partition mode only affects L3 cache size */ + if (mode && pcache->cache_level == 3) + pcache->cache_size /= mode; if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) pcache->cache_type |= HSA_CACHE_TYPE_DATA; @@ -1705,6 +1705,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct gpu_processor_id = dev->node_props.simd_id_base; + memset(cache_info, 0, sizeof(cache_info)); pcache_info = cache_info; num_of_cache_types = kfd_get_gpu_cache_info(kdev, &pcache_info); if (!num_of_cache_types) { diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO deleted file mode 100644 index a8a6c106e8c7..000000000000 --- a/drivers/gpu/drm/amd/display/TODO +++ /dev/null @@ -1,110 +0,0 @@ -=============================================================================== -TODOs -=============================================================================== - -1. Base this on drm-next - WIP - - -2. 
Cleanup commit history - - 3. WIP - Drop page flip helper and use DRM's version - - 4. DONE - Flatten all DC objects - * dc_stream/core_stream/stream should just be dc_stream - * Same for other DC objects - - "Is there any major reason to keep all those abstractions? - - Could you collapse everything into struct dc_stream? - - I haven't looked recently but I didn't get the impression there was a - lot of design around what was public/protected, more whatever needed - to be used by someone else was in public." - ~ Dave Airlie - - -5. DONE - Rename DC objects to align more with DRM - * dc_surface -> dc_plane_state - * dc_stream -> dc_stream_state - - -6. DONE - Per-plane and per-stream validation - - -7. WIP - Per-plane and per-stream commit - - -8. WIP - Split pipe_ctx into plane and stream resource structs - - -9. Attach plane and stream resources to state object instead of validate_context - - -10. Remove dc_edid_caps and drm_helpers_parse_edid_caps - * Use drm_display_info instead - * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed) - - "Making sure you use the sink-specific helper libraries and kernel - subsystems, since there's really no good reason to have 2nd - implementation of those in the kernel. Looks likes that's done for mst - and edid parsing. There's still a bit a midlayer feeling to the edid - parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I - think it'd be much better if you convert that over to reading stuff - from drm_display_info and if needed, push stuff into the core). Also, - I can't come up with a good reason why DC needs all this (except to - reimplement half of our edid quirk table, which really isn't a good - idea). Might be good if you put this onto the list of things to fix - long-term, but imo not a blocker. Definitely make sure new stuff - doesn't slip in (i.e. if you start adding edid quirks to DC instead of - the drm core, refactoring to use the core edid stuff was pointless)." - ~ Daniel Vetter - - -11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an -overly complicated HW programming function for sending and receiving i2c/aux -commands. We can greatly simplify that and move it into dc/dceXYZ like other -HW blocks. - -12. drm_modeset_lock in MST should no longer be needed in recent kernels - * Adopt appropriate locking scheme - -13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out -a few indirections, and consider removing entirely and using the -drm_atomic_helper_best_encoder default behaviour. - -14. core/dc_debug.c, consider switching to the atomic state debug helpers and -moving all your driver state printing into the various atomic_print_state -callbacks. There's also plans to expose this stuff in a standard way across all -drivers, to make debugging userspace compositors easier across different hw. - -15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See -dal_ddc_service_i2c_query_dp_dual_mode_adaptor. - -16. Move to core SCDC helpers (I think those are new since initial DC review). - -17. There's still a pretty massive layer cake around dp aux and DPCD handling, -with like 3 levels of abstraction and using your own structures instead of the -stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2 -incompatible styles, just means more reasons not to add a third (or well third -one gets to do the cleanup refactor). 18.
There's a pile of sink handling code, both for DP and HDMI where I didn't -immediately recognize the standard. I think long term it'd be best for the drm -subsystem if we try to move as much of that into helpers/core as possible, and -share it with drivers. But that's a very long term goal, and by far not just an -issue with DC - other drivers, especially around DP sink handling, are equally -guilty. - -19. DONE - The DC logger is still a rather sore thing, but I know that the -DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out -something that integrates better with DRM and linux debug printing, while not -being useless with filtering output. dynamic debug printing might be an option. - -20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI -retimer that we need to program to pass PHY compliance. Currently that's -bypassing the i2c device and goes directly to HW. This should be changed. - -21. Remove vector.c from dc/basics. It's used in DDC code which can probably -be simplified enough to no longer need a vector implementation. diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d292f290cd6e..bcdd4f28b64c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -67,6 +67,7 @@ #include "amdgpu_dm_debugfs.h" #endif #include "amdgpu_dm_psr.h" +#include "amdgpu_dm_replay.h" #include "ivsrcid/ivsrcid_vislands30.h" @@ -1843,21 +1844,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub aux callback"); goto error; } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); - goto error; - } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); - goto error; - } - } - - /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. - * It is expected that DMUB will resend any pending notifications at this point, for - * example HPD from DPIA. - */ - if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. + * It is expected that DMUB will resend any pending notifications at this point. Note + * that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to + * align with the legacy interface initialization sequence. Connection status will be proactively + * detected once in the amdgpu_dm_initialize_drm_device.
+ */ dc_enable_dmub_outbox(adev->dm.dc); /* DPIA trace goes to dmesg logs only if outbox is enabled */ @@ -1938,17 +1930,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) adev->dm.hdcp_workqueue = NULL; } - if (adev->dm.dc) + if (adev->dm.dc) { dc_deinit_callbacks(adev->dm.dc); - - if (adev->dm.dc) dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); - - if (dc_enable_dmub_notifications(adev->dm.dc)) { - kfree(adev->dm.dmub_notify); - adev->dm.dmub_notify = NULL; - destroy_workqueue(adev->dm.delayed_hpd_wq); - adev->dm.delayed_hpd_wq = NULL; + if (dc_enable_dmub_notifications(adev->dm.dc)) { + kfree(adev->dm.dmub_notify); + adev->dm.dmub_notify = NULL; + destroy_workqueue(adev->dm.delayed_hpd_wq); + adev->dm.delayed_hpd_wq = NULL; + } } if (adev->dm.dmub_bo) @@ -1956,7 +1946,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); - if (adev->dm.hpd_rx_offload_wq) { + if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { for (i = 0; i < adev->dm.dc->caps.max_links; i++) { if (adev->dm.hpd_rx_offload_wq[i].wq) { destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); @@ -2121,6 +2111,17 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) const struct dmcub_firmware_header_v1_0 *hdr; enum dmub_asic dmub_asic; enum dmub_status status; + static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = { + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE + }; int r; switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { @@ -2218,7 +2219,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) adev->dm.dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + PSP_HEADER_BYTES; - region_params.is_mailbox_in_inbox = false; + region_params.window_memory_type = window_memory_type; status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, ®ion_info); @@ -2246,6 +2247,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; memory_params.region_info = ®ion_info; + memory_params.window_memory_type = window_memory_type; adev->dm.dmub_fb_info = kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); @@ -2287,6 +2289,7 @@ static int dm_sw_fini(void *handle) if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); + kfree(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; } @@ -3536,6 +3539,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev) int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + } + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -3564,10 +3575,6 @@ static void register_hpd_handlers(struct amdgpu_device 
*adev) handle_hpd_rx_irq, (void *) aconnector); } - - if (adev->dm.hpd_rx_offload_wq) - adev->dm.hpd_rx_offload_wq[connector->index].aconnector = - aconnector; } } @@ -4399,6 +4406,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) enum dc_connection_type new_connection_type = dc_connection_none; const struct dc_plane_cap *plane; bool psr_feature_enabled = false; + bool replay_feature_enabled = false; int max_overlay = dm->dc->caps.max_slave_planes; dm->display_indexes_num = dm->dc->caps.max_streams; @@ -4510,6 +4518,23 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } } + /* Determine whether to enable Replay support by default. */ + if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): + replay_feature_enabled = true; + break; + default: + replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; + break; + } + } + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -4561,6 +4586,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } + if (dm->hpd_rx_offload_wq) + dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = + aconnector; + if (!dc_link_detect_connection_type(link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); @@ -4578,6 +4607,11 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) amdgpu_dm_update_connector_after_detect(aconnector); setup_backlight_device(dm, aconnector); + /* Disable PSR if Replay can be enabled */ + if (replay_feature_enabled) + if (amdgpu_dm_set_replay_caps(link, aconnector)) + psr_feature_enabled = false; + if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); @@ -5219,6 +5253,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, struct drm_plane_state *new_plane_state, struct drm_crtc_state *crtc_state, struct dc_flip_addrs *flip_addrs, + bool is_psr_su, bool *dirty_regions_changed) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); @@ -5243,6 +5278,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, num_clips = drm_plane_get_damage_clips_count(new_plane_state); clips = drm_plane_get_damage_clips(new_plane_state); + if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 && + is_psr_su))) + goto ffu; + if (!dm_crtc_state->mpo_requested) { if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) goto ffu; @@ -6194,7 +6233,9 @@ create_stream_for_sink(struct drm_connector *connector, if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); + saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; drm_mode_copy(&mode, freesync_mode); + mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; } else { decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, scale); @@ -6402,10 +6443,82 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, return ret; } +/** + * DOC: panel power savings + * + * The display manager allows you to set your desired **panel power savings** + * level (between 0-4, with 0 representing off), e.g. 
using the following:: + * + * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings + * + * Modifying this value can have implications on color accuracy, so tread + * carefully. + */ + +static ssize_t panel_power_savings_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct drm_connector *connector = dev_get_drvdata(device); + struct drm_device *dev = connector->dev; + u8 val; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + val = to_dm_connector_state(connector->state)->abm_level == + ABM_LEVEL_IMMEDIATE_DISABLE ? 0 : + to_dm_connector_state(connector->state)->abm_level; + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return sysfs_emit(buf, "%u\n", val); +} + +static ssize_t panel_power_savings_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_connector *connector = dev_get_drvdata(device); + struct drm_device *dev = connector->dev; + long val; + int ret; + + ret = kstrtol(buf, 0, &val); + + if (ret) + return ret; + + if (val < 0 || val > 4) + return -EINVAL; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + to_dm_connector_state(connector->state)->abm_level = val ?: + ABM_LEVEL_IMMEDIATE_DISABLE; + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + drm_kms_helper_hotplug_event(dev); + + return count; +} + +static DEVICE_ATTR_RW(panel_power_savings); + +static struct attribute *amdgpu_attrs[] = { + &dev_attr_panel_power_savings.attr, + NULL +}; + +static const struct attribute_group amdgpu_group = { + .name = "amdgpu", + .attrs = amdgpu_attrs +}; + static void amdgpu_dm_connector_unregister(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && + amdgpu_dm_abm_level < 0) + sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); + drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); } @@ -6467,9 +6580,12 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->vcpi_slots = 0; state->pbn = 0; - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) - state->abm_level = amdgpu_dm_abm_level ?: - ABM_LEVEL_IMMEDIATE_DISABLE; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + if (amdgpu_dm_abm_level <= 0) + state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; + else + state->abm_level = amdgpu_dm_abm_level; + } __drm_atomic_helper_connector_reset(connector, &state->base); } @@ -6507,6 +6623,14 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); int r; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && + amdgpu_dm_abm_level < 0) { + r = sysfs_create_group(&connector->kdev->kobj, + &amdgpu_group); + if (r) + return r; + } + amdgpu_dm_register_backlight_device(amdgpu_dm_connector); if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || @@ -6527,10 +6651,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; struct edid *edid; + struct i2c_adapter *ddc; + + if (dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; 
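The panel_power_savings store/show pair above keeps abm_level == 0 reserved for "driver default" by mapping a user-requested level of 0 onto a sentinel via GNU C's "?:" shorthand. Below is a minimal user-space sketch of that round trip; the sentinel value 255 is an assumption mirroring what ABM_LEVEL_IMMEDIATE_DISABLE appears to be in amdgpu_dm.h, redefined locally so the sketch stands alone:

    #include <stdio.h>

    /* Assumed sentinel, mirroring ABM_LEVEL_IMMEDIATE_DISABLE in amdgpu_dm.h. */
    #define ABM_LEVEL_IMMEDIATE_DISABLE 255

    /* store path: a requested level of 0 becomes the sentinel, 1..4 pass through */
    static unsigned int abm_from_sysfs(long val)
    {
            return val ? val : ABM_LEVEL_IMMEDIATE_DISABLE;
    }

    /* show path: the sentinel reads back as 0 */
    static unsigned int sysfs_from_abm(unsigned int abm)
    {
            return abm == ABM_LEVEL_IMMEDIATE_DISABLE ? 0 : abm;
    }

    int main(void)
    {
            for (long v = 0; v <= 4; v++)
                    printf("store %ld -> abm %u -> show %u\n",
                           v, abm_from_sysfs(v), sysfs_from_abm(abm_from_sysfs(v)));
            return 0;
    }

This is why the show path compares against ABM_LEVEL_IMMEDIATE_DISABLE before emitting the value: without the sentinel, "off" and "unset" would be indistinguishable in the connector state.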
/* * Note: drm_get_edid gets edid in the following order: @@ -6538,7 +6667,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) * 2) firmware EDID if set via edid_firmware module parameter * 3) regular DDC read. */ - edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); + edid = drm_get_edid(connector, ddc); if (!edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; @@ -6579,12 +6708,18 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base); + struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; struct edid *edid; + struct i2c_adapter *ddc; + + if (dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; /* * Note: drm_get_edid gets edid in the following order: @@ -6592,7 +6727,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) * 2) firmware EDID if set via edid_firmware module parameter * 3) regular DDC read. */ - edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); + edid = drm_get_edid(connector, ddc); if (!edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; @@ -7530,7 +7665,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; if (connector_type == DRM_MODE_CONNECTOR_eDP && - (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { + (dc_is_dmcu_initialized(adev->dm.dc) || + adev->dm.dc->ctx->dmub_srv) && amdgpu_dm_abm_level < 0) { drm_object_attach_property(&aconnector->base.base, adev->mode_info.abm_level_property, 0); } @@ -8298,6 +8434,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, new_crtc_state, &bundle->flip_addrs[planes_count], + acrtc_state->stream->link->psr_settings.psr_version == + DC_PSR_VERSION_SU_1, &dirty_rects_changed); /* @@ -8526,10 +8664,22 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dm_update_pflip_irq_state(drm_to_adev(dev), acrtc_attach); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && - !acrtc_state->stream->link->psr_settings.psr_feature_enabled) - amdgpu_dm_link_setup_psr(acrtc_state->stream); + if (acrtc_state->update_type > UPDATE_TYPE_FAST) { + if (acrtc_state->stream->link->replay_settings.config.replay_supported && + !acrtc_state->stream->link->replay_settings.replay_feature_enabled) { + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; + amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); + } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && + !acrtc_state->stream->link->psr_settings.psr_feature_enabled) { + + struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *) + acrtc_state->stream->dm_stream_context; + + if (!aconn->disallow_edp_enter_psr) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + } + } /* Decrement skip count when PSR is enabled and we're doing fast updates. 
*/ if (acrtc_state->update_type == UPDATE_TYPE_FAST && @@ -8556,6 +8706,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && #endif !acrtc_state->stream->link->psr_settings.psr_allow_active && + !aconn->disallow_edp_enter_psr && (timestamp_ns - acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > 500000000) @@ -8818,11 +8969,12 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, } } /* for_each_crtc_in_state() */ - /* if there mode set or reset, disable eDP PSR */ + /* if there is a mode set or reset, disable eDP PSR and Replay */ if (mode_set_reset_required) { if (dm->vblank_control_workqueue) flush_workqueue(dm->vblank_control_workqueue); + amdgpu_dm_replay_disable_all(dm); amdgpu_dm_psr_disable_all(dm); } @@ -10731,11 +10883,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); - if (ret) { - DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); - ret = -EINVAL; - goto fail; + if (dc_resource_is_dsc_encoding_supported(dc)) { + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); + if (ret) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto fail; + } } ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 9c1871b866cc..09519b7abf67 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -693,6 +693,7 @@ struct amdgpu_dm_connector { struct drm_display_mode freesync_vid_base; int psr_skip_count; + bool disallow_edp_enter_psr; /* Record progress status of mst*/ uint8_t mst_status; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 6e715ef3a556..e23a0a276e33 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -29,6 +29,7 @@ #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm_psr.h" +#include "amdgpu_dm_replay.h" #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_plane.h" #include "amdgpu_dm_trace.h" @@ -95,6 +96,61 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } +/** + * amdgpu_dm_crtc_set_panel_sr_feature() - Manage panel self-refresh features. + * + * @vblank_work: is a pointer to a struct vblank_control_work object. + * @vblank_enabled: indicates whether the DRM vblank counter is currently + * enabled (true) or disabled (false). + * @allow_sr_entry: represents whether entry into the self-refresh mode is + * allowed (true) or not allowed (false).
+ * + * The DRM vblank counter enable/disable action is used as the trigger to enable + * or disable various panel self-refresh features: + * + * Panel Replay and PSR SU + * - Enable when: + * - vblank counter is disabled + * - entry is allowed (usermode demonstrates an adequate number of fast + * commits) + * - CRC capture window isn't active + * - Keep enabled even when vblank counter gets enabled + * + * PSR1 + * - Enable condition same as above + * - Disable when vblank counter is enabled + */ +static void amdgpu_dm_crtc_set_panel_sr_feature( + struct vblank_control_work *vblank_work, + bool vblank_enabled, bool allow_sr_entry) +{ + struct dc_link *link = vblank_work->stream->link; + bool is_sr_active = (link->replay_settings.replay_allow_active || + link->psr_settings.psr_allow_active); + bool is_crc_window_active = false; + +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + is_crc_window_active = + amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base); +#endif + + if (link->replay_settings.replay_feature_enabled && + allow_sr_entry && !is_sr_active && !is_crc_window_active) { + amdgpu_dm_replay_enable(vblank_work->stream, true); + } else if (vblank_enabled) { + if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active) + amdgpu_dm_psr_disable(vblank_work->stream); + } else if (link->psr_settings.psr_feature_enabled && + allow_sr_entry && !is_sr_active && !is_crc_window_active) { + + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context; + + if (!aconn->disallow_edp_enter_psr) + amdgpu_dm_psr_enable(vblank_work->stream); + } +} + static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) { struct vblank_control_work *vblank_work = @@ -123,18 +179,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) * fill_dc_dirty_rects().
*/ if (vblank_work->stream && vblank_work->stream->link) { - if (vblank_work->enable) { - if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && - vblank_work->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(vblank_work->stream); - } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && - !vblank_work->stream->link->psr_settings.psr_allow_active && -#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY - !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) && -#endif - vblank_work->acrtc->dm_irq_params.allow_psr_entry) { - amdgpu_dm_psr_enable(vblank_work->stream); - } + amdgpu_dm_crtc_set_panel_sr_feature( + vblank_work, vblank_work->enable, + vblank_work->acrtc->dm_irq_params.allow_psr_entry || + vblank_work->stream->link->replay_settings.replay_feature_enabled); } mutex_unlock(&dm->dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 68a846323912..eee4945653e2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1483,7 +1483,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, const uint32_t rd_buf_size = 10; struct pipe_ctx *pipe_ctx; ssize_t result = 0; - int i, r, str_len = 30; + int i, r, str_len = 10; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); @@ -2971,6 +2971,53 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val) return 0; } +/* check whether the kernel disallows eDP from entering the psr state + * cat /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 0: allow edp to enter psr; 1: disallow + */ +static int disallow_edp_enter_psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *aconnector = data; + + *val = (u64) aconnector->disallow_edp_enter_psr; + return 0; +} + +/* set whether the kernel disallows eDP from entering the psr state + * echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 0: allow edp to enter psr; 1: disallow + * + * usage: a test app reads crc from a PSR eDP rx. + * + * during kernel boot up, the kernel writes dpcd 0x170 = 5. + * this notifies the eDP rx that psr is enabled and lets the rx check crc. + * rx fw will start checking crc for rx internal logic. + * the crc read count within dpcd 0x246 is not updated and + * its value is 0. when the eDP tx driver wants to read the rx crc + * from dpcd 0x246, 0x270, a read count of 0 leads to a tx driver + * timeout. + * + * to avoid this, we add this debugfs to let the test app disable + * rx crc checking for rx internal logic. the test app can then read + * a non-zero crc read count. + * + * the expected app sequence is as below: + * 1. disable the eDP PHY and notify the eDP rx with dpcd 0x600 = 2. + * 2. echo 0x1 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 3. enable the eDP PHY and notify the eDP rx with dpcd 0x600 = 1 but + * without dpcd 0x170 = 5. + * 4. read crc from rx dpcd 0x270, 0x246, etc. + * 5. echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr. + * this will bring eDP back to normal with psr setup dpcd 0x170 = 5. + */ +static int disallow_edp_enter_psr_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *aconnector = data; + + aconnector->disallow_edp_enter_psr = val ?
true : false; + return 0; +} + static int dmub_trace_mask_set(void *data, u64 val) { struct amdgpu_device *adev = data; @@ -3092,6 +3139,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops, allow_edp_hotplug_detection_get, allow_edp_hotplug_detection_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops, + disallow_edp_enter_psr_get, + disallow_edp_enter_psr_set, "%llu\n"); + DEFINE_SHOW_ATTRIBUTE(current_backlight); DEFINE_SHOW_ATTRIBUTE(target_backlight); @@ -3265,6 +3316,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) &edp_ilr_debugfs_fops); debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector, &allow_edp_hotplug_detection_fops); + debugfs_create_file("disallow_edp_enter_psr", 0644, dir, connector, + &disallow_edp_enter_psr_fops); } for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c index 5ce542b1f860..738a58eebba7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -60,21 +60,26 @@ static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connecto if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT) return false; + // Sink shall populate line deviation information + if (dpcd_caps->pr_info.pixel_deviation_per_line == 0 || + dpcd_caps->pr_info.max_deviation_line == 0) + return false; + return true; } /* - * amdgpu_dm_setup_replay() - setup replay configuration + * amdgpu_dm_set_replay_caps() - setup Replay capabilities * @link: link * @aconnector: aconnector * */ -bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector) { - struct replay_config pr_config; + struct replay_config pr_config = { 0 }; union replay_debug_flags *debug_flags = NULL; - // For eDP, if Replay is supported, return true to skip checks + // If Replay is already set to support, return true to skip checks if (link->replay_settings.config.replay_supported) return true; @@ -87,27 +92,50 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac if (!link_supports_replay(link, aconnector)) return false; - // Mark Replay is supported in link and update related attributes + // Mark Replay is supported in pr_config pr_config.replay_supported = true; - pr_config.replay_power_opt_supported = 0; - pr_config.replay_enable_option |= pr_enable_option_static_screen; - pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq; - - if (!pr_config.replay_timing_sync_supported) - pr_config.replay_enable_option &= ~pr_enable_option_general_ui; debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; debug_flags->u32All = 0; debug_flags->bitfields.visual_confirm = link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY; - link->replay_settings.replay_feature_enabled = true; - init_replay_config(link, &pr_config); return true; } +/* + * amdgpu_dm_link_setup_replay() - configure replay link + * @link: link + * @aconnector: aconnector + * + */ +bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct replay_config *pr_config; + + if (link == NULL || aconnector == NULL) + return false; + + pr_config = &link->replay_settings.config; + + if (!pr_config->replay_supported) + return 
false; + + pr_config->replay_power_opt_supported = 0x11; + pr_config->replay_smu_opt_supported = false; + pr_config->replay_enable_option |= pr_enable_option_static_screen; + pr_config->replay_support_fast_resync_in_ultra_sleep_mode = aconnector->max_vfreq >= 2 * aconnector->min_vfreq; + pr_config->replay_timing_sync_supported = false; + + if (!pr_config->replay_timing_sync_supported) + pr_config->replay_enable_option &= ~pr_enable_option_general_ui; + + link->replay_settings.replay_feature_enabled = true; + + return true; +} /* * amdgpu_dm_replay_enable() - enable replay f/w @@ -117,51 +145,23 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac */ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) { - uint64_t state; - unsigned int retry_count; bool replay_active = true; - const unsigned int max_retry = 1000; - bool force_static = true; struct dc_link *link = NULL; - if (stream == NULL) return false; link = stream->link; - if (link == NULL) - return false; - - link->dc->link_srv->edp_setup_replay(link, stream); - - link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL); - - link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL); - - if (wait == true) { - - for (retry_count = 0; retry_count <= max_retry; retry_count++) { - dc_link_get_replay_state(link, &state); - if (replay_active) { - if (state != REPLAY_STATE_0 && - (!force_static || state == REPLAY_STATE_3)) - break; - } else { - if (state == REPLAY_STATE_0) - break; - } - udelay(500); - } - - /* assert if max retry hit */ - if (retry_count >= max_retry) - ASSERT(0); - } else { - /* To-do: Add trace log */ + if (link) { + link->dc->link_srv->edp_setup_replay(link, stream); + link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total); + DRM_DEBUG_DRIVER("Enabling replay...\n"); + link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL); + return true; } - return true; + return false; } /* @@ -172,12 +172,31 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) */ bool amdgpu_dm_replay_disable(struct dc_stream_state *stream) { + bool replay_active = false; + struct dc_link *link = NULL; - if (stream->link) { + if (stream == NULL) + return false; + + link = stream->link; + + if (link) { DRM_DEBUG_DRIVER("Disabling replay...\n"); - stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL); + link->dc->link_srv->edp_set_replay_allow_active(stream->link, &replay_active, true, false, NULL); return true; } return false; } + +/* + * amdgpu_dm_replay_disable_all() - disable replay f/w + * if replay is enabled on any stream + * + * Return: true if success + */ +bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm) +{ + DRM_DEBUG_DRIVER("Disabling replay if replay is enabled on any stream\n"); + return dc_set_replay_allow_active(dm->dc, false); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h index 01cba3cd6246..f0d30eb47312 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h @@ -40,7 +40,9 @@ enum replay_enable_option { bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable); -bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector); +bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct 
amdgpu_dm_connector *aconnector); +bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector); bool amdgpu_dm_replay_disable(struct dc_stream_state *stream); +bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm); #endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index 1090d235086a..bd1f60ecaba4 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -101,6 +101,40 @@ void convert_float_matrix( } } +static struct fixed31_32 int_frac_to_fixed_point(uint16_t arg, + uint8_t integer_bits, + uint8_t fractional_bits) +{ + struct fixed31_32 result; + uint16_t sign_mask = 1 << (fractional_bits + integer_bits); + uint16_t value_mask = sign_mask - 1; + + result.value = (long long)(arg & value_mask) << + (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + + if (arg & sign_mask) + result = dc_fixpt_neg(result); + + return result; +} + +/** + * convert_hw_matrix - converts HW values into fixed31_32 matrix. + * @matrix: fixed point 31.32 matrix + * @reg: array of register values + * @buffer_size: size of the array of register values + * + * Converts HW register spec defined format S2D13 into a fixed-point 31.32 + * matrix. + */ +void convert_hw_matrix(struct fixed31_32 *matrix, + uint16_t *reg, + uint32_t buffer_size) +{ + for (int i = 0; i < buffer_size; ++i) + matrix[i] = int_frac_to_fixed_point(reg[i], 2, 13); +} + static uint32_t find_gcd(uint32_t a, uint32_t b) { uint32_t remainder; diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h index 81da4e6f7a1a..a433cef78496 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h @@ -41,6 +41,10 @@ void convert_float_matrix( void reduce_fraction(uint32_t num, uint32_t den, uint32_t *out_num, uint32_t *out_den); +void convert_hw_matrix(struct fixed31_32 *matrix, + uint16_t *reg, + uint32_t buffer_size); + static inline unsigned int log_2(unsigned int num) { return ilog2(num);
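The S2D13 format that convert_hw_matrix() decodes above is sign-magnitude fixed point: one sign bit, two integer bits, thirteen fractional bits. Here is a self-contained sketch of the same decode, producing a double instead of the kernel's fixed31_32 (the sample register values are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    /* Decode one S2D13 register value: bit 15 is the sign, bits 14..0 are
     * the magnitude in units of 2^-13, matching the masks built by
     * int_frac_to_fixed_point(reg, 2, 13) above. */
    static double s2d13_to_double(uint16_t arg)
    {
            uint16_t sign_mask = 1u << 15;
            uint16_t value_mask = sign_mask - 1;
            double v = (double)(arg & value_mask) / (double)(1 << 13);

            return (arg & sign_mask) ? -v : v;
    }

    int main(void)
    {
            printf("%f %f %f\n",
                   s2d13_to_double(0x2000),   /* 1 << 13      ->  1.0   */
                   s2d13_to_double(0xA000),   /* sign bit set -> -1.0   */
                   s2d13_to_double(0x0001));  /* smallest step -> 2^-13 */
            return 0;
    }

Note the encoding is sign-magnitude rather than two's complement, which is why the kernel helper negates the masked magnitude instead of sign-extending it.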
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index f2dfa96f9ef5..39530b2ea495 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -94,7 +94,7 @@ static void calculate_bandwidth( const uint32_t s_high = 7; const uint32_t dmif_chunk_buff_margin = 1; - uint32_t max_chunks_fbc_mode; + uint32_t max_chunks_fbc_mode = 0; int32_t num_cursor_lines; int32_t i, j, k; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 960c4b4f6ddf..05f392501c0a 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2( /* Vega12 */ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); if (!smu_info_v3_2) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; } else if (revision.minor == 3) { /* Vega20 */ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); if (!smu_info_v3_3) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; } @@ -2422,10 +2424,11 @@ static enum bp_result get_integrated_info_v11( info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); if (info_v11 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v11->gpucapinfo); /* @@ -2637,11 +2640,12 @@ static enum bp_result get_integrated_info_v2_1( info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); if (info_v2_1 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_1->gpucapinfo); /* @@ -2799,11 +2803,11 @@ static enum bp_result get_integrated_info_v2_2( info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); - if (info_v2_2 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_2->gpucapinfo); /* diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 818a529cacc3..86f9198e7501 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -37,7 +37,7 @@ #define EXEC_BIOS_CMD_TABLE(command, params)\ (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GetIndexIntoMasterTable(COMMAND, command), \ - (uint32_t *)&params) == 0) + (uint32_t *)&params, sizeof(params)) == 0) #define BIOS_CMD_TABLE_REVISION(command, frev, crev)\ amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 293a919d605d..cbae1be7b009 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -49,7 +49,7 @@ #define EXEC_BIOS_CMD_TABLE(fname, params)\ (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GET_INDEX_INTO_MASTER_TABLE(command, fname), \ - (uint32_t *)&params) == 0) + (uint32_t *)&params, sizeof(params)) == 0) #define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\ amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 28a2a837d2f0..86ee4fe4f5e3 100644 +++
b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -340,7 +340,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; - break; } case AMDGPU_FAMILY_GC_11_0_1: { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 0c6a4ab72b1d..e3e1940198a9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -707,9 +707,7 @@ void rn_clk_mgr_construct( int is_green_sardine = 0; struct clk_log_info log_info = {0}; -#if defined(CONFIG_DRM_AMD_DC_FP) is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev); -#endif clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn21_funcs; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 8c9d45e5b13b..d72acbb049b1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -185,10 +185,6 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c index e4f96b6fd79d..19e5b3be9275 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c @@ -180,10 +180,6 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index a5489fe6875f..aa9fd1dc550a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta int i; for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) { + if (i >= VG_NUM_DCFCLK_DPM_LEVELS) + break; if (clock_table->SocVoltage[i] == voltage) return clock_table->DcfClocks[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 32279c5db724..6904e95113c1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -202,10 +202,6 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index 07baa10a8647..c4af406146b7 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -220,12 +220,6 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", - actual_dcfclk_set_mhz, - actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 1042cf1a3ab0..879f1494c4cd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -215,10 +215,6 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c index 3ed19197a755..8b82092b91cd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c @@ -189,10 +189,6 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index aadd07bc68c5..e64e45e4c833 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -387,7 +387,15 @@ static void dcn32_update_clocks_update_dentist( uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider; if (clk_mgr->smu_present) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz)); + /* + * SMU uses discrete dispclk presets. We applied + * the same formula to increase our dispclk_khz + * to the next matching discrete value. By + * contract, we should use the preset dispclk + * floored in MHz to describe the intended clock. + */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, + khz_to_mhz_floor(temp_dispclk_khz)); if (dc->debug.override_dispclk_programming) { REG_GET(DENTIST_DISPCLK_CNTL, @@ -426,7 +434,15 @@ static void dcn32_update_clocks_update_dentist( /* do requested DISPCLK updates*/ if (clk_mgr->smu_present) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz)); + /* + * SMU uses discrete dispclk presets. We applied + * the same formula to increase our dispclk_khz + * to the next matching discrete value. By + * contract, we should use the preset dispclk + * floored in MHz to describe the intended clock.
+ */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, + khz_to_mhz_floor(clk_mgr->base.clks.dispclk_khz)); if (dc->debug.override_dispclk_programming) { REG_GET(DENTIST_DISPCLK_CNTL, @@ -493,6 +509,8 @@ static void dcn32_auto_dpm_test_log( } } + msleep(5); + mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes; dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK @@ -734,7 +752,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; if (clk_mgr->smu_present && !dpp_clock_lowered) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz)); + /* + * SMU uses discrete dppclk presets. We applied + * the same formula to increase our dppclk_khz + * to the next matching discrete value. By + * contract, we should use the preset dppclk + * floored in MHz to describe the intended clock. + */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, + khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz)); update_dppclk = true; } @@ -765,7 +791,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn32_update_clocks_update_dentist(clk_mgr, context); if (clk_mgr->smu_present) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz)); + /* + * SMU uses discrete dppclk presets. We applied + * the same formula to increase our dppclk_khz + * to the next matching discrete value. By + * contract, we should use the preset dppclk + * floored in MHz to describe the intended clock. + */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, + khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz)); } else { /* if clock is being raised, increase refclk before lowering DTO */ if (update_dppclk || update_dispclk)
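The ceil-to-floor change in the dcn32 hunks above comes down to rounding direction: the divider-derived kHz value sits just above the discrete preset it was computed from, so flooring recovers the preset while ceiling would name the next MHz up. An illustrative reimplementation of the two helpers follows (the real ones live in the clk_mgr headers; the sample frequency is made up):

    #include <stdio.h>

    /* Illustrative stand-ins for the rounding helpers the hunks switch between. */
    static unsigned int khz_to_mhz_ceil(unsigned int khz)
    {
            return (khz + 999) / 1000;
    }

    static unsigned int khz_to_mhz_floor(unsigned int khz)
    {
            return khz / 1000;
    }

    int main(void)
    {
            /* e.g. a divider-derived dispclk slightly above its preset */
            unsigned int khz = 642753;

            printf("floor: %u MHz, ceil: %u MHz\n",
                   khz_to_mhz_floor(khz), khz_to_mhz_ceil(khz));
            return 0;
    }

Per the comments above, SMU interprets the hard-min request against its discrete preset table, so reporting 643 rather than 642 could select a higher preset than intended.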
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h index a34c258c19dc..c76352a817de 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h @@ -36,8 +36,7 @@ #define DALSMC_MSG_SetCabForUclkPstate 0x12 #define DALSMC_Result_OK 0x1 -void -dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable); +void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable); void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 14cec1c7b718..c378b879c76d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -384,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base) dcn35_smu_enable_pme_wa(clk_mgr); } -void dcn35_init_clocks(struct clk_mgr *clk_mgr) -{ - uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; - - memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); - - // Assumption is that boot state always supports pstate - clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk - clk_mgr->clks.p_state_change_support = true; - clk_mgr->clks.prev_p_state_change_support = true; - clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; - clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; -} bool dcn35_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b) @@ -422,6 +409,22 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs { } +static void init_clk_states(struct clk_mgr *clk_mgr) +{ + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; +} + +void dcn35_init_clocks(struct clk_mgr *clk_mgr) +{ + init_clk_states(clk_mgr); +} static struct clk_bw_params dcn35_bw_params = { .vram_type = Ddr4MemType, .num_channels = 1, @@ -655,10 +658,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0; uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0; + uint32_t num_memps, num_fclk, num_dcfclk; int i; /* Determine min/max p-state values. */ - for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) { + num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS : + clock_table->NumMemPstatesEnabled; + for (i = 0; i < num_memps; i++) { uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]); if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) { @@ -670,7 +676,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk min_dram_speed_mts = max_dram_speed_mts; min_pstate = max_pstate; - for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) { + for (i = 0; i < num_memps; i++) { uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]); if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) { @@ -699,9 +705,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */ ASSERT(clock_table->NumDcfClkLevelsEnabled > 0); - max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled); + num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS : + clock_table->NumFclkLevelsEnabled; + max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk); - for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) { + num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ?
NUM_DCFCLK_DPM_LEVELS : + clock_table->NumDcfClkLevelsEnabled; + for (i = 0; i < num_dcfclk; i++) { int j; /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */ @@ -826,7 +836,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) } } -static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) +static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc *dc = clk_mgr_base->ctx->dc; @@ -874,7 +884,7 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) return ips_supported; } -static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base) +static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -883,7 +893,7 @@ static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base) static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) { - dcn35_init_clocks(clk_mgr); + init_clk_states(clk_mgr); /* TODO: Implement the functions and remove the ifndef guard */ } @@ -968,8 +978,8 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state + .set_idle_state = dcn35_set_ips_idle_state, + .get_idle_state = dcn35_get_ips_idle_state }; struct clk_mgr_funcs dcn35_fpga_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index 6d4a1ffab5ed..9e588c56c570 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -361,32 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst case DCN_ZSTATE_SUPPORT_ALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 9) | (1 << 8); - smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_DISALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = 0; - smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10); - smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 8); - smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 8); - smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param); break; default: //DCN_ZSTATE_SUPPORT_UNKNOWN @@ 
-400,7 +400,7 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst clk_mgr, msg_id, param); - smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv); + smu_print("%s: msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv); } int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) @@ -447,6 +447,9 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable) void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) { + if (!clk_mgr->smu_present) + return; + dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, @@ -458,6 +461,9 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) { int retv; + if (!clk_mgr->smu_present) + return 0; + retv = dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_DispPsrExit, @@ -470,6 +476,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) { int retv; + if (!clk_mgr->smu_present) + return 0; + retv = dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_QueryIPS2Support, @@ -481,6 +490,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) { + if (!clk_mgr->smu_present) + return; + REG_WRITE(MP1_SMN_C2PMSG_71, param); //smu_print("%s: write_ips_scratch = %x\n", __func__, param); } @@ -489,6 +501,9 @@ uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) { uint32_t retv; + if (!clk_mgr->smu_present) + return 0; + retv = REG_READ(MP1_SMN_C2PMSG_71); //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv); return retv; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index aa7c02ba948e..4d5194293dbd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -414,6 +414,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, if (dc->optimized_required || dc->wm_optimized_required) return false; + dc_exit_ips_for_hw_access(dc); + stream->adjust.v_total_max = adjust->v_total_max; stream->adjust.v_total_mid = adjust->v_total_mid; stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; @@ -454,6 +456,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, int i = 0; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -484,6 +488,8 @@ bool dc_stream_get_crtc_position(struct dc *dc, bool ret = false; struct crtc_position position; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -603,6 +609,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, if (pipe == NULL) return false; + dc_exit_ips_for_hw_access(dc); + /* By default, capture the full frame */ param.windowa_x_start = 0; param.windowa_y_start = 0; @@ -662,6 +670,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, struct pipe_ctx *pipe; struct timing_generator *tg; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream) @@ -686,6 +696,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, int i; struct pipe_ctx *pipe_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == 
stream) { @@ -721,6 +733,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, if (option > DITHER_OPTION_MAX) return; + dc_exit_ips_for_hw_access(stream->ctx->dc); + stream->dither_option = option; memset(&params, 0, sizeof(params)); @@ -745,6 +759,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) bool ret = false; struct pipe_ctx *pipes; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { pipes = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -762,6 +778,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) bool ret = false; struct pipe_ctx *pipes; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { @@ -788,6 +806,8 @@ void dc_stream_set_static_screen_params(struct dc *dc, struct pipe_ctx *pipes_affected[MAX_PIPES]; int num_pipes_affected = 0; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < num_streams; i++) { struct dc_stream_state *stream = streams[i]; @@ -1766,6 +1786,8 @@ void dc_enable_stereo( int i, j; struct pipe_ctx *pipe; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (context != NULL) { pipe = &context->res_ctx.pipe_ctx[i]; @@ -1785,6 +1807,8 @@ void dc_enable_stereo( void dc_trigger_sync(struct dc *dc, struct dc_state *context) { if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { + dc_exit_ips_for_hw_access(dc); + enable_timing_multisync(dc, context); program_timing_sync(dc, context); } @@ -2041,6 +2065,8 @@ enum dc_status dc_commit_streams(struct dc *dc, if (!streams_changed(dc, streams, stream_count)) return res; + dc_exit_ips_for_hw_access(dc); + DC_LOG_DC("%s: %d streams\n", __func__, stream_count); for (i = 0; i < stream_count; i++) { @@ -2428,6 +2454,10 @@ static enum surface_update_type get_scaling_info_update_type( /* Changing clip size of a large surface may result in MPC slice count change */ update_flags->bits.bandwidth_change = 1; + if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width || + u->scaling_info->clip_rect.height != u->surface->clip_rect.height) + update_flags->bits.clip_size_change = 1; + if (u->scaling_info->src_rect.x != u->surface->src_rect.x || u->scaling_info->src_rect.y != u->surface->src_rect.y || u->scaling_info->clip_rect.x != u->surface->clip_rect.x @@ -2441,7 +2471,8 @@ static enum surface_update_type get_scaling_info_update_type( || update_flags->bits.scaling_change) return UPDATE_TYPE_FULL; - if (update_flags->bits.position_change) + if (update_flags->bits.position_change || + update_flags->bits.clip_size_change) return UPDATE_TYPE_MED; return UPDATE_TYPE_FAST; @@ -3376,6 +3407,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, int i, j; struct pipe_ctx *top_pipe_to_program = NULL; struct dc_stream_status *stream_status = NULL; + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); top_pipe_to_program = resource_get_otg_master_for_stream( @@ -3503,10 +3536,23 @@ static void commit_planes_for_stream(struct dc *dc, // dc->current_state anymore, so we have to cache it before we apply // the new SubVP context subvp_prev_use = false; + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); if (update_type == UPDATE_TYPE_FULL) wait_for_outstanding_hw_updates(dc, context); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) {
set_p_state_switch_method(dc, context, pipe); + + if (dc->debug.visual_confirm) + dc_update_visual_confirm_color(dc, context, pipe); + } + } + if (update_type == UPDATE_TYPE_FULL) { dc_allow_idle_optimizations(dc, false); @@ -3541,17 +3587,6 @@ static void commit_planes_for_stream(struct dc *dc, } } - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (pipe->stream && pipe->plane_state) { - set_p_state_switch_method(dc, context, pipe); - - if (dc->debug.visual_confirm) - dc_update_visual_confirm_color(dc, context, pipe); - } - } - if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; struct pipe_ctx *odm_pipe; @@ -3817,7 +3852,9 @@ static void commit_planes_for_stream(struct dc *dc, * programming has completed (we turn on phantom OTG in order * to complete the plane disable for phantom pipes). */ - dc->hwss.apply_ctx_to_hw(dc, context); + + if (dc->hwss.disable_phantom_streams) + dc->hwss.disable_phantom_streams(dc, context); } if (update_type != UPDATE_TYPE_FAST) @@ -4382,6 +4419,8 @@ bool dc_update_planes_and_stream(struct dc *dc, bool is_plane_addition = 0; bool is_fast_update_only; + dc_exit_ips_for_hw_access(dc); + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream); @@ -4502,6 +4541,8 @@ void dc_commit_updates_for_stream(struct dc *dc, int i, j; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + dc_exit_ips_for_hw_access(dc); + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); stream_status = dc_stream_get_status(stream); context = dc->current_state; @@ -4686,6 +4727,8 @@ void dc_set_power_state( case DC_ACPI_CM_POWER_STATE_D0: dc_state_construct(dc, dc->current_state); + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); dc->hwss.init_hw(dc); @@ -4827,6 +4870,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) dc->idle_optimizations_allowed = allow; } +void dc_exit_ips_for_hw_access(struct dc *dc) +{ + if (dc->caps.ips_support) + dc_allow_idle_optimizations(dc, false); +} + bool dc_dmub_is_ips_idle_state(struct dc *dc) { uint32_t idle_state = 0; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 9fbdb09697fd..1b7765bc5e5e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1834,23 +1834,6 @@ int resource_find_any_free_pipe(struct resource_context *new_res_ctx, bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type) { -#ifdef DBG - if (pipe_ctx->stream == NULL) { - /* a free pipe with dangling states */ - ASSERT(!pipe_ctx->plane_state); - ASSERT(!pipe_ctx->prev_odm_pipe); - ASSERT(!pipe_ctx->next_odm_pipe); - ASSERT(!pipe_ctx->top_pipe); - ASSERT(!pipe_ctx->bottom_pipe); - } else if (pipe_ctx->top_pipe) { - /* a secondary DPP pipe must be signed to a plane */ - ASSERT(pipe_ctx->plane_state) - } - /* Add more checks here to prevent corrupted pipe ctx. It is very hard - * to debug this issue afterwards because we can't pinpoint the code - * location causing inconsistent pipe context states. 
- */ -#endif switch (type) { case OTG_MASTER: return !pipe_ctx->prev_odm_pipe && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 88c6436b28b6..180ac47868c2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -291,11 +291,14 @@ void dc_state_destruct(struct dc_state *state) dc_stream_release(state->phantom_streams[i]); state->phantom_streams[i] = NULL; } + state->phantom_stream_count = 0; for (i = 0; i < state->phantom_plane_count; i++) { dc_plane_state_release(state->phantom_planes[i]); state->phantom_planes[i] = NULL; } + state->phantom_plane_count = 0; + state->stream_mask = 0; memset(&state->res_ctx, 0, sizeof(state->res_ctx)); memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg)); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 54670e0b1518..51a970fcb5d0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -423,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc, return false; } + dc_exit_ips_for_hw_access(dc); + wb_info->dwb_params.out_transfer_func = stream->out_transfer_func; dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; @@ -493,6 +495,8 @@ bool dc_stream_fc_disable_writeback(struct dc *dc, return false; } + dc_exit_ips_for_hw_access(dc); + if (dwb->funcs->set_fc_enable) dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE); @@ -542,6 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc, return false; } + dc_exit_ips_for_hw_access(dc); + /* disable writeback */ if (dc->hwss.disable_writeback) { struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; @@ -557,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc, int num_dwb, struct dc_writeback_info *wb_info) { + dc_exit_ips_for_hw_access(dc); + if (dc->hwss.mmhubbub_warmup) return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); else @@ -569,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) struct resource_context *res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -597,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream, dc = stream->ctx->dc; res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; @@ -628,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, struct resource_context *res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -664,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) if (i == MAX_PIPES) return true; + dc_exit_ips_for_hw_access(dc); + return dc->hwss.dmdata_status_done(pipe); } @@ -698,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; + dc_exit_ips_for_hw_access(dc); + dc->hwss.program_dmdata_engine(pipe_ctx); if (hubp->funcs->dmdata_set_attributes != NULL && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 19a2c7140ae8..19140fb65787 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -161,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status( break; } + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c9317ea0258e..9b42f6fc8c69 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.266" +#define DC_VER "3.2.272" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -429,12 +429,12 @@ struct dc_config { bool force_bios_enable_lttpr; uint8_t force_bios_fixed_vs; int sdpif_request_limit_words_per_umc; - bool use_old_fixed_vs_sequence; bool dc_mode_clk_limit_support; bool EnableMinDispClkODM; bool enable_auto_dpm_test_logs; unsigned int disable_ips; unsigned int disable_ips_in_vpb; + bool usb4_bw_alloc_support; }; enum visual_confirm { @@ -987,9 +987,11 @@ struct dc_debug_options { bool psp_disabled_wa; unsigned int ips2_eval_delay_us; unsigned int ips2_entry_delay_us; + bool disable_dmub_reallow_idle; bool disable_timeout; bool disable_extblankadj; unsigned int static_screen_wait_frames; + bool force_chroma_subsampling_1tap; }; struct gpu_info_soc_bounding_box_v1_0; @@ -1068,6 +1070,7 @@ struct dc { } scratch; struct dml2_configuration_options dml2_options; + enum dc_acpi_cm_power_state power_state; }; enum frame_buffer_mode { @@ -1249,6 +1252,7 @@ union surface_update_flags { uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; + uint32_t clip_size_change: 1; uint32_t position_change:1; uint32_t in_transfer_func_change:1; uint32_t input_csc_change:1; @@ -1568,7 +1572,19 @@ struct dc_link { enum engine_id dpia_preferred_eng_id; bool test_pattern_enabled; + /* Pending/Current test pattern are only used to perform and track + * FIXED_VS retimer test pattern/lane adjustment override state. + * Pending allows link HWSS to differentiate PHY vs non-PHY pattern, + * to perform specific lane adjust overrides before setting certain + * PHY test patterns. In cases when lane adjust and set test pattern + * calls are not performed atomically (i.e. performing link training), + * pending_test_pattern will be invalid or contain a non-PHY test pattern + * and current_test_pattern will contain required context for any future + * set pattern/set lane adjust to transition between override state(s). 
+ * */ enum dp_test_pattern current_test_pattern; + enum dp_test_pattern pending_test_pattern; + union compliance_test_state compliance_test_state; void *priv; @@ -2219,11 +2235,9 @@ struct dc_sink_dsc_caps { // 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology), // 'false' if they are sink's DSC caps bool is_virtual_dpcd_dsc; -#if defined(CONFIG_DRM_AMD_DC_FP) // 'true' if MST topology supports DSC passthrough for sink // 'false' if MST topology does not support DSC passthrough bool is_dsc_passthrough_supported; -#endif struct dsc_dec_dpcd_caps dsc_dec_caps; }; @@ -2325,6 +2339,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_ struct dc_cursor_attributes *cursor_attr); void dc_allow_idle_optimizations(struct dc *dc, bool allow); +void dc_exit_ips_for_hw_access(struct dc *dc); bool dc_dmub_is_ips_idle_state(struct dc *dc); /* set min and max memory clock to lowest and highest DPM level, respectively */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 2b79a0e5638e..6083b1dcf050 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -74,7 +74,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status; - status = dmub_srv_wait_for_idle(dmub, 100000); + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); + if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); @@ -125,7 +128,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list) { - struct dc_context *dc_ctx = dc_dmub_srv->ctx; + struct dc_context *dc_ctx; struct dmub_srv *dmub; enum dmub_status status; int i; @@ -133,6 +136,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; + dc_ctx = dc_dmub_srv->ctx; dmub = dc_dmub_srv->dmub; for (i = 0 ; i < count; i++) { @@ -145,7 +149,9 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, if (status == DMUB_STATUS_POWER_STATE_D3) return false; - dmub_srv_wait_for_idle(dmub, 100000); + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); /* Requeue the command. 
*/ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); @@ -186,7 +192,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, // Wait for DMUB to process command if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { - status = dmub_srv_wait_for_idle(dmub, 100000); + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); @@ -780,21 +788,22 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, } else if (subvp_pipe->next_odm_pipe) { pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx; } else { - pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0; + pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF; } // Find phantom pipe index based on phantom stream for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; - if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) { + if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) && + phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) { pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst; if (phantom_pipe->bottom_pipe) { pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst; } else if (phantom_pipe->next_odm_pipe) { pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst; } else { - pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0; + pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF; } break; } @@ -1161,7 +1170,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) { - struct dc_context *dc_ctx = dc_dmub_srv->ctx; + struct dc_context *dc_ctx; enum dmub_status status; if (!dc_dmub_srv || !dc_dmub_srv->dmub) @@ -1170,6 +1179,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) return true; + dc_ctx = dc_dmub_srv->ctx; + if (wait) { if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { do { @@ -1190,11 +1201,17 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) { + struct dc_dmub_srv *dc_dmub_srv; union dmub_rb_cmd cmd = {0}; if (dc->debug.dmcub_emulation) return; + if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) + return; + + dc_dmub_srv = dc->ctx->dmub_srv; + memset(&cmd, 0, sizeof(cmd)); cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE; @@ -1205,19 +1222,42 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle; if (allow_idle) { - if (dc->hwss.set_idle_state) - dc->hwss.set_idle_state(dc, true); + volatile struct dmub_shared_state_ips_driver *ips_driver = + &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; + union dmub_shared_state_ips_driver_signals new_signals; + + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + + memset(&new_signals, 0, sizeof(new_signals)); + + if (dc->config.disable_ips == DMUB_IPS_ENABLE || + 
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { + new_signals.bits.allow_ips1 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + } + + ips_driver->signals = new_signals; } /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ /* We also do not perform a wait since DMCUB could enter idle after the notification. */ - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); + dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); } static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { - uint32_t allow_state = 0; - uint32_t commit_state = 0; + struct dc_dmub_srv *dc_dmub_srv; if (dc->debug.dmcub_emulation) return; @@ -1225,61 +1265,44 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) return; - if (dc->hwss.get_idle_state && - dc->hwss.set_idle_state && - dc->clk_mgr->funcs->exit_low_power_state) { + dc_dmub_srv = dc->ctx->dmub_srv; - allow_state = dc->hwss.get_idle_state(dc); - dc->hwss.set_idle_state(dc, false); + if (dc->clk_mgr->funcs->exit_low_power_state) { + volatile const struct dmub_shared_state_ips_fw *ips_fw = + &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; + volatile struct dmub_shared_state_ips_driver *ips_driver = + &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; + union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals; - if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) { - // Wait for evaluation time - for (;;) { - udelay(dc->debug.ips2_eval_delay_us); - commit_state = dc->hwss.get_idle_state(dc); - if (commit_state & DMUB_IPS2_ALLOW_MASK) - break; + ips_driver->signals.all = 0; - /* allow was still set, retry eval delay */ - dc->hwss.set_idle_state(dc, false); - } + if (prev_driver_signals.bits.allow_ips2) { + udelay(dc->debug.ips2_eval_delay_us); - if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) { + if (ips_fw->signals.bits.ips2_commit) { // Tell PMFW to exit low power state dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); // Wait for IPS2 entry upper bound udelay(dc->debug.ips2_entry_delay_us); - dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - for (;;) { - commit_state = dc->hwss.get_idle_state(dc); - if (commit_state & DMUB_IPS2_COMMIT_MASK) - break; + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + while (ips_fw->signals.bits.ips2_commit) udelay(1); - } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - /* TODO: See if we can return early here - IPS2 should go - * back directly to IPS0 and clear the flags, but it will - * be safer to directly notify DMCUB of this. 
- */ - allow_state = dc->hwss.get_idle_state(dc); + dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); } } dc_dmub_srv_notify_idle(dc, false); - if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) { - for (;;) { - commit_state = dc->hwss.get_idle_state(dc); - if (commit_state & DMUB_IPS1_COMMIT_MASK) - break; - + if (prev_driver_signals.bits.allow_ips1) { + while (ips_fw->signals.bits.ips1_commit) udelay(1); - } + } } @@ -1361,7 +1384,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in else result = dm_execute_dmub_cmd(ctx, cmd, wait_type); - if (result && reallow_idle) + if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; @@ -1410,7 +1433,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); - if (result && reallow_idle) + if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 811474f4419b..aae2f3a2660d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -827,9 +827,7 @@ struct dc_dsc_config { uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */ bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */ int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ -#if defined(CONFIG_DRM_AMD_DC_FP) bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */ -#endif bool is_dp; /* indicate if DSC is applied based on DP's capability */ uint32_t mst_pbn; /* pbn of display on dsc mst hub */ const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */ @@ -942,6 +940,7 @@ struct dc_crtc_timing { uint32_t hdmi_vic; uint32_t rid; uint32_t fr_index; + uint32_t frl_uncompressed_video_bandwidth_in_kbps; enum dc_timing_3d_format timing_3d_format; enum dc_color_depth display_color_depth; enum dc_pixel_encoding pixel_encoding; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index f0458b8f00af..12f3c35b3a34 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -239,27 +239,294 @@ static void check_audio_bandwidth_hdmi( } } } +static struct fixed31_32 get_link_symbol_clk_freq_mhz(enum dc_link_rate link_rate) +{ + switch (link_rate) { + case LINK_RATE_LOW: + return dc_fixpt_from_int(162); /* 162 MHz */ + case LINK_RATE_HIGH: + return dc_fixpt_from_int(270); /* 270 MHz */ + case LINK_RATE_HIGH2: + return dc_fixpt_from_int(540); /* 540 MHz */ + case LINK_RATE_HIGH3: + return dc_fixpt_from_int(810); /* 810 MHz */ + case LINK_RATE_UHBR10: + return dc_fixpt_from_fraction(3125, 10); /* 312.5 MHz */ + case LINK_RATE_UHBR13_5: + return dc_fixpt_from_fraction(421875, 1000); /* 421.875 MHz */ + case LINK_RATE_UHBR20: + return dc_fixpt_from_int(625); /* 625 MHz */ + default: + /* Unexpected case, this requires debug if encountered. 
*/ + ASSERT(0); + return dc_fixpt_from_int(0); + } +} + +struct dp_audio_layout_config { + uint8_t layouts_per_sample_denom; + uint8_t symbols_per_layout; + uint8_t max_layouts_per_audio_sdp; +}; + +static void get_audio_layout_config( + uint32_t channel_count, + enum dp_link_encoding encoding, + struct dp_audio_layout_config *output) +{ + /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP, + * with each layout being the same size (8ch layout). + */ + if (encoding == DP_8b_10b_ENCODING) { + if (channel_count == 2) { + output->layouts_per_sample_denom = 4; + output->symbols_per_layout = 40; + output->max_layouts_per_audio_sdp = 1; + } else if (channel_count == 8 || channel_count == 6) { + output->layouts_per_sample_denom = 1; + output->symbols_per_layout = 40; + output->max_layouts_per_audio_sdp = 1; + } + } else if (encoding == DP_128b_132b_ENCODING) { + if (channel_count == 2) { + output->layouts_per_sample_denom = 4; + output->symbols_per_layout = 10; + output->max_layouts_per_audio_sdp = 1; + } else if (channel_count == 8 || channel_count == 6) { + output->layouts_per_sample_denom = 1; + output->symbols_per_layout = 10; + output->max_layouts_per_audio_sdp = 1; + } + } +} -/*For DP SST, calculate if specified sample rates can fit into a given timing */ -static void check_audio_bandwidth_dpsst( +static uint32_t get_av_stream_map_lane_count( + enum dp_link_encoding encoding, + enum dc_lane_count lane_count, + bool is_mst) +{ + uint32_t av_stream_map_lane_count = 0; + + if (encoding == DP_8b_10b_ENCODING) { + if (!is_mst) + av_stream_map_lane_count = lane_count; + else + av_stream_map_lane_count = 4; + } else if (encoding == DP_128b_132b_ENCODING) { + av_stream_map_lane_count = 4; + } + + ASSERT(av_stream_map_lane_count != 0); + + return av_stream_map_lane_count; +} + +static uint32_t get_audio_sdp_overhead( + enum dp_link_encoding encoding, + enum dc_lane_count lane_count, + bool is_mst) +{ + uint32_t audio_sdp_overhead = 0; + + if (encoding == DP_8b_10b_ENCODING) { + if (is_mst) + audio_sdp_overhead = 16; /* 4 * 2 + 8 */ + else + audio_sdp_overhead = lane_count * 2 + 8; + } else if (encoding == DP_128b_132b_ENCODING) { + audio_sdp_overhead = 10; /* 4 x 2.5 */ + } + + ASSERT(audio_sdp_overhead != 0); + + return audio_sdp_overhead; +} + +static uint32_t calculate_required_audio_bw_in_symbols( const struct audio_crtc_info *crtc_info, + const struct dp_audio_layout_config *layout_config, uint32_t channel_count, - union audio_sample_rates *sample_rates) + uint32_t sample_rate_hz, + uint32_t av_stream_map_lane_count, + uint32_t audio_sdp_overhead) +{ + /* DP spec recommends between 1.05 to 1.1 safety margin to prevent sample under-run */ + struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100); + struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction( + crtc_info->requested_pixel_clock_100Hz, crtc_info->h_total * 10); + struct fixed31_32 samples_per_line; + struct fixed31_32 layouts_per_line; + struct fixed31_32 symbols_per_sdp_max_layout; + struct fixed31_32 remainder; + uint32_t num_sdp_with_max_layouts; + uint32_t required_symbols_per_hblank; + + samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000); + samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz); + layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config->layouts_per_sample_denom); + + num_sdp_with_max_layouts = dc_fixpt_floor( + dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp)); + symbols_per_sdp_max_layout = 
dc_fixpt_from_int( + layout_config->max_layouts_per_audio_sdp * layout_config->symbols_per_layout); + symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead); + symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin); + required_symbols_per_hblank = num_sdp_with_max_layouts; + required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) / + av_stream_map_lane_count) * av_stream_map_lane_count; + + if (num_sdp_with_max_layouts != dc_fixpt_ceil( + dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp))) { + remainder = dc_fixpt_sub_int(layouts_per_line, + num_sdp_with_max_layouts * layout_config->max_layouts_per_audio_sdp); + remainder = dc_fixpt_mul_int(remainder, layout_config->symbols_per_layout); + remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead); + remainder = dc_fixpt_mul(remainder, audio_sdp_margin); + required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) / + av_stream_map_lane_count) * av_stream_map_lane_count; + } + + return required_symbols_per_hblank; +} + +/* Current calculation only applicable for 8b/10b MST and 128b/132b SST/MST. + */ +static uint32_t calculate_available_hblank_bw_in_symbols( + const struct audio_crtc_info *crtc_info, + const struct audio_dp_link_info *dp_link_info) { - /* do nothing */ + uint64_t hblank = crtc_info->h_total - crtc_info->h_active; + struct fixed31_32 hblank_time_msec = + dc_fixpt_from_fraction(hblank * 10, crtc_info->requested_pixel_clock_100Hz); + struct fixed31_32 lsclkfreq_mhz = + get_link_symbol_clk_freq_mhz(dp_link_info->link_rate); + struct fixed31_32 average_stream_sym_bw_frac; + struct fixed31_32 peak_stream_bw_kbps; + struct fixed31_32 bits_per_pixel; + struct fixed31_32 link_bw_kbps; + struct fixed31_32 available_stream_sym_count; + uint32_t available_hblank_bw = 0; /* in stream symbols */ + + if (crtc_info->dsc_bits_per_pixel) { + bits_per_pixel = dc_fixpt_from_fraction(crtc_info->dsc_bits_per_pixel, 16); + } else { + switch (crtc_info->color_depth) { + case COLOR_DEPTH_666: + bits_per_pixel = dc_fixpt_from_int(6); + break; + case COLOR_DEPTH_888: + bits_per_pixel = dc_fixpt_from_int(8); + break; + case COLOR_DEPTH_101010: + bits_per_pixel = dc_fixpt_from_int(10); + break; + case COLOR_DEPTH_121212: + bits_per_pixel = dc_fixpt_from_int(12); + break; + default: + /* Default to commonly supported color depth. */ + bits_per_pixel = dc_fixpt_from_int(8); + break; + } + + bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 3); + + if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 3); + bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 2); + } else if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR420) { + bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 2); + } + } + + /* Use simple stream BW calculation because mainlink overhead is + * accounted for separately in the audio BW calculations. 
+ */ + peak_stream_bw_kbps = dc_fixpt_from_fraction(crtc_info->requested_pixel_clock_100Hz, 10); + peak_stream_bw_kbps = dc_fixpt_mul(peak_stream_bw_kbps, bits_per_pixel); + link_bw_kbps = dc_fixpt_from_int(dp_link_info->link_bandwidth_kbps); + average_stream_sym_bw_frac = dc_fixpt_div(peak_stream_bw_kbps, link_bw_kbps); + + available_stream_sym_count = dc_fixpt_mul_int(hblank_time_msec, 1000); + available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, lsclkfreq_mhz); + available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, average_stream_sym_bw_frac); + available_hblank_bw = dc_fixpt_floor(available_stream_sym_count); + available_hblank_bw *= dp_link_info->lane_count; + available_hblank_bw -= crtc_info->dsc_num_slices * 4; /* EOC overhead */ + + if (available_hblank_bw < dp_link_info->hblank_min_symbol_width) + available_hblank_bw = dp_link_info->hblank_min_symbol_width; + + if (available_hblank_bw < 12) + available_hblank_bw = 0; + else + available_hblank_bw -= 12; /* Main link overhead */ + + return available_hblank_bw; } -/*For DP MST, calculate if specified sample rates can fit into a given timing */ -static void check_audio_bandwidth_dpmst( +static void check_audio_bandwidth_dp( const struct audio_crtc_info *crtc_info, + const struct audio_dp_link_info *dp_link_info, uint32_t channel_count, union audio_sample_rates *sample_rates) { - /* do nothing */ + struct dp_audio_layout_config layout_config = {0}; + uint32_t available_hblank_bw; + uint32_t av_stream_map_lane_count; + uint32_t audio_sdp_overhead; + + /* TODO: Add validation for SST 8b/10 case */ + if (!dp_link_info->is_mst && dp_link_info->encoding == DP_8b_10b_ENCODING) + return; + + available_hblank_bw = calculate_available_hblank_bw_in_symbols( + crtc_info, dp_link_info); + av_stream_map_lane_count = get_av_stream_map_lane_count( + dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst); + audio_sdp_overhead = get_audio_sdp_overhead( + dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst); + get_audio_layout_config( + channel_count, dp_link_info->encoding, &layout_config); + + if (layout_config.max_layouts_per_audio_sdp == 0 || + layout_config.symbols_per_layout == 0 || + layout_config.layouts_per_sample_denom == 0) { + return; + } + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 192000, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_192 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 176400, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_176_4 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 96000, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_96 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 88200, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_88_2 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 48000, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_48 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 44100, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_44_1 = 0; + if (available_hblank_bw < 
calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 32000, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_32 = 0; } static void check_audio_bandwidth( const struct audio_crtc_info *crtc_info, + const struct audio_dp_link_info *dp_link_info, uint32_t channel_count, enum signal_type signal, union audio_sample_rates *sample_rates) @@ -271,12 +538,9 @@ static void check_audio_bandwidth( break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: - check_audio_bandwidth_dpsst( - crtc_info, channel_count, sample_rates); - break; case SIGNAL_TYPE_DISPLAY_PORT_MST: - check_audio_bandwidth_dpmst( - crtc_info, channel_count, sample_rates); + check_audio_bandwidth_dp( + crtc_info, dp_link_info, channel_count, sample_rates); break; default: break; @@ -394,7 +658,8 @@ void dce_aud_az_configure( struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, - const struct audio_info *audio_info) + const struct audio_info *audio_info, + const struct audio_dp_link_info *dp_link_info) { struct dce_audio *aud = DCE_AUD(audio); @@ -529,6 +794,7 @@ void dce_aud_az_configure( check_audio_bandwidth( crtc_info, + dp_link_info, channel_count, signal, &sample_rates); @@ -588,6 +854,7 @@ void dce_aud_az_configure( check_audio_bandwidth( crtc_info, + dp_link_info, 8, signal, &sample_rate); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index dbd2cfed0603..539f881928d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -170,7 +170,8 @@ void dce_aud_az_disable(struct audio *audio); void dce_aud_az_configure(struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, - const struct audio_info *audio_info); + const struct audio_info *audio_info, + const struct audio_dp_link_info *dp_link_info); void dce_aud_wall_dto_setup(struct audio *audio, enum signal_type signal, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c index e8570060d007..5bca67407c5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -290,4 +290,5 @@ void dce_panel_cntl_construct( dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs; dce_panel_cntl->base.ctx = init_data->ctx; dce_panel_cntl->base.inst = init_data->inst; + dce_panel_cntl->base.pwrseq_inst = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index ba1fec3016d5..bf636b28e3e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -65,5 +65,9 @@ bool should_use_dmub_lock(struct dc_link *link) { if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true; + + if (link->replay_settings.replay_feature_enabled) + return true; + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 38e4797e9476..b010814706fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -258,7 +258,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, *residency = 0; } -/** +/* * Set REPLAY power optimization flags and coasting vtotal. 
*/ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub, @@ -280,7 +280,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } -/** +/* * send Replay general cmd to DMUB. */ static void dmub_replay_send_cmd(struct dmub_replay *dmub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 3538973bd0c6..b7e57aa27361 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -62,6 +62,26 @@ void cm_helper_program_color_matrices( } +void cm_helper_read_color_matrices(struct dc_context *ctx, + uint16_t *regval, + const struct color_matrices_reg *reg) +{ + uint32_t cur_csc_reg, regval0, regval1; + unsigned int i = 0; + + for (cur_csc_reg = reg->csc_c11_c12; + cur_csc_reg <= reg->csc_c33_c34; cur_csc_reg++) { + REG_GET_2(cur_csc_reg, + csc_c11, &regval0, + csc_c12, &regval1); + + regval[2 * i] = regval0; + regval[(2 * i) + 1] = regval1; + + i++; + } +} + void cm_helper_program_xfer_func( struct dc_context *ctx, const struct pwl_params *params, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h index 0a68b63d6126..decc50b1ac53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h @@ -114,5 +114,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format( const struct dc_transfer_func *output_tf, struct pwl_params *lut_params); - +void cm_helper_read_color_matrices(struct dc_context *ctx, + uint16_t *regval, + const struct color_matrices_reg *reg); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index ef52e6b6eccf..4e391fd1d71c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -543,7 +543,8 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, .dpp_program_blnd_lut = NULL, .dpp_program_shaper_lut = NULL, - .dpp_program_3dlut = NULL + .dpp_program_3dlut = NULL, + .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap, }; static struct dpp_caps dcn10_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index c9e045666dcc..a039eedc7c24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1521,4 +1521,7 @@ void dpp1_construct(struct dcn10_dpp *dpp1, const struct dcn_dpp_registers *tf_regs, const struct dcn_dpp_shift *tf_shift, const struct dcn_dpp_mask *tf_mask); + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 904c2d278998..2f994a3a0b9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -98,7 +98,7 @@ static void program_gamut_remap( if (regval == NULL || select == GAMUT_REMAP_BYPASS) { REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); + CM_GAMUT_REMAP_MODE, 0); return; } switch (select) { @@ -181,6 +181,74 @@ void dpp1_cm_set_gamut_remap( } } +static void read_gamut_remap(struct dcn10_dpp *dpp, + uint16_t *regval, 
+ enum gamut_remap_select *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + REG_GET(CM_GAMUT_REMAP_CONTROL, + CM_GAMUT_REMAP_MODE, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == GAMUT_REMAP_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMB_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + uint16_t arr_reg_val[12]; + enum gamut_remap_select select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + static void dpp1_cm_program_color_matrix( struct dcn10_dpp *dpp, const uint16_t *regval) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 0dec57679269..48a40dcc7050 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -23,6 +23,7 @@ * */ +#include "core_types.h" #include "dm_services.h" #include "dcn10_opp.h" #include "reg_helper.h" @@ -160,6 +161,9 @@ static void opp1_set_pixel_encoding( struct dcn10_opp *oppn10, const struct clamping_and_pixel_encoding_params *params) { + bool force_chroma_subsampling_1tap = + oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap; + switch (params->pixel_encoding) { case PIXEL_ENCODING_RGB: @@ -178,6 +182,9 @@ static void opp1_set_pixel_encoding( default: break; } + + if (force_chroma_subsampling_1tap) + REG_UPDATE(FMT_CONTROL, FMT_SUBSAMPLING_MODE, 0); } /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index eaa7032f0f1a..1516c0a48726 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -55,21 +55,23 @@ void dpp20_read_state(struct dpp *dpp_base, REG_GET(DPP_CONTROL, DPP_CLOCK_ENABLE, &s->is_enabled); + + // Degamma LUT (RAM) REG_GET(CM_DGAM_CONTROL, - CM_DGAM_LUT_MODE, &s->dgam_lut_mode); - // BGAM has no ROM, and definition is different, can't reuse same dump - //REG_GET(CM_BLNDGAM_CONTROL, - // CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode); - REG_GET(CM_GAMUT_REMAP_CONTROL, - CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); - if (s->gamut_remap_mode) { - s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); - s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); - s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); - s->gamut_remap_c23_c24 = 
REG_READ(CM_GAMUT_REMAP_C23_C24); - s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); - s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); - } + CM_DGAM_LUT_MODE, &s->dgam_lut_mode); + + // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) + REG_GET(CM_SHAPER_CONTROL, + CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); + REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode, + CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_SIZE, &s->lut3d_size); + + // Blend/Out Gamma (RAM) + REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, + CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode); } void dpp2_power_on_obuf( @@ -393,6 +395,7 @@ static struct dpp_funcs dcn20_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, }; static struct dpp_caps dcn20_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index e735363d0051..672cde46c4b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -775,4 +775,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2, void dpp2_power_on_obuf( struct dpp *dpp_base, bool power_on); + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index 598caa508d43..58dc69926e8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -234,6 +234,61 @@ void dpp2_cm_set_gamut_remap( } } +static void read_gamut_remap(struct dcn20_dpp *dpp, + uint16_t *regval, + enum dcn20_gamut_remap_select *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == DCN2_GAMUT_REMAP_COEF_A) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == DCN2_GAMUT_REMAP_COEF_B) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + uint16_t arr_reg_val[12]; + enum dcn20_gamut_remap_select select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == DCN2_GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + void dpp2_program_input_csc( struct dpp *dpp_base, enum dc_color_space color_space, diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 5da6e44f284a..16b5ff208d14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -542,8 +542,30 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) return NULL; } +static void mpc2_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); + REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); + REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, + MPCC_ALPHA_BLND_MODE, &s->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); + REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, + MPCC_BUSY, &s->busy); + + /* Gamma block state */ + REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst], + MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode); +} + static const struct mpc_funcs dcn20_mpc_funcs = { - .read_mpcc_state = mpc1_read_mpcc_state, + .read_mpcc_state = mpc2_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, .mpc_init = mpc1_mpc_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c index a7268027a472..f809a7d21033 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c @@ -275,6 +275,7 @@ static struct dpp_funcs dcn201_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, }; static struct dpp_caps dcn201_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index 11f7746f3a65..a3a769aad042 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -44,12 +44,45 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t gamcor_lut_mode, rgam_lut_mode; REG_GET(DPP_CONTROL, - DPP_CLOCK_ENABLE, &s->is_enabled); + DPP_CLOCK_ENABLE, &s->is_enabled); + + // Pre-degamma (ROM) + REG_GET_2(PRE_DEGAM, + PRE_DEGAM_MODE, &s->pre_dgam_mode, + PRE_DEGAM_SELECT, &s->pre_dgam_select); + + // Gamma Correction (RAM) + REG_GET(CM_GAMCOR_CONTROL, + CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode); + if (s->gamcor_mode) { + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode); + if (!gamcor_lut_mode) + s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B + } - // TODO: Implement for DCN3 + // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) + REG_GET(CM_SHAPER_CONTROL, + CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_SIZE, &s->lut3d_size); + + // Blend/Out Gamma (RAM) + REG_GET(CM_BLNDGAM_CONTROL, + CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode); + if (s->rgam_lut_mode){ + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode); + if (!rgam_lut_mode) + s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B + } } + 
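[Note on the dpp30_read_state() hunk just above, which replaces the old "TODO: Implement for DCN3" stub: every DCN3x color block is decoded the same way. A *_MODE_CURRENT field of zero means the stage is bypassed; when a RAM LUT is active, a companion *_SELECT_CURRENT bit reports which of the two shadow banks (A or B) is in use. A minimal sketch of that decode, assuming the three dc_lut_mode values seen in the hunk; the helper itself is hypothetical:

        enum dc_lut_mode { LUT_BYPASS, LUT_RAM_A, LUT_RAM_B };

        static enum dc_lut_mode decode_current_lut(unsigned int mode_current,
                                                   unsigned int select_current)
        {
                if (mode_current == 0)
                        return LUT_BYPASS;      /* stage disabled */
                /* a RAM LUT is active; SELECT_CURRENT == 0 means bank A */
                return select_current == 0 ? LUT_RAM_A : LUT_RAM_B;
        }

This mirrors the order used in the hunk: read *_MODE_CURRENT first, and only consult *_SELECT_CURRENT when a RAM LUT is reported active.]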
/*program post scaler scs block in dpp CM*/ void dpp3_program_post_csc( struct dpp *dpp_base, @@ -1462,6 +1495,7 @@ static struct dpp_funcs dcn30_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h index cea3208e4ab1..2ac8045a87a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h @@ -637,4 +637,6 @@ void dpp3_program_cm_dealpha( struct dpp *dpp_base, uint32_t enable, uint32_t additive_blending); +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index e43f77c11c00..2f5b3fbd3507 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -56,16 +56,13 @@ static void dpp3_enable_cm_block( static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) { - enum dc_lut_mode mode; + enum dc_lut_mode mode = LUT_BYPASS; uint32_t state_mode; uint32_t lut_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); - if (state_mode == 0) - mode = LUT_BYPASS; - if (state_mode == 2) {//Programmable RAM LUT REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); if (lut_mode == 0) @@ -408,3 +405,57 @@ void dpp3_cm_set_gamut_remap( program_gamut_remap(dpp, arr_reg_val, gamut_mode); } } + +static void read_gamut_remap(struct dcn3_dpp *dpp, + uint16_t *regval, + int *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + //current coefficient set in use + REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint16_t arr_reg_val[12]; + int select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index d1500b223858..bf3386cd444d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1129,6 +1129,64 @@ void mpc3_set_gamut_remap( } } +static void read_gamut_remap(struct dcn30_mpc *mpc30, + int mpcc_id, + uint16_t *regval, + uint32_t *select) +{ + struct color_matrices_reg gam_regs; + + //current coefficient set in use + REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select); + + gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A; + gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A; + gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A; + gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A; + + if (*select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]); + gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]); + + cm_helper_read_color_matrices( + mpc30->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]); + gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]); + + cm_helper_read_color_matrices( + mpc30->base.ctx, + regval, + &gam_regs); + + } + +} + +void mpc3_get_gamut_remap(struct mpc *mpc, + int mpcc_id, + struct mpc_grph_gamut_adjustment *adjust) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + uint16_t arr_reg_val[12]; + int select; + + read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + bool mpc3_program_3dlut( struct mpc *mpc, const struct tetrahedral_params *params, @@ -1382,8 +1440,54 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc) } } +static void mpc3_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + uint32_t rmu_status = 0xf; + + REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); + REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); + REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, + MPCC_ALPHA_BLND_MODE, &s->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); + REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, + MPCC_BUSY, &s->busy); + + /* Color blocks state */ + REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &rmu_status); + + if (rmu_status == mpcc_inst) { + REG_GET(SHAPER_CONTROL[0], + MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode); + REG_GET(RMU_3DLUT_MODE[0], + MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0], + MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(RMU_3DLUT_MODE[0], + MPC_RMU_3DLUT_SIZE, &s->lut3d_size); + } else { + REG_GET(SHAPER_CONTROL[1], + MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode); + REG_GET(RMU_3DLUT_MODE[1], + MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1], + MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(RMU_3DLUT_MODE[1], + MPC_RMU_3DLUT_SIZE, &s->lut3d_size); + } + + REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst], + MPCC_OGAM_MODE_CURRENT, &s->rgam_mode, + MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut); +} + static const struct mpc_funcs dcn30_mpc_funcs = { - .read_mpcc_state = mpc1_read_mpcc_state, + .read_mpcc_state 
= mpc3_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, .mpc_init = mpc1_mpc_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h index 5198f2167c7c..9cb96ae95a2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h @@ -1056,6 +1056,10 @@ void mpc3_set_gamut_remap( int mpcc_id, const struct mpc_grph_gamut_adjustment *adjust); +void mpc3_get_gamut_remap(struct mpc *mpc, + int mpcc_id, + struct mpc_grph_gamut_adjustment *adjust); + void mpc3_set_rmu_mux( struct mpc *mpc, int rmu_idx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c index ad0df1a72a90..9e96a3ace207 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c @@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct( dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs; dcn301_panel_cntl->base.ctx = init_data->ctx; dcn301_panel_cntl->base.inst = init_data->inst; + dcn301_panel_cntl->base.pwrseq_inst = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 03248422d6ff..281be20b1a10 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct( struct dcn31_panel_cntl *dcn31_panel_cntl, const struct panel_cntl_init_data *init_data) { + uint8_t pwrseq_inst = 0xF; + dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs; dcn31_panel_cntl->base.ctx = init_data->ctx; dcn31_panel_cntl->base.inst = init_data->inst; - dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst; + + switch (init_data->eng_id) { + case ENGINE_ID_DIGA: + pwrseq_inst = 0; + break; + case ENGINE_ID_DIGB: + pwrseq_inst = 1; + break; + default: + DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id); + ASSERT(false); + break; + } + + dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c index dcf12a0b031c..681e75c6dbaf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c @@ -133,6 +133,7 @@ static struct dpp_funcs dcn32_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 4229369c57f4..f4d3f04ec857 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -26,6 +26,9 @@ #ifndef DM_CP_PSP_IF__H #define DM_CP_PSP_IF__H +/* + * Interface to CPLIB/PSP to enable ASSR + */ struct dc_link; struct cp_psp_stream_config { diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 6042a5a6a44f..59ade76ffb18 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) 
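[For context on the $(frame_warn_flag) additions in the CFLAGS lines that follow: the DML VBA mode-support routines keep large per-state, per-plane working arrays as locals, so their stack frames exceed the kernel's default -Wframe-larger-than threshold, and the flag relaxes that warning for these objects, as was already done for the DCN30 VBA files. A toy C illustration of why the frames grow, with hypothetical dimensions; the real entry points hold dozens of such arrays:

        #define NUM_SOC_STATES 9        /* hypothetical */
        #define MAX_PLANES_SIM 8        /* hypothetical */

        static void vba_frame_size_example(void)
        {
                /* 9 states x 2 levels x 8 planes x sizeof(double) = 1152 bytes */
                double required_dppclk[NUM_SOC_STATES][2][MAX_PLANES_SIM];
                double return_bw_per_state[NUM_SOC_STATES][2];

                (void)required_dppclk;
                (void)return_bw_per_state;
        }
]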
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 63c48c29ba49..e7f4a2d491cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -4273,7 +4273,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Calculate Swath, DET Configuration, DCFCLKDeepSleep // - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { for (k = 0; k < v->NumberOfActivePlanes; ++k) { v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k]; @@ -4576,7 +4576,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Calculate Return BW - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { if (v->BlendingAndTiming[k] == k) { @@ -4635,7 +4635,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->UrgentOutOfOrderReturnPerChannelVMDataOnly); v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? 
v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency); - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { v->DCFCLKState[i][j] = v->DCFCLKPerState[i]; } @@ -4646,7 +4646,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l if (v->ClampMinDCFCLK) { /* Clamp calculated values to actual minimum */ - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) { v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk; @@ -4656,7 +4656,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { v->IdealSDPPortBandwidthPerState[i][j] = dml_min3( v->ReturnBusWidth * v->DCFCLKState[i][j], @@ -4674,7 +4674,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Re-ordering Buffer Support Check - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j] > (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) { @@ -4692,7 +4692,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]; } - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min( v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100, @@ -4708,7 +4708,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Prefetch Check - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { int NextPrefetchModeState = MinPrefetchMode; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c index 3eb3a021ab7d..3f02bb806d42 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c @@ -266,6 +266,17 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p optimal_uclk_for_dcfclk_sta_targets[i] = bw_params->clk_table.entries[j].memclk_mhz * 16; break; + } else { + /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]): + * This is required for dcn303 because it just so happens that the memory + * bandwidth is low enough such that all the optimal DCFCLK for each UCLK + * is lower than the smallest DCFCLK STA target. In this case we need to + * populate the optimal UCLK for each DCFCLK STA target to be the max UCLK. 
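+ * Illustrative example: if the DCFCLK STA targets were {400, 600, 800} MHz
+ * and every optimal_dcfclk_for_uclk[j] computed to less than 400 MHz, the
+ * break above would never be taken, so the j == num_uclk_states - 1 case
+ * below maps each STA target to the max UCLK (memclk_mhz * 16).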
+ */ + if (j == num_uclk_states - 1) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + } } } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index dd781a20692e..a0a65e099104 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1288,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex return updated; } -static bool should_allow_odm_power_optimization(struct dc *dc, +static bool should_apply_odm_power_optimization(struct dc *dc, struct dc_state *context, struct vba_vars_st *v, int *split, bool *merge) { @@ -1392,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate( { int i; unsigned int new_vlevel; + unsigned int cur_policy[MAX_PIPES]; - for (i = 0; i < pipe_cnt; i++) + for (i = 0; i < pipe_cnt; i++) { + cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy; pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + } new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); @@ -1403,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate( memset(merge, 0, MAX_PIPES * sizeof(bool)); *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge); context->bw_ctx.dml.vba.VoltageLevel = *vlevel; + } else { + for (i = 0; i < pipe_cnt; i++) + pipes[i].pipe.dest.odm_combine_policy = cur_policy[i]; } } @@ -1580,7 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, } } - if (should_allow_odm_power_optimization(dc, context, vba, split, merge)) + if (should_apply_odm_power_optimization(dc, context, vba, split, merge)) try_odm_power_optimization_and_revalidate( dc, context, pipes, split, merge, vlevel, *pipe_cnt); @@ -2209,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, int i; pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); + if (!dc->config.enable_windowed_mpo_odm) + dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); /* repopulate_pipes = 1 means the pipes were either split or merged. 
In this case * we have to re-calculate the DET allocation and run through DML once more to @@ -2753,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk struct _vcs_dpi_voltage_scaling_st entry = {0}; struct clk_limit_table_entry max_clk_data = {0}; - unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599; + unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; static const unsigned int num_dcfclk_stas = 5; unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564}; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 7ea2bd5374d5..80bebfc268db 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -583,12 +583,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) plane_count++; } - if (plane_count == 0) { + if (context->stream_count == 0 || plane_count == 0) { support = DCN_ZSTATE_SUPPORT_ALLOW; - } else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { + } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { struct dc_link *link = context->streams[0]->sink->link; bool is_pwrseq0 = link && link->link_index == 0; - bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr; + bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 || + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr); + bool is_replay = link && link->replay_settings.replay_feature_enabled; int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000; bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; @@ -596,12 +598,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000; bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency; + /*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/ if (is_pwrseq0 && allow_z10) support = DCN_ZSTATE_SUPPORT_ALLOW; - else if (is_pwrseq0 && is_psr1) + else if (is_pwrseq0 && (is_psr || is_replay)) support = allow_z8 ? 
DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; else if (allow_z8) support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; + } context->bw_ctx.bw.dcn.clk.zstate_support = support; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 0baf39d64a2d..a52c594e1ba4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -141,14 +141,33 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx, { int i; unsigned int num_found = 0; - unsigned int plane_id_assigned_to_pipe; + unsigned int plane_id_assigned_to_pipe = -1; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { - if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state, - state->res_ctx.pipe_ctx[i].stream->stream_id, - ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) { - if (plane_id_assigned_to_pipe == plane_id) - pipes[num_found++] = i; + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state || !pipe->stream) + continue; + + get_plane_id(ctx, state, pipe->plane_state, pipe->stream->stream_id, + ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[pipe->pipe_idx], + &plane_id_assigned_to_pipe); + if (plane_id_assigned_to_pipe == plane_id && !pipe->prev_odm_pipe + && (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) { + while (pipe) { + struct pipe_ctx *mpc_pipe = pipe; + + while (mpc_pipe) { + pipes[num_found++] = mpc_pipe->pipe_idx; + mpc_pipe = mpc_pipe->bottom_pipe; + if (!mpc_pipe) + break; + if (mpc_pipe->plane_state != pipe->plane_state) + mpc_pipe = NULL; + } + pipe = pipe->next_odm_pipe; + } + break; } } @@ -566,8 +585,14 @@ static unsigned int find_pipes_assigned_to_stream(struct dml2_context *ctx, stru unsigned int num_found = 0; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { - if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) { - pipes[num_found++] = i; + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->stream->stream_id == stream_id && !pipe->top_pipe && !pipe->prev_odm_pipe) { + while (pipe) { + pipes[num_found++] = pipe->pipe_idx; + pipe = pipe->next_odm_pipe; + } + break; } } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 23a608274096..1ba6933d2b36 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -398,7 +398,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, /* Copy clocks tables entries, if available */ if (dml2->config.bbox_overrides.clks_table.num_states) { p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states; - for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) { p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz; } @@ -437,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, } dml2_policy_build_synthetic_soc_states(s, p); + if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 || + dml2->v20.dml_core_ctx.project == dml_project_dcn351) { + // 
Override last out_state with data from last in_state + // This will ensure that out_state contains max fclk + memcpy(&p->out_states->state_array[p->out_states->num_states - 1], + &p->in_states->state_array[p->in_states->num_states - 1], + sizeof(struct soc_state_bounding_box_st)); + } } void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index 1068b962d1c1..f15d1dbad6a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -234,7 +234,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state if (state->streams[i]->stream_id == stream_id) { for (j = 0; j < state->stream_status[i].plane_count; j++) { if (state->stream_status[i].plane_states[j] == plane && - (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) { + (!is_plane_duplicate || (j == plane_index))) { *plane_id = (i << 16) | j; return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 0df6c55eb326..ac41f9c0a283 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -137,6 +137,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) kbps = apply_128b_132b_stream_overhead(timing, kbps); + if (link_encoding == DC_LINK_ENCODING_HDMI_FRL && + timing->vic == 0 && timing->hdmi_vic == 0 && + timing->frl_uncompressed_video_bandwidth_in_kbps != 0) + kbps = timing->frl_uncompressed_video_bandwidth_in_kbps; + return kbps; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 2352428bcea3..9d5df4c0da59 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1291,6 +1291,46 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id) } } +static void populate_audio_dp_link_info( + const struct pipe_ctx *pipe_ctx, + struct audio_dp_link_info *dp_link_info) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + const struct dc_link *link = stream->link; + struct fixed31_32 link_bw_kbps; + + dp_link_info->encoding = link->dc->link_srv->dp_get_encoding_format( + &pipe_ctx->link_config.dp_link_settings); + dp_link_info->is_mst = (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST); + dp_link_info->lane_count = pipe_ctx->link_config.dp_link_settings.lane_count; + dp_link_info->link_rate = pipe_ctx->link_config.dp_link_settings.link_rate; + + link_bw_kbps = dc_fixpt_from_int(dc_link_bandwidth_kbps(link, + &pipe_ctx->link_config.dp_link_settings)); + + /* For audio stream calculations, the video stream should not include FEC or SSC + * in order to get the most pessimistic values. + */ + if (dp_link_info->encoding == DP_8b_10b_ENCODING && + link->dc->link_srv->dp_is_fec_supported(link)) { + link_bw_kbps = dc_fixpt_mul(link_bw_kbps, + dc_fixpt_from_fraction(100, DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100)); + } else if (dp_link_info->encoding == DP_128b_132b_ENCODING) { + link_bw_kbps = dc_fixpt_mul(link_bw_kbps, + dc_fixpt_from_fraction(10000, 9975)); /* 99.75% SSC overhead*/ + } + + dp_link_info->link_bandwidth_kbps = dc_fixpt_floor(link_bw_kbps); + + /* HW minimum for 128b/132b HBlank is 4 frame symbols. 
+ * TODO: Plumb the actual programmed HBlank min symbol width to here. + */ + if (dp_link_info->encoding == DP_128b_132b_ENCODING) + dp_link_info->hblank_min_symbol_width = 4; + else + dp_link_info->hblank_min_symbol_width = 0; +} + static void build_audio_output( struct dc_state *state, const struct pipe_ctx *pipe_ctx, @@ -1338,6 +1378,15 @@ static void build_audio_output( audio_output->crtc_info.calculated_pixel_clock_100Hz = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz; + audio_output->crtc_info.pixel_encoding = + stream->timing.pixel_encoding; + + audio_output->crtc_info.dsc_bits_per_pixel = + stream->timing.dsc_cfg.bits_per_pixel; + + audio_output->crtc_info.dsc_num_slices = + stream->timing.dsc_cfg.num_slices_h; + /*for HDMI, audio ACR is with deep color ratio factor*/ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) && audio_output->crtc_info.requested_pixel_clock_100Hz == @@ -1371,6 +1420,10 @@ static void build_audio_output( audio_output->pll_info.ss_percentage = pipe_ctx->pll_settings.ss_percentage; + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) { + populate_audio_dp_link_info(pipe_ctx, &audio_output->dp_link_info); + } } static void program_scaler(const struct dc *dc, @@ -1476,7 +1529,7 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } -static enum dc_status apply_single_controller_ctx_to_hw( +enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -1507,7 +1560,8 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, &audio_output.crtc_info, - &pipe_ctx->stream->audio_info); + &pipe_ctx->stream->audio_info, + &audio_output.dp_link_info); } /* make sure no pipes syncd to the pipe being enabled */ @@ -2302,7 +2356,7 @@ enum dc_status dce110_apply_ctx_to_hw( if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) continue; - status = apply_single_controller_ctx_to_hw( + status = dce110_apply_single_controller_ctx_to_hw( pipe_ctx, context, dc); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 08028a1779ae..ed3cc3648e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_single_controller_ctx_to_hw( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); void dce110_enable_stream(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 6dd479e8a348..314798400b16 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -283,33 +283,33 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) DTN_INFO("\n"); } -void dcn10_log_hw_state(struct dc *dc, - struct dc_log_buffer_ctx *log_ctx) +static void dcn10_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; int i; - DTN_INFO_BEGIN(); - - dcn10_log_hubbub_state(dc, log_ctx); - - dcn10_log_hubp_states(dc, log_ctx); - - DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode" - " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 " - "C31 C32 C33 C34\n"); + DTN_INFO("DPP: IGAM format 
IGAM mode DGAM mode RGAM mode" + " GAMUT adjust " + "C11 C12 C13 C14 " + "C21 C22 C23 C24 " + "C31 C32 C33 C34 \n"); for (i = 0; i < pool->pipe_count; i++) { struct dpp *dpp = pool->dpps[i]; struct dcn_dpp_state s = {0}; dpp->funcs->dpp_read_state(dpp, &s); + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); if (!s.is_enabled) continue; - DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s" - "%8x %08xh %08xh %08xh %08xh %08xh %08xh", + DTN_INFO("[%2d]: %11xh %11s %9s %9s" + " %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", dpp->inst, s.igam_input_format, (s.igam_lut_mode == 0) ? "BypassFixed" : @@ -329,16 +329,42 @@ void dcn10_log_hw_state(struct dc *dc, ((s.rgam_lut_mode == 3) ? "RAM" : ((s.rgam_lut_mode == 4) ? "RAM" : "Unknown")))), - s.gamut_remap_mode, - s.gamut_remap_c11_c12, - s.gamut_remap_c13_c14, - s.gamut_remap_c21_c22, - s.gamut_remap_c23_c24, - s.gamut_remap_c31_c32, - s.gamut_remap_c33_c34); + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : + "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); DTN_INFO("\n"); } DTN_INFO("\n"); + DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d" + " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" + " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d" + " blnd_lut:%d oscs:%d\n\n", + dc->caps.color.dpp.input_lut_shared, + dc->caps.color.dpp.icsc, + dc->caps.color.dpp.dgam_ram, + dc->caps.color.dpp.dgam_rom_caps.srgb, + dc->caps.color.dpp.dgam_rom_caps.bt2020, + dc->caps.color.dpp.dgam_rom_caps.gamma2_2, + dc->caps.color.dpp.dgam_rom_caps.pq, + dc->caps.color.dpp.dgam_rom_caps.hlg, + dc->caps.color.dpp.post_csc, + dc->caps.color.dpp.gamma_corr, + dc->caps.color.dpp.dgam_rom_for_yuv, + dc->caps.color.dpp.hw_3d_lut, + dc->caps.color.dpp.ogam_ram, + dc->caps.color.dpp.ocsc); DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n"); for (i = 0; i < pool->pipe_count; i++) { @@ -352,6 +378,30 @@ void dcn10_log_hw_state(struct dc *dc, s.idle); } DTN_INFO("\n"); + DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", + dc->caps.color.mpc.gamut_remap, + dc->caps.color.mpc.num_3dluts, + dc->caps.color.mpc.ogam_ram, + dc->caps.color.mpc.ocsc); +} + +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) +{ + struct dc_context *dc_ctx = dc->ctx; + struct resource_pool *pool = dc->res_pool; + int i; + + DTN_INFO_BEGIN(); + + dcn10_log_hubbub_state(dc, log_ctx); + + dcn10_log_hubp_states(dc, log_ctx); + + if (dc->hwss.log_color_state) + dc->hwss.log_color_state(dc, log_ctx); + else + dcn10_log_color_state(dc, log_ctx); DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n"); @@ -1840,6 +1890,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, { struct dpp *dpp = pipe_ctx->plane_res.dpp; + if (!stream) + return false; + if (dpp == NULL) return false; @@ -1862,8 
+1915,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, } else dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS); - if (stream != NULL && stream->ctx != NULL && - stream->out_transfer_func != NULL) { + if (stream->ctx && + stream->out_transfer_func) { log_tf(stream->ctx, stream->out_transfer_func, dpp->regamma_params.hw_points_num); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 4853ecac53f9..c55d5155ecb9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -71,6 +71,112 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +void dcn20_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) +{ + struct dc_context *dc_ctx = dc->ctx; + struct resource_pool *pool = dc->res_pool; + int i; + + DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth" + " 3DLUT size RGAM mode GAMUT adjust " + "C11 C12 C13 C14 " + "C21 C22 C23 C24 " + "C31 C32 C33 C34 \n"); + + for (i = 0; i < pool->pipe_count; i++) { + struct dpp *dpp = pool->dpps[i]; + struct dcn_dpp_state s = {0}; + + dpp->funcs->dpp_read_state(dpp, &s); + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + + if (!s.is_enabled) + continue; + + DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", + dpp->inst, + (s.dgam_lut_mode == 0) ? "Bypass" : + ((s.dgam_lut_mode == 1) ? "sRGB" : + ((s.dgam_lut_mode == 2) ? "Ycc" : + ((s.dgam_lut_mode == 3) ? "RAM" : + ((s.dgam_lut_mode == 4) ? "RAM" : + "Unknown")))), + (s.shaper_lut_mode == 1) ? "RAM A" : + ((s.shaper_lut_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_mode == 1) ? "RAM A" : + ((s.lut3d_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", + (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", + (s.rgam_lut_mode == 1) ? "RAM A" : + ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"), + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : + "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); + DTN_INFO("\n"); + } + DTN_INFO("\n"); + DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d" + " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" + " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d" + " blnd_lut:%d oscs:%d\n\n", + dc->caps.color.dpp.input_lut_shared, + dc->caps.color.dpp.icsc, + dc->caps.color.dpp.dgam_ram, + dc->caps.color.dpp.dgam_rom_caps.srgb, + dc->caps.color.dpp.dgam_rom_caps.bt2020, + dc->caps.color.dpp.dgam_rom_caps.gamma2_2, + dc->caps.color.dpp.dgam_rom_caps.pq, + dc->caps.color.dpp.dgam_rom_caps.hlg, + dc->caps.color.dpp.post_csc, + dc->caps.color.dpp.gamma_corr, + dc->caps.color.dpp.dgam_rom_for_yuv, + dc->caps.color.dpp.hw_3d_lut, + dc->caps.color.dpp.ogam_ram, + dc->caps.color.dpp.ocsc); + + DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE" + " OGAM mode\n"); + + for (i = 0; i < pool->pipe_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %9s\n", + i, s.opp_id, s.dpp_id, s.bot_mpcc_id, + s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, + s.idle, + (s.rgam_mode == 1) ? "RAM A" : + ((s.rgam_mode == 2) ? 
"RAM B" : + "Bypass")); + } + DTN_INFO("\n"); + DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", + dc->caps.color.mpc.gamut_remap, + dc->caps.color.mpc.num_3dluts, + dc->caps.color.mpc.ogam_ram, + dc->caps.color.mpc.ocsc); +} + + static int find_free_gsl_group(const struct dc *dc) { if (dc->res_pool->gsl_groups.gsl_0 == 0) @@ -1633,6 +1739,7 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.scaler || plane_state->update_flags.bits.scaling_change || plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.clip_size_change || plane_state->update_flags.bits.per_pixel_alpha_change || pipe_ctx->stream->update_flags.bits.scaling) { pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; @@ -1645,6 +1752,7 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.viewport || (context == dc->current_state && plane_state->update_flags.bits.position_change) || (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) || (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { hubp->funcs->mem_program_viewport( @@ -1958,7 +2066,6 @@ void dcn20_program_front_end_for_ctx( && context->res_ctx.pipe_ctx[i].stream) hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); - /* Disconnect mpcc */ for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable @@ -2561,7 +2668,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn20_reset_back_end_for_pipe( +void dcn20_reset_back_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h index b94c85340abf..5c874f7b0683 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h @@ -28,6 +28,8 @@ #include "hw_sequencer_private.h" +void dcn20_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); bool dcn20_set_blend_lut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); bool dcn20_set_shaper_3dlut( @@ -84,6 +86,10 @@ enum dc_status dcn20_enable_stream_timing( void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 8e88dcaf88f5..7252f5f781f0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) { struct abm *abm = pipe_ctx->stream_res.abm; - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; + struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; struct 
dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return; + + otg_inst = tg->inst; if (dmcu) { dce110_set_pipe(pipe_ctx); return; } - if (abm && panel_cntl) { - if (abm->funcs && abm->funcs->set_pipe_ex) { - abm->funcs->set_pipe_ex(abm, + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, panel_cntl->pwrseq_inst); - } else { - dmub_abm_set_pipe(abm, otg_inst, - SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } + } else { + dmub_abm_set_pipe(abm, otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } } @@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, { struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; + struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return false; + + otg_inst = tg->inst; if (dc->dc->res_pool->dmcu) { dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp); return true; } - if (abm != NULL) { - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; - - if (abm && panel_cntl) { - if (abm->funcs && abm->funcs->set_pipe_ex) { - abm->funcs->set_pipe_ex(abm, - otg_inst, - SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } else { - dmub_abm_set_pipe(abm, - otg_inst, - SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } - } + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); + } else { + dmub_abm_set_pipe(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } - if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm) + if (abm->funcs && abm->funcs->set_backlight_level_pwm) abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16, frame_ramp, 0, panel_cntl->inst); else diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index c34c13e1e0a4..7e6b7f2a6dc9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -69,6 +69,155 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +void dcn30_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) +{ + struct dc_context *dc_ctx = dc->ctx; + struct resource_pool *pool = dc->res_pool; + int i; + + DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode" + " 3DLUT mode 3DLUT bit depth 3DLUT size RGAM mode" + " GAMUT adjust " + "C11 C12 C13 C14 " + "C21 C22 C23 C24 " + "C31 C32 C33 C34 \n"); + + for (i = 0; i < pool->pipe_count; i++) { + struct dpp *dpp = pool->dpps[i]; + struct dcn_dpp_state s = {0}; + + dpp->funcs->dpp_read_state(dpp, &s); + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + + if (!s.is_enabled) + continue; + + DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s" + " %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", + dpp->inst, + s.pre_dgam_mode, + (s.pre_dgam_select == 0) ? "sRGB" : + ((s.pre_dgam_select == 1) ? "Gamma 2.2" : + ((s.pre_dgam_select == 2) ? "Gamma 2.4" : + ((s.pre_dgam_select == 3) ? "Gamma 2.6" : + ((s.pre_dgam_select == 4) ? 
"BT.709" : + ((s.pre_dgam_select == 5) ? "PQ" : + ((s.pre_dgam_select == 6) ? "HLG" : + "Unknown")))))), + (s.gamcor_mode == 0) ? "Bypass" : + ((s.gamcor_mode == 1) ? "RAM A" : + "RAM B"), + (s.shaper_lut_mode == 1) ? "RAM A" : + ((s.shaper_lut_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_mode == 1) ? "RAM A" : + ((s.lut3d_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", + (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", + (s.rgam_lut_mode == 0) ? "Bypass" : + ((s.rgam_lut_mode == 1) ? "RAM A" : + "RAM B"), + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : + "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); + DTN_INFO("\n"); + } + DTN_INFO("\n"); + DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d" + " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" + " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d" + " blnd_lut:%d oscs:%d\n\n", + dc->caps.color.dpp.input_lut_shared, + dc->caps.color.dpp.icsc, + dc->caps.color.dpp.dgam_ram, + dc->caps.color.dpp.dgam_rom_caps.srgb, + dc->caps.color.dpp.dgam_rom_caps.bt2020, + dc->caps.color.dpp.dgam_rom_caps.gamma2_2, + dc->caps.color.dpp.dgam_rom_caps.pq, + dc->caps.color.dpp.dgam_rom_caps.hlg, + dc->caps.color.dpp.post_csc, + dc->caps.color.dpp.gamma_corr, + dc->caps.color.dpp.dgam_rom_for_yuv, + dc->caps.color.dpp.hw_3d_lut, + dc->caps.color.dpp.ogam_ram, + dc->caps.color.dpp.ocsc); + + DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE" + " SHAPER mode 3DLUT mode 3DLUT bit-depth 3DLUT size OGAM mode OGAM LUT" + " GAMUT adjust " + "C11 C12 C13 C14 " + "C21 C22 C23 C24 " + "C31 C32 C33 C34 \n"); + + for (i = 0; i < pool->pipe_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + mpc3_get_gamut_remap(pool->mpc, i, &s.gamut_remap); + + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %11s %11s %16s %11s %10s %9s" + " %-12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld\n", + i, s.opp_id, s.dpp_id, s.bot_mpcc_id, + s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, + s.idle, + (s.shaper_lut_mode == 1) ? "RAM A" : + ((s.shaper_lut_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_mode == 1) ? "RAM A" : + ((s.lut3d_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", + (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", + (s.rgam_mode == 0) ? "Bypass" : + ((s.rgam_mode == 2) ? "RAM" : + "Unknown"), + (s.rgam_mode == 1) ? "B" : "A", + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : + "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); + + } + DTN_INFO("\n"); + DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", + dc->caps.color.mpc.gamut_remap, + dc->caps.color.mpc.num_3dluts, + dc->caps.color.mpc.ogam_ram, + dc->caps.color.mpc.ocsc); +} + bool dcn30_set_blend_lut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { @@ -1015,21 +1164,3 @@ void dcn30_prepare_bandwidth(struct dc *dc, if (!dc->clk_mgr->clks.fw_based_mclk_switching) dc_dmub_srv_p_state_delegate(dc, false, context); } - -void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_params *params) -{ - unsigned int i; - unsigned int triggers = 0; - - if (params->triggers.surface_update) - triggers |= 0x100; - if (params->triggers.cursor_update) - triggers |= 0x8; - if (params->triggers.force_trigger) - triggers |= 0x1; - - for (i = 0; i < num_pipes; i++) - pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, - triggers, params->num_frames); -} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index e557e2b98618..638f018a3cb5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -52,6 +52,9 @@ bool dcn30_mmhubbub_warmup( unsigned int num_dwb, struct dc_writeback_info *wb_info); +void dcn30_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); + bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); @@ -90,7 +93,4 @@ void dcn30_set_hubp_blank(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context); -void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_params *params); - #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 9894caedffed..ef913445a795 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn10_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 7423880fabb6..a760f0c6fe98 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -98,10 +98,8 @@ static void enable_memory_low_power(struct dc *dc) for (i = 0; i < 
dc->res_pool->stream_enc_count; i++) if (dc->res_pool->stream_enc[i]->vpg) dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); -#if defined(CONFIG_DRM_AMD_DC_FP) for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); -#endif } } @@ -617,3 +615,21 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) if (hws->ctx->dc->debug.hpo_optimization) REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); } + +void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params) +{ + unsigned int i; + unsigned int triggers = 0; + + if (params->triggers.surface_update) + triggers |= 0x100; + if (params->triggers.cursor_update) + triggers |= 0x8; + if (params->triggers.force_trigger) + triggers |= 0x1; + + for (i = 0; i < num_pipes; i++) + pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h index edfc01d6ad73..b8bc939da155 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h @@ -56,4 +56,8 @@ bool dcn31_is_abm_supported(struct dc *dc, void dcn31_init_pipes(struct dc *dc, struct dc_state *context); void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); +void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params); + + #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 669f524bd064..c06cc2c5da92 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn31_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index ccb7e317e86a..542ce3b7f9e4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn31_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 6c9299c7683d..aa36d7a56ca8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc, } } +void dcn32_disable_phantom_streams(struct dc 
*dc, struct dc_state *context) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) || + (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} + void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) { unsigned int i; + enum dc_status status = DC_OK; + struct dce_hwseq *hws = dc->hwseq; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) } } for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - - if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) { - // If old context or new context has phantom pipes, apply - // the phantom timings now. We can't change the phantom - // pipe configuration safely without driver acquiring - // the DMCUB lock first. 
- dc->hwss.apply_ctx_to_hw(dc, context); - break; + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == NULL) + continue; + + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx->stream == pipe_ctx_old->stream && + pipe_ctx->stream->link->link_state_valid) { + continue; } + + if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) + continue; + + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) + continue; + + if (hws->funcs.apply_single_controller_ctx_to_hw) + status = hws->funcs.apply_single_controller_ctx_to_hw( + pipe_ctx, + context, + dc); + + ASSERT(status == DC_OK); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); +#endif } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cecf7f0f5671..069e20bc87c0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc, void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context); + void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 427cfc8c24a4..2b073123d3ed 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn31_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, @@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = dcn32_enable_phantom_streams, + .disable_phantom_streams = dcn32_disable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, @@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = { .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, + .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, }; void dcn32_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 8b6c49622f3b..4b92df23ff0d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1342,8 +1342,8 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx, { int i = 0; struct drr_params params = {0}; - // DRR set trigger event mapped to OTG_TRIG_A 
(bit 11) for manual control flow - unsigned int event_triggers = 0x800; + // DRR set trigger event mapped to OTG_TRIG_A + unsigned int event_triggers = 0x2;//Bit[1]: OTG_TRIG_A // Note DRR trigger events are generated regardless of whether num frames met. unsigned int num_frames = 2; @@ -1377,3 +1377,20 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx, } } } +void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params) +{ + unsigned int i; + unsigned int triggers = 0; + + if (params->triggers.surface_update) + triggers |= 0x200;/*bit 9 : 10 0000 0000*/ + if (params->triggers.cursor_update) + triggers |= 0x8;/*bit3*/ + if (params->triggers.force_trigger) + triggers |= 0x1; + for (i = 0; i < num_pipes; i++) + pipe_ctx[i]->stream_res.tg->funcs-> + set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index fd66316e33de..c354efa6c1b2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -90,4 +90,7 @@ uint32_t dcn35_get_idle_state(const struct dc *dc); void dcn35_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, struct dc_crtc_timing_adjust adjust); +void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index a630aa77dcec..a93073055e7b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -70,7 +70,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn35_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn35_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index 143d3fc0221c..ab17fa1c64e8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn35_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index a54399383318..f89f205e42a1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -339,6 +339,8 @@ struct hw_sequencer_funcs { /* HW State Logging Related */ void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx); + void (*log_color_state)(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); void 
(*clear_status_bits)(struct dc *dc, unsigned int mask); @@ -379,6 +381,7 @@ struct hw_sequencer_funcs { struct dc_cursor_attributes *cursor_attr); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); + void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context); void (*subvp_pipe_control_lock)(struct dc *dc, struct dc_state *context, bool lock, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 6137cf09aa54..554cfab5ab24 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -155,7 +155,6 @@ struct hwseq_private_funcs { void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); -#ifdef CONFIG_DRM_AMD_DC_FP void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context); void (*update_force_pstate)(struct dc *dc, struct dc_state *context); void (*update_mall_sel)(struct dc *dc, struct dc_state *context); @@ -165,8 +164,14 @@ struct hwseq_private_funcs { void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + enum dc_status (*apply_single_controller_ctx_to_hw)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); -#endif + void (*reset_back_end_for_pipe)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h index 6ed1fb8c9300..b6203253111c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h @@ -43,7 +43,8 @@ struct audio_funcs { void (*az_configure)(struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, - const struct audio_info *audio_info); + const struct audio_info *audio_info, + const struct audio_dp_link_info *dp_link_info); void (*wall_dto_setup)(struct audio *audio, enum signal_type signal, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 6f4c97543c14..f4d4a68c91dc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -356,6 +356,7 @@ struct clk_mgr_internal { long long wm_range_table_addr; bool dpm_present; + bool pme_trigger_pending; }; struct clk_mgr_internal_funcs { @@ -393,6 +394,11 @@ static inline int khz_to_mhz_ceil(int khz) return (khz + 999) / 1000; } +static inline int khz_to_mhz_floor(int khz) +{ + return khz / 1000; +} + int clk_mgr_helper_get_active_display_cnt( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 901891316dfb..2ae7484d18af 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -26,6 +26,12 @@ #ifndef __DAL_DCHUBBUB_H__ #define __DAL_DCHUBBUB_H__ +/** + * DOC: overview + * + * There is only one common DCHUBBUB. 
It contains the common request + * blocks for the Data Fabric Interface that are not clock/power gated. + */ enum dcc_control { dcc_control__256_256_xxx, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index f4aa76e02518..0f24afbf4388 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -27,6 +27,31 @@ #ifndef __DAL_DPP_H__ #define __DAL_DPP_H__ +/** + * DOC: overview + * + * The DPP (Display Pipe and Plane) block is the unified display data + * processing engine in DCN for processing graphic or video data on a per-DPP + * rectangle basis. This rectangle can be a part of an SLS (Single Large Surface), + * or a layer to be blended with other DPPs, or a rectangle associated with a + * display tile. + * + * It provides various functions, including: + * - graphic color keyer + * - graphic cursor compositing + * - graphic or video image source to destination scaling + * - image sharpening + * - video format conversion from 4:2:0 or 4:2:2 to 4:4:4 + * - Color Space Conversion + * - Host LUT gamma adjustment + * - Color Gamut Remap + * - brightness and contrast adjustment. + * + * The DPP pipe consists of the Converter and Cursor (CNVC), Scaler (DSCL), Color + * Management (CM), Output Buffer (OBUF) and Digital Bypass (DPB) modules + * connected in a video/graphics pipeline. + */ + #include "transform.h" #include "cursor_reg_cache.h" @@ -141,6 +166,7 @@ struct dcn_dpp_state { uint32_t igam_input_format; uint32_t dgam_lut_mode; uint32_t rgam_lut_mode; + // gamut_remap data for dcn10_get_cm_states() uint32_t gamut_remap_mode; uint32_t gamut_remap_c11_c12; uint32_t gamut_remap_c13_c14; @@ -148,6 +174,16 @@ struct dcn_dpp_state { uint32_t gamut_remap_c23_c24; uint32_t gamut_remap_c31_c32; uint32_t gamut_remap_c33_c34; + // gamut_remap data for dcn*_log_color_state() + struct dpp_grph_csc_adjustment gamut_remap; + uint32_t shaper_lut_mode; + uint32_t lut3d_mode; + uint32_t lut3d_bit_depth; + uint32_t lut3d_size; + uint32_t blnd_lut_mode; + uint32_t pre_dgam_mode; + uint32_t pre_dgam_select; + uint32_t gamcor_mode; }; struct CM_bias_params { @@ -290,6 +326,9 @@ struct dpp_funcs { void (*dpp_cnv_set_alpha_keyer)( struct dpp *dpp_base, struct cnv_color_keyer_params *color_keyer); + + void (*dpp_get_gamut_remap)(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 7f3f9b69e903..72610cd7eae0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -26,13 +26,24 @@ #ifndef __DAL_HUBP_H__ #define __DAL_HUBP_H__ +/** + * DOC: overview + * + * Display Controller Hub (DCHUB) is the gateway between the Scalable Data Port + * (SDP) and DCN. This component has multiple features, such as memory + * arbitration, rotation, and cursor manipulation. + * + * There is one HUBP allocated per pipe, which fetches data and converts + * different pixel formats (e.g. ARGB8888, NV12) into linear, interleaved + * and fixed-depth streams of pixel data. 
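+ * + * Editor's sketch, not part of this patch: driver code typically reaches a pipe's HUBP through its pipe context and the hubp_funcs vtable declared below, e.g. + * + * struct hubp *hubp = pipe_ctx->plane_res.hubp; + * + * if (hubp && hubp->funcs->set_blank) + * hubp->funcs->set_blank(hubp, true); + * + * assuming pipe_ctx has already been resolved by the hardware sequencer. 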
+ */ + #include "mem_input.h" #include "cursor_reg_cache.h" #define OPP_ID_INVALID 0xf #define MAX_TTU 0xffffff - enum cursor_pitch { CURSOR_PITCH_64_PIXELS = 0, CURSOR_PITCH_128_PIXELS, @@ -146,9 +157,7 @@ struct hubp_funcs { void (*set_blank)(struct hubp *hubp, bool blank); void (*set_blank_regs)(struct hubp *hubp, bool blank); -#ifdef CONFIG_DRM_AMD_DC_FP void (*phantom_hubp_post_enable)(struct hubp *hubp); -#endif void (*set_hubp_blank_en)(struct hubp *hubp, bool blank); void (*set_cursor_attributes)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 61a2406dcc53..ba9b942ce09f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -23,13 +23,28 @@ */ /** - * DOC: mpc-overview + * DOC: overview * - * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline + * Multiple Pipe/Plane Combiner (MPC) is a component in the hardware pipeline * that performs blending of multiple planes, using global and per-pixel alpha. * It also performs post-blending color correction operations according to the * hardware capabilities, such as color transformation matrix and gamma 1D and * 3D LUT. + * + * MPC receives output from all DPP pipes and combines them to multiple outputs + * supporting "M MPC inputs -> N MPC outputs" flexible composition + * architecture. It features: + * + * - Programmable blending structure to allow software controlled blending and + * cascading; + * - Programmable window location of each DPP in active region of display; + * - Combining multiple DPP pipes in one active region when a single DPP pipe + * cannot process very large surface; + * - Combining multiple DPP from different SLS with blending; + * - Stereo formats from single DPP in top-bottom or side-by-side modes; + * - Stereo formats from 2 DPPs; + * - Alpha blending of multiple layers from different DPP pipes; + * - Programmable background color; */ #ifndef __DC_MPCC_H__ @@ -83,34 +98,66 @@ enum mpcc_alpha_blend_mode { /** * struct mpcc_blnd_cfg - MPCC blending configuration - * - * @black_color: background color - * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE) - * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the - * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE) - * @global_gain: used when blend mode considers both pixel alpha and plane - * alpha value and assumes the global alpha value. - * @global_alpha: plane alpha value - * @overlap_only: whether overlapping of different planes is allowed - * @bottom_gain_mode: blend mode for bottom gain setting - * @background_color_bpc: background color for bpc - * @top_gain: top gain setting - * @bottom_inside_gain: blend mode for bottom inside - * @bottom_outside_gain: blend mode for bottom outside */ struct mpcc_blnd_cfg { - struct tg_color black_color; /* background color */ - enum mpcc_alpha_blend_mode alpha_mode; /* alpha blend mode */ - bool pre_multiplied_alpha; /* alpha pre-multiplied mode flag */ + /** + * @black_color: background color. + */ + struct tg_color black_color; + + /** + * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE). + */ + enum mpcc_alpha_blend_mode alpha_mode; + + /*** + * @@pre_multiplied_alpha: + * + * Whether pixel color values were pre-multiplied by the alpha channel + * (MPCC_ALPHA_MULTIPLIED_MODE). + */ + bool pre_multiplied_alpha; + + /** + * @global_gain: Used when blend mode considers both pixel alpha and plane. + */ int global_gain; + + /** + * @global_alpha: Plane alpha value. 
+ */ int global_alpha; + + /** + * @overlap_only: Whether overlapping of different planes is allowed. + */ bool overlap_only; /* MPCC top/bottom gain settings */ + + /** + * @bottom_gain_mode: Blend mode for bottom gain setting. + */ int bottom_gain_mode; + + /** + * @background_color_bpc: Background color for bpc. + */ int background_color_bpc; + + /** + * @top_gain: Top gain setting. + */ int top_gain; + + /** + * @bottom_inside_gain: Blend mode for bottom inside. + */ int bottom_inside_gain; + + /** + * @bottom_outside_gain: Blend mode for bottom outside. + */ int bottom_outside_gain; }; @@ -150,34 +197,58 @@ struct mpc_dwb_flow_control { /** * struct mpcc - MPCC connection and blending configuration for a single MPCC instance. - * @mpcc_id: MPCC physical instance - * @dpp_id: DPP input to this MPCC - * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected. - * @blnd_cfg: the blending configuration for this MPCC - * @sm_cfg: stereo mix setting for this MPCC - * @shared_bottom: if MPCC output to both OPP and DWB endpoints, true. Otherwise, false. * * This struct is used as a node in an MPC tree. */ struct mpcc { - int mpcc_id; /* MPCC physical instance */ - int dpp_id; /* DPP input to this MPCC */ - struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */ - struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */ - struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */ - bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */ + /** + * @mpcc_id: MPCC physical instance. + */ + int mpcc_id; + + /** + * @dpp_id: DPP input to this MPCC. + */ + int dpp_id; + + /** + * @mpcc_bot: Pointer to bottom layer MPCC. NULL when not connected. + */ + struct mpcc *mpcc_bot; + + /** + * @blnd_cfg: The blending configuration for this MPCC. + */ + struct mpcc_blnd_cfg blnd_cfg; + + /** + * @sm_cfg: Stereo mix setting for this MPCC. + */ + struct mpcc_sm_cfg sm_cfg; + + /** + * @shared_bottom: + * + * If MPCC outputs to both OPP and DWB endpoints, true. Otherwise, false. + */ + bool shared_bottom; }; /** * struct mpc_tree - MPC tree represents all MPCC connections for a pipe. * - * @opp_id: the OPP instance that owns this MPC tree - * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint * */ struct mpc_tree { - int opp_id; /* The OPP instance that owns this MPC tree */ - struct mpcc *opp_list; /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */ + /** + * @opp_id: The OPP instance that owns this MPC tree. + */ + int opp_id; + + /** + * @opp_list: The top MPCC layer of the MPC tree that outputs to the OPP endpoint. + */ + struct mpcc *opp_list; }; struct mpc { @@ -199,6 +270,13 @@ struct mpcc_state { uint32_t overlap_only; uint32_t idle; uint32_t busy; + uint32_t shaper_lut_mode; + uint32_t lut3d_mode; + uint32_t lut3d_bit_depth; + uint32_t lut3d_size; + uint32_t rgam_mode; + uint32_t rgam_lut; + struct mpc_grph_gamut_adjustment gamut_remap; }; /** @@ -217,16 +295,20 @@ struct mpc_funcs { * Only used for planes that are part of blending chain for OPP output * * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be added to. - * [in] blnd_cfg - MPCC blending configuration for the new blending layer. - * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. - * stereo mix must disable for the very bottom layer of the tree config. - * [in] insert_above_mpcc - Insert new plane above this MPCC. 
If NULL, insert as bottom plane. - * [in] dpp_id - DPP instance for the plane to be added. - * [in] mpcc_id - The MPCC physical instance to use for blending. - * - * Return: struct mpcc* - MPCC that was added. + * + * - [in/out] mpc - MPC context. + * - [in/out] tree - MPC tree structure that plane will be added to. + * - [in] blnd_cfg - MPCC blending configuration for the new blending layer. + * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. + * stereo mix must be disabled for the very bottom layer of the tree config. + * - [in] insert_above_mpcc - Insert new plane above this MPCC. + * If NULL, insert as bottom plane. + * - [in] dpp_id - DPP instance for the plane to be added. + * - [in] mpcc_id - The MPCC physical instance to use for blending. + * + * Return: + * + * struct mpcc* - MPCC that was added. */ struct mpcc* (*insert_plane)( struct mpc *mpc, @@ -243,11 +325,14 @@ struct mpc_funcs { * Remove a specified MPCC from the MPC tree. * * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be removed from. - * [in/out] mpcc - MPCC to be removed from tree. * - * Return: void + * - [in/out] mpc - MPC context. + * - [in/out] tree - MPC tree structure that plane will be removed from. + * - [in/out] mpcc - MPCC to be removed from tree. + * + * Return: + * + * void */ void (*remove_mpcc)( struct mpc *mpc, @@ -260,9 +345,12 @@ struct mpc_funcs { * Reset the MPCC HW status by disconnecting all muxes. * * Parameters: - * [in/out] mpc - MPC context. * - * Return: void + * - [in/out] mpc - MPC context. + * + * Return: + * + * void */ void (*mpc_init)(struct mpc *mpc); void (*mpc_init_single_inst)( @@ -275,11 +363,14 @@ struct mpc_funcs { * Update the blending configuration for a specified MPCC. * * Parameters: - * [in/out] mpc - MPC context. - * [in] blnd_cfg - MPCC blending configuration. - * [in] mpcc_id - The MPCC physical instance. * - * Return: void + * - [in/out] mpc - MPC context. + * - [in] blnd_cfg - MPCC blending configuration. + * - [in] mpcc_id - The MPCC physical instance. + * + * Return: + * + * void */ void (*update_blending)( struct mpc *mpc, @@ -289,15 +380,18 @@ struct mpc_funcs { /** * @cursor_lock: * - * Lock cursor updates for the specified OPP. - * OPP defines the set of MPCC that are locked together for cursor. + * Lock cursor updates for the specified OPP. OPP defines the set of + * MPCCs that are locked together for cursor. * * Parameters: - * [in] mpc - MPC context. - * [in] opp_id - The OPP to lock cursor updates on - * [in] lock - lock/unlock the OPP * - * Return: void + * - [in] mpc - MPC context. + * - [in] opp_id - The OPP to lock cursor updates on + * - [in] lock - lock/unlock the OPP + * + * Return: + * + * void */ void (*cursor_lock)( struct mpc *mpc, @@ -307,20 +401,25 @@ struct mpc_funcs { /** * @insert_plane_to_secondary: * - * Add DPP into secondary MPC tree based on specified blending position. - * Only used for planes that are part of blending chain for DWB output + * Add DPP into secondary MPC tree based on specified blending + * position. Only used for planes that are part of blending chain for + * DWB output * * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be added to. - * [in] blnd_cfg - MPCC blending configuration for the new blending layer. - * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. - * stereo mix must disable for the very bottom layer of the tree config. 
- [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane. - [in] dpp_id - DPP instance for the plane to be added. - [in] mpcc_id - The MPCC physical instance to use for blending. - * - * Return: struct mpcc* - MPCC that was added. + * + * - [in/out] mpc - MPC context. + * - [in/out] tree - MPC tree structure that plane will be added to. + * - [in] blnd_cfg - MPCC blending configuration for the new blending layer. + * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. + * stereo mix must be disabled for the very bottom layer of the tree config. + * - [in] insert_above_mpcc - Insert new plane above this MPCC. If + * NULL, insert as bottom plane. + * - [in] dpp_id - DPP instance for the plane to be added. + * - [in] mpcc_id - The MPCC physical instance to use for blending. + * + * Return: + * + * struct mpcc* - MPCC that was added. */ struct mpcc* (*insert_plane_to_secondary)( struct mpc *mpc, @@ -337,10 +436,14 @@ struct mpc_funcs { * Remove a specified DPP from the 'secondary' MPC tree. * * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be removed from. - * [in] mpcc - MPCC to be removed from tree. - * Return: void + * + * - [in/out] mpc - MPC context. + * - [in/out] tree - MPC tree structure that plane will be removed from. + * - [in] mpcc - MPCC to be removed from tree. + * + * Return: + * + * void */ void (*remove_mpcc_from_secondary)( struct mpc *mpc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 7617fabbd16e..aee5372e292c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -23,6 +23,22 @@ * */ +/** + * DOC: overview + * + * The Output Plane Processor (OPP) block groups the functions that format + * pixel streams so that they are suitable for the display device. + * The key functions contained in the OPP are: + * + * - Adaptive Backlight Modulation (ABM) + * - Formatter (FMT), which provides pixel-by-pixel operations to format the + * incoming pixel stream. + * - Output Buffer, which provides pixel replication and overlapping. + * - Interface between MPC and OPTC. + * - Clock and reset generation. + * - CRC generation. 
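+ * + * Read together with the DPP and MPC overviews above, this places the OPP between blending and timing generation in the DCN pipeline: DPP -> MPC -> OPP -> OPTC. + * + * Editor's sketch, not from this patch: per-stream code typically reaches the OPP through its pipe context, e.g. + * + * struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; + * + * analogous to plane_res.hubp for the HUBP above. 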
+ */ + #ifndef __DAL_OPP_H__ #define __DAL_OPP_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h index 5dcbaa2db964..e97d964a1791 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -57,7 +57,7 @@ struct panel_cntl_funcs { struct panel_cntl_init_data { struct dc_context *ctx; uint32_t inst; - uint32_t pwrseq_inst; + uint32_t eng_id; }; struct panel_cntl { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 9a00a99317b2..d98d72f35be5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -182,9 +182,7 @@ struct timing_generator_funcs { bool (*enable_crtc)(struct timing_generator *tg); bool (*disable_crtc)(struct timing_generator *tg); -#ifdef CONFIG_DRM_AMD_DC_FP void (*phantom_crtc_post_enable)(struct timing_generator *tg); -#endif void (*disable_phantom_crtc)(struct timing_generator *tg); bool (*immediate_disable_crtc)(struct timing_generator *tg); bool (*is_counter_moving)(struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index c958ef37b78a..77a60aa9f27b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe); int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe); /* - * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice - * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it - * will have 4 pieces of slice. - * return - 0 if pipe is not used for a plane with MPCC combine. otherwise - * the number of MPC "cuts" for the plane. + * Get the number of MPC slices associated with the pipe. + * The function returns 0 if the pipe is not associated with an MPC combine + * pipe topology. */ -int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head); +int resource_get_mpc_slice_count(const struct pipe_ctx *pipe); /* - * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice - * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it - * will have 4 pieces of slice. - * return - 0 if pipe is not used for ODM combine. otherwise - * the number of ODM "cuts" for the timing. + * Get the number of ODM slices associated with the pipe. + * The function returns 0 if the pipe is not associated with an ODM combine + * pipe topology. 
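+ * For example, a timing split once across two pipes as a 2:1 ODM combine reports an ODM slice count of 2 (slice count equals ODM splits plus one, per the older comment wording above). 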
*/ -int resource_get_odm_slice_count(const struct pipe_ctx *otg_master); +int resource_get_odm_slice_count(const struct pipe_ctx *pipe); /* Get the ODM slice index counting from 0 from left most slice */ int resource_get_odm_slice_index(const struct pipe_ctx *opp_head); diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 2d152b68a501..22b24749c9d2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -61,22 +61,6 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate) } } -static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern) -{ - return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern && - test_pattern <= DP_TEST_PATTERN_SQUARE_END); -} - -static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) -{ - if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && - test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) || - test_pattern == DP_TEST_PATTERN_VIDEO_MODE) - return true; - else - return false; -} - static void dp_retrain_link_dp_test(struct dc_link *link, struct dc_link_settings *link_setting, bool skip_video_pattern) @@ -361,7 +345,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern_size); } - if (is_dp_phy_sqaure_pattern(test_pattern)) { + if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) { test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) core_link_read_dpcd( link, @@ -623,6 +607,8 @@ bool dp_set_test_pattern( if (pipe_ctx == NULL) return false; + link->pending_test_pattern = test_pattern; + /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */ if (link->test_pattern_enabled && test_pattern == DP_TEST_PATTERN_VIDEO_MODE) { @@ -643,12 +629,13 @@ bool dp_set_test_pattern( /* Reset Test Pattern state */ link->test_pattern_enabled = false; link->current_test_pattern = test_pattern; + link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED; return true; } /* Check for PHY Test Patterns */ - if (is_dp_phy_pattern(test_pattern)) { + if (IS_DP_PHY_PATTERN(test_pattern)) { /* Set DPCD Lane Settings before running test pattern */ if (p_link_settings != NULL) { if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && @@ -681,6 +668,7 @@ bool dp_set_test_pattern( /* Set Test Pattern state */ link->test_pattern_enabled = true; link->current_test_pattern = test_pattern; + link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED; if (p_link_settings != NULL) dpcd_set_link_settings(link, p_link_settings); @@ -756,7 +744,7 @@ bool dp_set_test_pattern( return false; if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - if (is_dp_phy_sqaure_pattern(test_pattern)) + if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) core_link_write_dpcd(link, DP_LINK_SQUARE_PATTERN, p_custom_pattern, @@ -884,6 +872,7 @@ bool dp_set_test_pattern( /* Set Test Pattern state */ link->test_pattern_enabled = true; link->current_test_pattern = test_pattern; + link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED; } return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index f4633d3cf9b9..a1f72fe378ee 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -22,6 +22,16 @@ * Authors: AMD * */ + +/** + * DOC: overview + * + * Display Input Output (DIO) is the display 
input and output unit in DCN. It + * includes output encoders to support different display outputs, like + * DisplayPort, HDMI, DVI, and others. It also includes the control + * and status channels for these interfaces. + */ + #ifndef __LINK_HWSS_DIO_H__ #define __LINK_HWSS_DIO_H__ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c index b659baa23147..348ea4cb832d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c @@ -80,21 +80,23 @@ static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; if (tp_params == NULL) return false; - if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && - link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) { + if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern)) // Deprogram overrides from previous test pattern dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); - } switch (tp_params->dp_phy_pattern) { case DP_TEST_PATTERN_80BIT_CUSTOM: if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern, pltpat_custom, tp_params->custom_pattern_size) != 0) return false; + hw_tp_params.custom_pattern = tp_params->custom_pattern; + hw_tp_params.custom_pattern_size = tp_params->custom_pattern_size; break; case DP_TEST_PATTERN_D102: break; @@ -185,13 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = { bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link) { - if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) - return false; - - if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) - return false; - - return true; + return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN); } const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void) diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index b621b97711b6..3e6c7be7e278 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -74,13 +74,16 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, struct encoder_set_dp_phy_pattern_param *tp_params) { + uint8_t clk_src = 0x4C; + uint8_t pattern = 0x4F; /* SQ128 */ + const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; - const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0}; - const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0}; + const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, clk_src}; + const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, clk_src}; const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21}; const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21}; - const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F}; - const uint8_t 
vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F}; + const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, pattern}; + const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, pattern}; const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20}; const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20}; const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20}; @@ -123,18 +126,20 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 }; const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; + if (tp_params == NULL) return false; - if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN || - tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) { + if (!IS_DP_PHY_SQUARE_PATTERN(tp_params->dp_phy_pattern)) { // Deprogram overrides from previously set square wave override if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM || link->current_test_pattern == DP_TEST_PATTERN_D102) link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_exit_manual_automation_0[0], sizeof(vendor_lttpr_exit_manual_automation_0)); - else + else if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern)) dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); return false; @@ -148,8 +153,6 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params); - dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]); - return true; } @@ -170,16 +173,18 @@ static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link, const struct dc_link_settings *link_settings, const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { - link_res->hpo_dp_link_enc->funcs->set_ffe( - link_res->hpo_dp_link_enc, - link_settings, - lane_settings[0].FFE_PRESET.raw); - - // FFE is programmed when retimer is programmed for SQ128, but explicit - // programming needed here as well in case FFE-only update is requested - if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && - link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) - dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); + // Don't update our HW FFE when outputting phy test patterns + if (IS_DP_PHY_PATTERN(link->pending_test_pattern)) { + // Directly program FIXED_VS retimer FFE for SQ128 override + if (IS_DP_PHY_SQUARE_PATTERN(link->pending_test_pattern)) { + dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); + } + } else { + link_res->hpo_dp_link_enc->funcs->set_ffe( + link_res->hpo_dp_link_enc, + link_settings, + lane_settings[0].FFE_PRESET.raw); + } } static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, @@ -214,13 +219,7 @@ static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = { bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link) { - if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) - return false; - - if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) - return false; - - return true; + return requires_fixed_vs_pe_retimer_dio_link_hwss(link); } const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c 
b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 24153b0df503..b8c4a04dd175 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -41,6 +41,7 @@ #include "protocols/link_dp_dpia.h" #include "protocols/link_dp_phy.h" #include "protocols/link_dp_training.h" +#include "protocols/link_dp_dpia_bw.h" #include "accessories/link_dp_trace.h" #include "link_enc_cfg.h" @@ -991,6 +992,23 @@ static bool detect_link_and_local_sink(struct dc_link *link, if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->reported_link_cap.link_rate > LINK_RATE_HIGH3) link->reported_link_cap.link_rate = LINK_RATE_HIGH3; + + /* + * If this is a DP-over-USB4 link, then we need to: + * - Enable BW ALLOC support on DPtx, if applicable + */ + if (dc->config.usb4_bw_alloc_support) { + if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) { + /* update with non-reduced link cap if bw allocation mode is supported */ + if (link->dpia_bw_alloc_config.nrd_max_link_rate && + link->dpia_bw_alloc_config.nrd_max_lane_count) { + link->reported_link_cap.link_rate = + link->dpia_bw_alloc_config.nrd_max_link_rate; + link->reported_link_cap.lane_count = + link->dpia_bw_alloc_config.nrd_max_lane_count; + } + } + } break; } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 3cbfbf8d107e..a72de44a5747 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2197,6 +2197,64 @@ static enum dc_status enable_link( static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw) { + struct dc_link *link = stream->sink->link; + int req_bw = bw; + + DC_LOGGER_INIT(link->ctx->logger); + + if (!link->dpia_bw_alloc_config.bw_alloc_enabled) + return false; + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + int sink_index = 0; + int i = 0; + + for (i = 0; i < link->sink_count; i++) { + if (link->remote_sinks[i] == NULL) + continue; + + if (stream->sink->sink_id != link->remote_sinks[i]->sink_id) + req_bw += link->dpia_bw_alloc_config.remote_sink_req_bw[i]; + else + sink_index = i; + } + + link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw; + } + + /* get dp overhead for dp tunneling */ + link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link); + req_bw += link->dpia_bw_alloc_config.dp_overhead; + + if (link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) { + if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) { + DC_LOG_DEBUG("%s, Succeeded in allocating bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n", + __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw, + link->dpia_bw_alloc_config.dp_overhead); + } else { + // Cannot get the required bandwidth. 
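+ // i.e. the DPTX granted less than req_bw (req_bw > allocated_bw), + // so treat this as an allocation failure. 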
+ DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n", + __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw, + link->dpia_bw_alloc_config.dp_overhead); + return false; + } + } else { + DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__); + return false; + } + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + int i = 0; + + for (i = 0; i < link->sink_count; i++) { + if (link->remote_sinks[i] == NULL) + continue; + DC_LOG_DEBUG("%s, remote_sink=%s, request_bw=%d\n", __func__, + (const char *)(&link->remote_sinks[i]->edid_caps.display_name[0]), + link->dpia_bw_alloc_config.remote_sink_req_bw[i]); + } + } + return true; } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 37d3027c32dc..cf22b8f28ba6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter( } } -static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link) -{ - uint8_t pwrseq_inst = 0xF; - struct dc_context *dc_ctx = link->dc->ctx; - - DC_LOGGER_INIT(dc_ctx->logger); - - switch (link->eng_id) { - case ENGINE_ID_DIGA: - pwrseq_inst = 0; - break; - case ENGINE_ID_DIGB: - pwrseq_inst = 1; - break; - default: - DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id); - ASSERT(false); - break; - } - - return pwrseq_inst; -} - - static void link_destruct(struct dc_link *link) { int i; @@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link, link->link_id.id == CONNECTOR_ID_LVDS)) { panel_cntl_init_data.ctx = dc_ctx; panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count; - panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link); + panel_cntl_init_data.eng_id = link->eng_id; link->panel_cntl = link->dc->res_pool->funcs->panel_cntl_create( &panel_cntl_init_data); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 8fe66c367850..1aed55b0ab6a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -125,11 +125,9 @@ static bool dp_active_dongle_validate_timing( if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter struct dc_crtc_timing outputTiming = *timing; -#if defined(CONFIG_DRM_AMD_DC_FP) if (timing->flags.DSC && !timing->dsc_cfg.is_frl) /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ outputTiming.flags.DSC = 0; -#endif if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) return false; @@ -361,7 +359,7 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un struct dc_link *dpia_link[MAX_DPIA_NUM] = {0}; int num_dpias = 0; - for (uint8_t i = 0; i < num_streams; ++i) { + for (unsigned int i = 0; i < num_streams; ++i) { if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) { /* new dpia sst stream, check whether it exceeds max dpia */ if (num_dpias >= MAX_DPIA_NUM) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c index 0050e0a06cbc..2fa4e64e2430 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c @@ -37,6 +37,7 @@ 
#include "clk_mgr.h" #include "resource.h" #include "link_enc_cfg.h" +#include "atomfirmware.h" #define DC_LOGGER \ link->ctx->logger @@ -100,8 +101,11 @@ void dp_set_hw_lane_settings( { const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + // Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && - !is_immediate_downstream(link, offset)) + !is_immediate_downstream(link, offset) && + (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) || + link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING)) return; if (link_hwss->ext.set_dp_lane_settings) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 5a0b04518956..e538c67d3ed9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status( { enum link_training_result status = LINK_TRAINING_SUCCESS; union lane_status lane_status; + union lane_align_status_updated dpcd_lane_status_updated; uint8_t dpcd_buf[6] = {0}; uint32_t lane; @@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status( * check lanes status */ lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane); + dpcd_lane_status_updated.raw = dpcd_buf[4]; if (!lane_status.bits.CHANNEL_EQ_DONE_0 || !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { + !lane_status.bits.SYMBOL_LOCKED_0 || + !dp_is_interlane_aligned(dpcd_lane_status_updated)) { /* if one of the channel equalization, clock * recovery or symbol lock is dropped * consider it as (link has been @@ -1505,10 +1508,7 @@ enum link_training_result dp_perform_link_training( * Non-LT AUX transactions inside training mode. 
*/ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING) - if (link->dc->config.use_old_fixed_vs_sequence) - status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, &lt_settings); - else - status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings); + status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings); else if (encoding == DP_8b_10b_ENCODING) status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings); else if (encoding == DP_128b_132b_ENCODING) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index e8dda44b23cb..5d36bab0029c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent( uint32_t retries_eq = 0; enum dc_status status; enum dc_dp_training_pattern tr_pattern; - uint32_t wait_time_microsec; + uint32_t wait_time_microsec = 0; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 7087cdc9e977..b5cf75975fff 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -186,356 +186,6 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq return status; } - -enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; - const uint8_t offset = dp_parse_lttpr_repeater_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; - const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = 0; - uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; - uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; - const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; - const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; - const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; - const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; - const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87}; - enum link_training_result status = LINK_TRAINING_SUCCESS; - uint8_t lane = 0; - union down_spread_ctrl downspread = {0}; - union lane_count_set lane_count_set = {0}; - uint8_t toggle_rate; - uint8_t rate; - - /* Only 8b/10b is supported */ - ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) == - DP_8b_10b_ENCODING); - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); - return 
status; - } - - if (offset != 0xFF) { - if (offset == 2) { - pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; - - /* Certain display and cable configuration require extra delay */ - } else if (offset > 2) { - pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; - } - } - - /* Vendor specific: Reset lane settings */ - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - - /* Vendor specific: Enable intercept */ - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); - - - /* 1. set link rate, lane count and spread. */ - - downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); - - lane_count_set.bits.LANE_COUNT_SET = - lt_settings->link_settings.lane_count; - - lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - - if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = - link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; - } - - core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); - - core_link_write_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, 1); - - rate = get_dpcd_link_rate(&lt_settings->link_settings); - - /* Vendor specific: Toggle link rate */ - toggle_rate = (rate == 0x6) ? 0xA : 0x6; - - if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { - core_link_write_dpcd( - link, - DP_LINK_BW_SET, - &toggle_rate, - 1); - } - - link->vendor_specific_lttpr_link_rate_wa = rate; - - core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - - DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", - __func__, - DP_LINK_BW_SET, - lt_settings->link_settings.link_rate, - DP_LANE_COUNT_SET, - lt_settings->link_settings.lane_count, - lt_settings->enhanced_framing, - DP_DOWNSPREAD_CTRL, - lt_settings->link_settings.link_spread); - - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_dpmf[0], - sizeof(vendor_lttpr_write_data_dpmf)); - - if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); - } - - /* 2. 
Perform link training */ - - /* Perform Clock Recovery Sequence */ - if (status == LINK_TRAINING_SUCCESS) { - const uint8_t max_vendor_dpcd_retries = 10; - uint32_t retries_cr; - uint32_t retry_count; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; - union lane_align_status_updated dpcd_lane_status_updated; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - uint8_t i = 0; - - retries_cr = 0; - retry_count = 0; - - memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); - memset(&dpcd_lane_status_updated, '\0', - sizeof(dpcd_lane_status_updated)); - - while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - - /* 1. call HWSS to set lane settings */ - dp_set_hw_lane_settings( - link, - link_res, - lt_settings, - 0); - - /* 2. update DPCD of the receiver */ - if (!retry_count) { - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration. - */ - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - lt_settings->pattern_for_cr, - 0); - /* Vendor specific: Disable intercept */ - for (i = 0; i < max_vendor_dpcd_retries; i++) { - if (pre_disable_intercept_delay_ms != 0) - msleep(pre_disable_intercept_delay_ms); - if (link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_intercept_dis[0], - sizeof(vendor_lttpr_write_data_intercept_dis))) - break; - - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); - } - } else { - vendor_lttpr_write_data_vs[3] = 0; - vendor_lttpr_write_data_pe[3] = 0; - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Vendor specific: Update VS and PE to DPRX requested value */ - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - - dpcd_set_lane_settings( - link, - lt_settings, - 0); - } - - /* 3. wait receiver to lock-on*/ - wait_time_microsec = lt_settings->cr_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested drive - * settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - 0); - - /* 5. check CR done*/ - if (dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_SUCCESS; - break; - } - - /* 6. max VS reached*/ - if (dp_is_max_vs_reached(lt_settings)) - break; - - /* 7. same lane settings */ - /* Note: settings are the same for all lanes, - * so comparing first lane is sufficient - */ - if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == - dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) - retries_cr++; - else - retries_cr = 0; - - /* 8. 
update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - retry_count++; - } - - if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { - ASSERT(0); - DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue", - __func__, - LINK_TRAINING_MAX_CR_RETRY); - - } - - status = dp_get_cr_failure(lane_count, dpcd_lane_status); - } - - /* Perform Channel EQ Sequence */ - if (status == LINK_TRAINING_SUCCESS) { - enum dc_dp_training_pattern tr_pattern; - uint32_t retries_ch_eq; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - /* Note: also check that TPS4 is a supported feature*/ - tr_pattern = lt_settings->pattern_for_eq; - - dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); - - status = LINK_TRAINING_EQ_FAIL_EQ; - - for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; - retries_ch_eq++) { - - dp_set_hw_lane_settings(link, link_res, lt_settings, 0); - - vendor_lttpr_write_data_vs[3] = 0; - vendor_lttpr_write_data_pe[3] = 0; - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Vendor specific: Update VS and PE to DPRX requested value */ - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - - /* 2. update DPCD*/ - if (!retries_ch_eq) - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration - */ - - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - tr_pattern, 0); - else - dpcd_set_lane_settings(link, lt_settings, 0); - - /* 3. wait for receiver to lock-on*/ - wait_time_microsec = lt_settings->eq_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested - * drive settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - 0); - - /* 5. check CR done*/ - if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_EQ_FAIL_CR; - break; - } - - /* 6. check CHEQ done*/ - if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && - dp_is_symbol_locked(lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) { - status = LINK_TRAINING_SUCCESS; - break; - } - - /* 7. update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - } - } - - return status; -} - enum link_training_result dp_perform_fixed_vs_pe_training_sequence( struct dc_link *link, const struct link_resource *link_res, @@ -620,18 +270,20 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( rate = get_dpcd_link_rate(&lt_settings->link_settings); - /* Vendor specific: Toggle link rate */ - toggle_rate = (rate == 0x6) ? 
0xA : 0x6; + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) { + /* Vendor specific: Toggle link rate */ + toggle_rate = (rate == 0x6) ? 0xA : 0x6; - if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { - core_link_write_dpcd( - link, - DP_LINK_BW_SET, - &toggle_rate, - 1); - } + if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } - link->vendor_specific_lttpr_link_rate_wa = rate; + link->vendor_specific_lttpr_link_rate_wa = rate; + } core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h index c0d6ea329504..e61970e27661 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h @@ -28,11 +28,6 @@ #define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ #include "link_dp_training.h" -enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings); - enum link_training_result dp_perform_fixed_vs_pe_training_sequence( struct dc_link *link, const struct link_resource *link_res, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 046d3e205415..acfbbc638cc6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -287,7 +287,7 @@ bool set_default_brightness_aux(struct dc_link *link) if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { if (!read_default_bl_aux(link, &default_backlight)) default_backlight = 150000; - // if < 1 nits or > 5000, it might be wrong readback + // if > 5000, it might be a wrong readback. 0 nits is a valid default value for an OLED panel. 
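+ // (default_backlight is in millinits: 1000 == 1 nit, 5000000 == 5000 nits.) 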
if (default_backlight < 1000 || default_backlight > 5000000) default_backlight = 150000; @@ -892,7 +892,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, /* Set power optimization flag */ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) { - if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) { + if (replay != NULL && link->replay_settings.replay_feature_enabled && + replay->funcs->replay_set_power_opt) { replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst); link->replay_settings.replay_power_opt_active = *power_opts; } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index 37a64186f324..ecc477ef8e3b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -2169,6 +2169,17 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params optimal_uclk_for_dcfclk_sta_targets[i] = bw_params->clk_table.entries[j].memclk_mhz * 16; break; + } else { + /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]): + * If it just so happens that the memory bandwidth is low enough such that + * all the optimal DCFCLK for each UCLK is lower than the smallest DCFCLK STA + * target, we need to populate the optimal UCLK for each DCFCLK STA target to + * be the max UCLK. + */ + if (j == num_uclk_states - 1) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + } } } } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 511ff6b5b985..7538b548c572 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id vpg = dcn301_vpg_create(ctx, vpg_inst); afmt = dcn301_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) { + if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) { kfree(enc1); kfree(vpg); kfree(afmt); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 31035fc3d868..04d142f97474 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1941,8 +1941,6 @@ static bool dcn31_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; - dc->config.use_old_fixed_vs_sequence = true; - /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index c4d71e7f18af..6f10052caeef 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context( dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); DC_FP_END(); pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + if (dc->config.enable_windowed_mpo_odm && + 
dc->debug.enable_single_display_2to1_odm_policy) { + switch (resource_get_odm_slice_count(pipe)) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + break; + case 4: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } + } else { + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 74412e5f03fe..6f832bf278cf 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1760,6 +1760,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->config.dc_mode_clk_limit_support = true; + dc->config.enable_windowed_mpo_odm = false; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 761ec9891875..5d52853cac96 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -701,7 +701,7 @@ static const struct dc_plane_cap plane_cap = { // 6:1 downscaling ratio: 1000/6 = 166.666 .max_downscale_factor = { - .argb8888 = 167, + .argb8888 = 250, .nv12 = 167, .fp16 = 167 }, @@ -764,6 +764,7 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ + .minimum_z8_residency_time = 2100, .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, @@ -780,8 +781,9 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - .ips2_entry_delay_us = 400, + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, + .disable_dmub_reallow_idle = true, .static_screen_wait_frames = 2, }; @@ -1905,7 +1907,8 @@ static bool dcn35_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - + /*HW default is to have all the FGCG enabled, SW no need to program them*/ + dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2130,6 +2133,7 @@ static bool dcn35_resource_construct( dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; + dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index c78c9224ab60..ff2a65e67bd4 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -78,6 +78,12 @@ struct 
dmub_srv_dcn31_regs; struct dmcub_trace_buf_entry; +/* enum dmub_window_memory_type - memory location type specification for windows */ +enum dmub_window_memory_type { + DMUB_WINDOW_MEMORY_TYPE_FB = 0, + DMUB_WINDOW_MEMORY_TYPE_GART +}; + /* enum dmub_status - return code for dmcub functions */ enum dmub_status { DMUB_STATUS_OK = 0, @@ -119,6 +125,7 @@ enum dmub_window_id { DMUB_WINDOW_5_TRACEBUFF, DMUB_WINDOW_6_FW_STATE, DMUB_WINDOW_7_SCRATCH_MEM, + DMUB_WINDOW_SHARED_STATE, DMUB_WINDOW_TOTAL, }; @@ -203,7 +210,7 @@ struct dmub_srv_region_params { uint32_t vbios_size; const uint8_t *fw_inst_const; const uint8_t *fw_bss_data; - bool is_mailbox_in_inbox; + const enum dmub_window_memory_type *window_memory_type; }; /** @@ -223,7 +230,7 @@ struct dmub_srv_region_params { */ struct dmub_srv_region_info { uint32_t fb_size; - uint32_t inbox_size; + uint32_t gart_size; uint8_t num_regions; struct dmub_region regions[DMUB_WINDOW_TOTAL]; }; @@ -239,9 +246,10 @@ struct dmub_srv_region_info { struct dmub_srv_memory_params { const struct dmub_srv_region_info *region_info; void *cpu_fb_addr; - void *cpu_inbox_addr; + void *cpu_gart_addr; uint64_t gpu_fb_addr; - uint64_t gpu_inbox_addr; + uint64_t gpu_gart_addr; + const enum dmub_window_memory_type *window_memory_type; }; /** @@ -361,7 +369,8 @@ struct dmub_srv_hw_funcs { const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); void (*setup_mailbox)(struct dmub_srv *dmub, const struct dmub_region *inbox1); @@ -443,7 +452,6 @@ struct dmub_srv_create_params { struct dmub_srv_base_funcs funcs; struct dmub_srv_hw_funcs *hw_funcs; void *user_ctx; - struct dc_context *dc_ctx; enum dmub_asic asic; uint32_t fw_version; bool is_virtual; @@ -455,6 +463,7 @@ struct dmub_srv_create_params { * @user_ctx: user provided context for the dmub_srv * @fw_version: the current firmware version, if any * @is_virtual: false if hardware support only + * @shared_state: dmub shared state between firmware and driver * @fw_state: dmub firmware state pointer */ struct dmub_srv { @@ -463,6 +472,7 @@ struct dmub_srv { uint32_t fw_version; bool is_virtual; struct dmub_fb scratch_mem_fb; + volatile struct dmub_shared_state_feature_block *shared_state; volatile const struct dmub_fw_state *fw_state; /* private: internal use only */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index e699731ee68e..a529e369b2ac 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -26,15 +26,6 @@ #ifndef DMUB_CMD_H #define DMUB_CMD_H -#if defined(_TEST_HARNESS) || defined(FPGA_USB4) -#include "dmub_fw_types.h" -#include "include_legacy/atomfirmware.h" - -#if defined(_TEST_HARNESS) -#include <string.h> -#endif -#else - #include <asm/byteorder.h> #include <linux/types.h> #include <linux/string.h> @@ -42,8 +33,6 @@ #include "atomfirmware.h" -#endif // defined(_TEST_HARNESS) || defined(FPGA_USB4) - //<DMUB_TYPES>================================================================== /* Basic type definitions. 
*/ @@ -403,15 +392,16 @@ union replay_debug_flags { /** * 0x400 (bit 10)
- * @force_disable_ips1: Force disable IPS1 state
+ * @enable_ips_visual_confirm: Enable IPS visual confirm when entering IPS
+ * If we enter IPS2, the Visual confirm bar will change to yellow
*/
- uint32_t force_disable_ips1 : 1;
+ uint32_t enable_ips_visual_confirm : 1;
/** * 0x800 (bit 11)
- * @force_disable_ips2: Force disable IPS2 state
+ * @enable_ips_residency_profiling: Enable IPS residency profiling
*/
- uint32_t force_disable_ips2 : 1;
+ uint32_t enable_ips_residency_profiling : 1;
uint32_t reserved : 20; } bitfields;
@@ -518,6 +508,8 @@ struct dmub_visual_confirm_color { * @trace_buffer_size: size of the tracebuffer region * @fw_version: the firmware version information * @dal_fw: 1 if the firmware is DAL
+ * @shared_state_size: size of the shared state region in bytes
+ * @shared_state_features: number of shared state features
*/ struct dmub_fw_meta_info { uint32_t magic_value; /**< magic value identifying DMUB firmware meta info */
@@ -526,6 +518,9 @@ struct dmub_fw_meta_info { uint32_t fw_version; /**< the firmware version information */ uint8_t dal_fw; /**< 1 if the firmware is DAL */ uint8_t reserved[3]; /**< padding bits */
+ uint32_t shared_state_size; /**< size of the shared state region in bytes */
+ uint16_t shared_state_features; /**< number of shared state features */
+ uint16_t reserved2; /**< padding bytes */
};
/** @@ -670,6 +665,116 @@ enum dmub_fw_boot_options_bit { }; //==============================================================================
+//< DMUB_SHARED_STATE>==========================================================
+//==============================================================================
+
+/**
+ * Shared firmware state between driver and firmware for lockless communication
+ * in situations where the inbox/outbox may be unavailable.
+ *
+ * Each structure *must* be at most 256-bytes in size. The layout allocation is
+ * described below:
+ *
+ * [Header (256 Bytes)][Feature 1 (256 Bytes)][Feature 2 (256 Bytes)]...
+ */
+
+/**
+ * enum dmub_shared_state_feature_id - List of shared state features.
+ */
+enum dmub_shared_state_feature_id {
+ DMUB_SHARED_SHARE_FEATURE__INVALID = 0,
+ DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
+ DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
+ DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
+};
+
+/**
+ * union dmub_shared_state_ips_fw_signals - Firmware signals for IPS.
+ */
+union dmub_shared_state_ips_fw_signals {
+ struct {
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
+ uint32_t reserved_bits : 30; /**< Reserved */
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * union dmub_shared_state_ips_driver_signals - Driver signals for IPS.
+ */
+union dmub_shared_state_ips_driver_signals {
+ struct {
+ uint32_t allow_pg : 1; /**< 1 if PG is allowed */
+ uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
+ uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
+ uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
+ uint32_t reserved_bits : 28; /**< Reserved bits */
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * IPS FW Version
+ */
+#define DMUB_SHARED_STATE__IPS_FW_VERSION 1
+
+/**
+ * struct dmub_shared_state_ips_fw - Firmware state for IPS.
+ */
+struct dmub_shared_state_ips_fw {
+ union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */
+ uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
+}; /* 248-bytes, fixed */
+
+/**
+ * IPS Driver Version
+ */
+#define DMUB_SHARED_STATE__IPS_DRIVER_VERSION 1
+
+/**
+ * struct dmub_shared_state_ips_driver - Driver state for IPS.
+ */
+struct dmub_shared_state_ips_driver {
+ union dmub_shared_state_ips_driver_signals signals; /**< 4 bytes, IPS signal bits */
+ uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
+}; /* 248-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_common - Generic payload.
+ */
+struct dmub_shared_state_feature_common {
+ uint32_t padding[62];
+}; /* 248-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_header - Feature description.
+ */
+struct dmub_shared_state_feature_header {
+ uint16_t id; /**< Feature ID */
+ uint16_t version; /**< Feature version */
+ uint32_t reserved; /**< Reserved bytes. */
+}; /* 8 bytes, fixed */
+
+/**
+ * struct dmub_shared_state_feature_block - Feature block.
+ */
+struct dmub_shared_state_feature_block {
+ struct dmub_shared_state_feature_header header; /**< Shared state header. */
+ union dmub_shared_feature_state_union {
+ struct dmub_shared_state_feature_common common; /**< Generic data */
+ struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
+ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
+ } data; /**< Shared state data. */
+}; /* 256-bytes, fixed */
+
+/**
+ * Shared state size in bytes.
+ */
+#define DMUB_FW_HEADER_SHARED_STATE_SIZE \
+ ((DMUB_SHARED_STATE_FEATURE__LAST + 1) * sizeof(struct dmub_shared_state_feature_block))
+
+//==============================================================================
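Because every feature block is a fixed 256 bytes and the (DMUB_SHARED_STATE_FEATURE__LAST + 1) sizing above allots one block per feature id, a consumer can index the shared region directly; dmub_srv_hw_init() later in this diff publishes the region as dmub->shared_state. A minimal sketch using simplified stand-ins for the structures above rather than the real dmub headers; treating block 0 as the header slot is an assumption drawn from the layout comment:

#include <stddef.h>
#include <stdint.h>

/* Simplified copies of the shared-state layout declared above. */
struct feature_header { uint16_t id; uint16_t version; uint32_t reserved; };
struct feature_block { struct feature_header header; uint8_t payload[248]; };

enum { FEATURE_INVALID = 0, FEATURE_IPS_FW = 1, FEATURE_IPS_DRIVER = 2, FEATURE_COUNT = 3 };

/* Look up a feature block by id; returns NULL for invalid ids. */
static volatile struct feature_block *
get_feature_block(volatile struct feature_block *base, unsigned int id)
{
	if (id == FEATURE_INVALID || id >= FEATURE_COUNT)
		return NULL;
	return &base[id];	/* blocks appear to be indexed by feature id */
}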
//</DMUB_STATUS>================================================================ //============================================================================== //< DMUB_VBIOS>=================================================================
@@ -1270,11 +1375,11 @@ struct dmub_cmd_PLAT_54186_wa { uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; /**< reg value */ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; /**< reg value */ struct {
- uint8_t hubp_inst : 4; /**< HUBP instance */
- uint8_t tmz_surface : 1; /**< TMZ enable or disable */
- uint8_t immediate :1; /**< Immediate flip */
- uint8_t vmid : 4; /**< VMID */
- uint8_t grph_stereo : 1; /**< 1 if stereo */
+ uint32_t hubp_inst : 4; /**< HUBP instance */
+ uint32_t tmz_surface : 1; /**< TMZ enable or disable */
+ uint32_t immediate :1; /**< Immediate flip */
+ uint32_t vmid : 4; /**< VMID */
+ uint32_t grph_stereo : 1; /**< 1 if stereo */
uint32_t reserved : 21; /**< Reserved */ } flip_params; /**< Pageflip parameters */ uint32_t reserved[9]; /**< Reserved bits */
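The flip_params hunk above widens the bit-field base type from uint8_t to uint32_t. With uint8_t units a 21-bit member cannot fit in a single allocation unit, and how such bit-fields straddle or pad is implementation-defined, so the old layout was not guaranteed to stay one 32-bit word across compilers; declaring every member on a uint32_t unit pins it down, which matters for a structure shared with firmware. A compile-time guard along these lines (hypothetical, not part of the patch) would document the invariant:

#include <stdint.h>

struct flip_params_demo {		/* mirrors the fixed layout above */
	uint32_t hubp_inst : 4;
	uint32_t tmz_surface : 1;
	uint32_t immediate : 1;
	uint32_t vmid : 4;
	uint32_t grph_stereo : 1;
	uint32_t reserved : 21;		/* 4+1+1+4+1+21 == 32 bits exactly */
};

/* With a single uint32_t unit, the bit-fields occupy exactly one word. */
_Static_assert(sizeof(struct flip_params_demo) == sizeof(uint32_t),
	       "flip_params must remain a single 32-bit word");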
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 98dad0d47e72..cae96fba6349 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -191,7 +191,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{ union dmub_addr offset; uint64_t fb_base, fb_offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index 1df128e57ed3..de287b101848 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -197,7 +197,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c index 81dae75e9ff8..a4abe951c838 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c @@ -124,7 +124,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{ union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h index 9a3afffd9b0f..066f35a50094 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h @@ -43,7 +43,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
#endif /* _DMUB_DCN30_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 094e9f864557..2bcf5fb87dd9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -187,7 +187,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{ union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 4d520a893c7b..eccdab4986ce 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -199,7 +199,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6);
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 2daa1e0c8061..0d521eeda050 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -32,8 +32,6 @@ #include "dcn/dcn_3_2_0_offset.h" #include "dcn/dcn_3_2_0_sh_mask.h"
-#define DCN_BASE__INST0_SEG2 0x000034C0
-
#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] #define CTX dmub #define REGS dmub->regs_dcn32
@@ -218,7 +216,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5,
- const struct dmub_window *cw6)
+ const struct dmub_window *cw6,
+ const struct dmub_window *region6)
{ union dmub_addr offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index b0cd8d29402f..29c1132951af 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -206,7 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 6d1fbea0f6ba..60223efc6fc8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -229,7 +229,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6) + const struct dmub_window *cw6, + const struct dmub_window *region6) { union dmub_addr offset; @@ -275,6 +276,15 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub, REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, DMCUB_REGION3_CW6_ENABLE, 1); + + offset = region6->offset; + + REG_WRITE(DMCUB_REGION6_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION6_OFFSET_HIGH, offset.u.high_part); + REG_SET_2(DMCUB_REGION6_TOP_ADDRESS, 0, + DMCUB_REGION6_TOP_ADDRESS, + region6->region.top - region6->region.base - 1, + DMCUB_REGION6_ENABLE, 1); } void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub, diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h index 129a7031d2ae..686e97c00ccc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h @@ -89,6 +89,9 @@ struct dmub_srv; DMUB_SR(DMCUB_REGION5_OFFSET) \ DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \ DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION6_OFFSET) \ + DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \ DMUB_SR(DMCUB_SCRATCH0) \ DMUB_SR(DMCUB_SCRATCH1) \ DMUB_SR(DMCUB_SCRATCH2) \ @@ -154,6 +157,8 @@ struct dmub_srv; DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \ + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ @@ -214,7 +219,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 9ad738805320..fb66832dc996 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -78,6 +78,7 @@ #define DMUB_CW6_BASE (0x66000000) #define DMUB_REGION5_BASE (0xA0000000) +#define DMUB_REGION6_BASE (0xC0000000) static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs; static struct dmub_srv_dcn35_regs 
dmub_srv_dcn35_regs;
@@ -417,58 +418,44 @@ void dmub_srv_destroy(struct dmub_srv *dmub) dmub_memset(dmub, 0, sizeof(*dmub)); }
+static uint32_t dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out,
+ const uint32_t *window_sizes,
+ enum dmub_window_memory_type memory_type)
+{
+ uint32_t i, top = 0;
+
+ for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+ if (params->window_memory_type[i] == memory_type) {
+ struct dmub_region *region = &out->regions[i];
+
+ region->base = dmub_align(top, 256);
+ region->top = region->base + dmub_align(window_sizes[i], 64);
+ top = region->top;
+ }
+ }
+
+ return dmub_align(top, 4096);
+}
+
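The new helper above makes the packing rule explicit: only windows tagged with the requested memory type are laid out, each region base is aligned up to 256 bytes, each window size up to 64 bytes (the DMCUB cache line), and the running total up to a 4096-byte page; dmub_srv_calc_region_info() below calls it once per type, so the FB and GART windows get independent offset spaces in the same region array. A self-contained sketch of that rule, with a generic round-up macro standing in for dmub_align():

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

struct region { uint32_t base, top; };

/* Pack only the windows whose tag matches 'type'; others are skipped. */
static uint32_t pack_regions(struct region *out, const uint32_t *sizes,
			     const int *types, int n, int type)
{
	uint32_t top = 0;
	int i;

	for (i = 0; i < n; ++i) {
		if (types[i] != type)
			continue;
		out[i].base = ALIGN_UP(top, 256);		/* 256-byte base */
		out[i].top = out[i].base + ALIGN_UP(sizes[i], 64); /* 64-byte size */
		top = out[i].top;
	}
	return ALIGN_UP(top, 4096);	/* page-align the total */
}

int main(void)
{
	enum { FB, GART };
	const uint32_t sizes[] = { 100, 4000, 70 };
	const int types[] = { FB, GART, FB };
	struct region r[3] = { { 0 } };

	printf("fb bytes: %u\n", (unsigned)pack_regions(r, sizes, types, 3, FB));     /* 4096 */
	printf("gart bytes: %u\n", (unsigned)pack_regions(r, sizes, types, 3, GART)); /* 4096 */
	return 0;
}

Both totals start from offset zero because they index two different backing allocations; dmub_srv_calc_mem_info() further down resolves each window against the FB or GART base address using the same memory-type table.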
enum dmub_status
-dmub_srv_calc_region_info(struct dmub_srv *dmub,
- const struct dmub_srv_region_params *params,
- struct dmub_srv_region_info *out)
+ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ const struct dmub_srv_region_params *params,
+ struct dmub_srv_region_info *out)
{
- struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
- struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
- struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
- struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
- struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
- struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
- struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
- struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info; uint32_t fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
- uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
- uint32_t previous_top = 0;
+ uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };
+
if (!dmub->sw_init) return DMUB_STATUS_INVALID;
memset(out, 0, sizeof(*out));
+ memset(window_sizes, 0, sizeof(window_sizes));
out->num_regions = DMUB_NUM_WINDOWS;
- inst->base = 0x0;
- inst->top = inst->base + params->inst_const_size;
-
- data->base = dmub_align(inst->top, 256);
- data->top = data->base + params->bss_data_size;
-
- /*
- * All cache windows below should be aligned to the size
- * of the DMCUB cache line, 64 bytes.
- */
-
- stack->base = dmub_align(data->top, 256);
- stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
-
- bios->base = dmub_align(stack->top, 256);
- bios->top = bios->base + params->vbios_size;
-
- if (params->is_mailbox_in_inbox) {
- mail->base = 0;
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
- previous_top = bios->top;
- } else {
- mail->base = dmub_align(bios->top, 256);
- mail->top = mail->base + DMUB_MAILBOX_SIZE;
- previous_top = mail->top;
- }
-
fw_info = dmub_get_fw_meta_info(params);
if (fw_info) { @@ -486,19 +473,21 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, dmub->fw_version = fw_info->fw_version; }
- trace_buff->base = dmub_align(previous_top, 256);
- trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
-
- fw_state->base = dmub_align(trace_buff->top, 256);
- fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
-
- scratch_mem->base = dmub_align(fw_state->top, 256);
- scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+ window_sizes[DMUB_WINDOW_0_INST_CONST] = params->inst_const_size;
+ window_sizes[DMUB_WINDOW_1_STACK] = DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
+ window_sizes[DMUB_WINDOW_2_BSS_DATA] = params->bss_data_size;
+ window_sizes[DMUB_WINDOW_3_VBIOS] = params->vbios_size;
+ window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
+ window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
+ window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
+ window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE;
- out->fb_size = dmub_align(scratch_mem->top, 4096);
+ out->fb_size =
+ dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
- if (params->is_mailbox_in_inbox)
- out->inbox_size = dmub_align(mail->top, 4096);
+ out->gart_size =
+ dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_GART);
return DMUB_STATUS_OK; }
@@ -507,8 +496,6 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub, const struct dmub_srv_memory_params *params, struct dmub_srv_fb_info *out) {
- uint8_t *cpu_base;
- uint64_t gpu_base;
uint32_t i;
if (!dmub->sw_init)
@@ -519,19 +506,16 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub, if (params->region_info->num_regions != DMUB_NUM_WINDOWS) return DMUB_STATUS_INVALID;
- cpu_base = (uint8_t *)params->cpu_fb_addr;
- gpu_base = params->gpu_fb_addr;
-
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { const struct dmub_region *reg = &params->region_info->regions[i];
- out->fb[i].cpu_addr = cpu_base + reg->base;
- out->fb[i].gpu_addr = gpu_base + reg->base;
-
- if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
- out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
- out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
+ if (params->window_memory_type[i] == DMUB_WINDOW_MEMORY_TYPE_GART) {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_gart_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_gart_addr + reg->base;
+ } else {
+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_fb_addr + reg->base;
+ out->fb[i].gpu_addr = params->gpu_fb_addr + reg->base;
}
out->fb[i].size = reg->top - reg->base;
@@ -583,9 +567,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ struct
dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
- struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
+ struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
if (!dmub->sw_init)
@@ -670,10 +655,16 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->fw_state = fw_state_fb->cpu_addr;
+ region6.offset.quad_part = shared_state_fb->gpu_addr;
+ region6.region.base = DMUB_CW6_BASE;
+ region6.region.top = region6.region.base + shared_state_fb->size;
+
+ dmub->shared_state = shared_state_fb->cpu_addr;
+
dmub->scratch_mem_fb = *scratch_mem_fb;
if (dmub->hw_funcs.setup_windows)
- dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
+ dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
if (dmub->hw_funcs.setup_outbox0) dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
@@ -809,11 +800,20 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub) {
+ union dmub_fw_boot_status status;
+
if (!dmub->hw_funcs.is_hw_powered_up) return true;
- return dmub->hw_funcs.is_hw_powered_up(dmub) &&
- dmub->hw_funcs.is_hw_init(dmub);
+ if (!dmub->hw_funcs.is_hw_powered_up(dmub))
+ return false;
+
+ if (!dmub->hw_funcs.is_hw_init(dmub))
+ return false;
+
+ status = dmub->hw_funcs.get_fw_status(dmub);
+
+ return status.bits.dal_fw && status.bits.mailbox_rdy;
}
enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub,
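After this change dmub_srv_is_hw_pwr_up() only reports ready once the firmware boot status also shows a DAL firmware with its mailbox ready, in addition to the existing power and init checks, so callers proceed only when the DMCUB can actually accept commands. An illustrative caller-side poll under that contract (sleep_us() and the timeout policy are placeholders; the real driver goes through dmub_srv_wait_for_hw_pwr_up(), whose truncated signature appears just above):

#include <stdbool.h>
#include <stdint.h>

struct dmub_srv;				/* opaque in this sketch */
bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub);
void sleep_us(uint32_t us);			/* platform-provided stub */

/* Poll until the DMCUB reports hw + fw + mailbox ready, or time out. */
static bool wait_for_dmub_ready(struct dmub_srv *dmub, uint32_t timeout_us)
{
	uint32_t waited;

	for (waited = 0; waited < timeout_us; waited += 10) {
		if (dmub_srv_is_hw_pwr_up(dmub))
			return true;
		sleep_us(10);
	}
	return false;
}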
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h index 915a031a43cb..e4a26143f14c 100644 --- a/drivers/gpu/drm/amd/display/include/audio_types.h +++ b/drivers/gpu/drm/amd/display/include/audio_types.h @@ -27,11 +27,21 @@ #define __AUDIO_TYPES_H__ #include "signal_types.h"
+#include "fixed31_32.h"
+#include "dc_dp_types.h"
#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20 #define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18 #define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
+struct audio_dp_link_info {
+ uint32_t link_bandwidth_kbps;
+ uint32_t hblank_min_symbol_width;
+ enum dp_link_encoding encoding;
+ enum dc_link_rate link_rate;
+ enum dc_lane_count lane_count;
+ bool is_mst;
+};
struct audio_crtc_info { uint32_t h_total;
@@ -42,7 +52,10 @@ struct audio_crtc_info { uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */ uint32_t refresh_rate; enum dc_color_depth color_depth;
+ enum dc_pixel_encoding pixel_encoding;
bool interlaced;
+ uint32_t dsc_bits_per_pixel;
+ uint32_t dsc_num_slices;
};
struct azalia_clock_info { uint32_t pixel_clock_in_10khz;
@@ -95,6 +108,8 @@ struct audio_output { enum signal_type signal; /* video timing */ struct audio_crtc_info crtc_info;
+ /* DP link info */
+ struct audio_dp_link_info dp_link_info;
/* PLL for audio */ struct audio_pll_info pll_info; };
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 1b8ab20f1715..92dbff22a7c6 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -169,6 +169,15 @@ enum dp_test_pattern { DP_TEST_PATTERN_UNSUPPORTED };
+#define IS_DP_PHY_SQUARE_PATTERN(test_pattern)\
+ (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&\
+ test_pattern <= DP_TEST_PATTERN_SQUARE_END)
+
+#define IS_DP_PHY_PATTERN(test_pattern)\
+ ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&\
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||\
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+
enum dp_test_pattern_color_space { DP_TEST_PATTERN_COLOR_SPACE_RGB, DP_TEST_PATTERN_COLOR_SPACE_YCBCR601,
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index df2c7ffe190f..b0a6256e89f4 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -174,6 +174,7 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_ATHUB (1 << 16) #define AMD_PG_SUPPORT_JPEG (1 << 17) #define AMD_PG_SUPPORT_IH_SRAM_PG (1 << 18)
+#define AMD_PG_SUPPORT_JPEG_DPG (1 << 19)
/** * enum PP_FEATURE_MASK - Used to mask power play features.
@@ -244,6 +245,7 @@ enum DC_FEATURE_MASK { DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+ DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/include/arct_ip_offset.h b/drivers/gpu/drm/amd/include/arct_ip_offset.h index af1c46991429..7dd876f7df74 100644 --- a/drivers/gpu/drm/amd/include/arct_ip_offset.h +++ b/drivers/gpu/drm/amd/include/arct_ip_offset.h @@ -25,13 +25,11 @@ #define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT]; } __maybe_unused;
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h new file mode 100644 index 000000000000..84483366ab6a --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h @@ -0,0 +1,287 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ +#ifndef _athub_4_1_0_OFFSET_HEADER +#define _athub_4_1_0_OFFSET_HEADER + + + +// addressBlock: athub_xpbdec +// base address: 0x3000 +#define regXPB_RTR_SRC_APRTR0 0x0000 +#define regXPB_RTR_SRC_APRTR0_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR1 0x0001 +#define regXPB_RTR_SRC_APRTR1_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR2 0x0002 +#define regXPB_RTR_SRC_APRTR2_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR3 0x0003 +#define regXPB_RTR_SRC_APRTR3_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR4 0x0004 +#define regXPB_RTR_SRC_APRTR4_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR5 0x0005 +#define regXPB_RTR_SRC_APRTR5_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR6 0x0006 +#define regXPB_RTR_SRC_APRTR6_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR7 0x0007 +#define regXPB_RTR_SRC_APRTR7_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR8 0x0008 +#define regXPB_RTR_SRC_APRTR8_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR9 0x0009 +#define regXPB_RTR_SRC_APRTR9_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR10 0x000a +#define regXPB_RTR_SRC_APRTR10_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR11 0x000b +#define regXPB_RTR_SRC_APRTR11_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR12 0x000c +#define regXPB_RTR_SRC_APRTR12_BASE_IDX 0 +#define regXPB_RTR_SRC_APRTR13 0x000d +#define regXPB_RTR_SRC_APRTR13_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP0 0x000e +#define regXPB_RTR_DEST_MAP0_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP1 0x000f +#define regXPB_RTR_DEST_MAP1_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP2 0x0010 +#define regXPB_RTR_DEST_MAP2_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP3 0x0011 +#define regXPB_RTR_DEST_MAP3_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP4 0x0012 +#define regXPB_RTR_DEST_MAP4_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP5 0x0013 +#define regXPB_RTR_DEST_MAP5_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP6 0x0014 +#define regXPB_RTR_DEST_MAP6_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP7 0x0015 +#define regXPB_RTR_DEST_MAP7_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP8 0x0016 +#define regXPB_RTR_DEST_MAP8_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP9 0x0017 +#define regXPB_RTR_DEST_MAP9_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP10 0x0018 +#define regXPB_RTR_DEST_MAP10_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP11 0x0019 +#define regXPB_RTR_DEST_MAP11_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP12 0x001a +#define regXPB_RTR_DEST_MAP12_BASE_IDX 0 +#define regXPB_RTR_DEST_MAP13 0x001b +#define regXPB_RTR_DEST_MAP13_BASE_IDX 0 +#define regXPB_CLG_CFG0 0x001c +#define regXPB_CLG_CFG0_BASE_IDX 0 +#define regXPB_CLG_CFG1 0x001d +#define regXPB_CLG_CFG1_BASE_IDX 0 +#define regXPB_CLG_CFG2 0x001e +#define regXPB_CLG_CFG2_BASE_IDX 0 +#define regXPB_CLG_CFG3 0x001f +#define regXPB_CLG_CFG3_BASE_IDX 0 +#define regXPB_CLG_CFG4 0x0020 +#define regXPB_CLG_CFG4_BASE_IDX 0 +#define regXPB_CLG_CFG5 0x0021 +#define regXPB_CLG_CFG5_BASE_IDX 0 +#define regXPB_CLG_CFG6 0x0022 +#define regXPB_CLG_CFG6_BASE_IDX 0 +#define regXPB_CLG_CFG7 0x0023 +#define regXPB_CLG_CFG7_BASE_IDX 0 +#define regXPB_CLG_EXTRA0 0x0024 +#define regXPB_CLG_EXTRA0_BASE_IDX 0 +#define regXPB_CLG_EXTRA1 0x0025 +#define regXPB_CLG_EXTRA1_BASE_IDX 0 +#define regXPB_CLG_EXTRA_MSK 0x0026 +#define regXPB_CLG_EXTRA_MSK_BASE_IDX 0 +#define regXPB_LB_ADDR 0x0027 +#define regXPB_LB_ADDR_BASE_IDX 0 +#define regXPB_HST_CFG 0x0028 +#define regXPB_HST_CFG_BASE_IDX 0 +#define regXPB_P2P_BAR_CFG 0x0029 +#define regXPB_P2P_BAR_CFG_BASE_IDX 0 +#define regXPB_P2P_BAR0 0x002a +#define regXPB_P2P_BAR0_BASE_IDX 0 +#define regXPB_P2P_BAR1 0x002b +#define regXPB_P2P_BAR1_BASE_IDX 0 +#define regXPB_P2P_BAR2 0x002c +#define regXPB_P2P_BAR2_BASE_IDX 0 
+#define regXPB_P2P_BAR3 0x002d +#define regXPB_P2P_BAR3_BASE_IDX 0 +#define regXPB_P2P_BAR4 0x002e +#define regXPB_P2P_BAR4_BASE_IDX 0 +#define regXPB_P2P_BAR5 0x002f +#define regXPB_P2P_BAR5_BASE_IDX 0 +#define regXPB_P2P_BAR6 0x0030 +#define regXPB_P2P_BAR6_BASE_IDX 0 +#define regXPB_P2P_BAR7 0x0031 +#define regXPB_P2P_BAR7_BASE_IDX 0 +#define regXPB_P2P_BAR_SETUP 0x0032 +#define regXPB_P2P_BAR_SETUP_BASE_IDX 0 +#define regXPB_P2P_BAR_DELTA_ABOVE 0x0034 +#define regXPB_P2P_BAR_DELTA_ABOVE_BASE_IDX 0 +#define regXPB_P2P_BAR_DELTA_BELOW 0x0035 +#define regXPB_P2P_BAR_DELTA_BELOW_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR0 0x0036 +#define regXPB_PEER_SYS_BAR0_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR1 0x0037 +#define regXPB_PEER_SYS_BAR1_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR2 0x0038 +#define regXPB_PEER_SYS_BAR2_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR3 0x0039 +#define regXPB_PEER_SYS_BAR3_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR4 0x003a +#define regXPB_PEER_SYS_BAR4_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR5 0x003b +#define regXPB_PEER_SYS_BAR5_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR6 0x003c +#define regXPB_PEER_SYS_BAR6_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR7 0x003d +#define regXPB_PEER_SYS_BAR7_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR8 0x003e +#define regXPB_PEER_SYS_BAR8_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR9 0x003f +#define regXPB_PEER_SYS_BAR9_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR10 0x0040 +#define regXPB_PEER_SYS_BAR10_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR11 0x0041 +#define regXPB_PEER_SYS_BAR11_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR12 0x0042 +#define regXPB_PEER_SYS_BAR12_BASE_IDX 0 +#define regXPB_PEER_SYS_BAR13 0x0043 +#define regXPB_PEER_SYS_BAR13_BASE_IDX 0 +#define regXPB_CLK_GAT 0x0044 +#define regXPB_CLK_GAT_BASE_IDX 0 +#define regXPB_INTF_CFG 0x0045 +#define regXPB_INTF_CFG_BASE_IDX 0 +#define regXPB_INTF_STS 0x0046 +#define regXPB_INTF_STS_BASE_IDX 0 +#define regXPB_PIPE_STS 0x0047 +#define regXPB_PIPE_STS_BASE_IDX 0 +#define regXPB_WCB_STS 0x0048 +#define regXPB_WCB_STS_BASE_IDX 0 +#define regXPB_MAP_INVERT_FLUSH_NUM_LSB 0x0049 +#define regXPB_MAP_INVERT_FLUSH_NUM_LSB_BASE_IDX 0 +#define regXPB_STICKY 0x004a +#define regXPB_STICKY_BASE_IDX 0 +#define regXPB_STICKY_W1C 0x004b +#define regXPB_STICKY_W1C_BASE_IDX 0 +#define regXPB_SUB_CTRL 0x004c +#define regXPB_SUB_CTRL_BASE_IDX 0 +#define regXPB_PERF_KNOBS 0x004d +#define regXPB_PERF_KNOBS_BASE_IDX 0 +#define regXPB_MISC_CFG 0x004e +#define regXPB_MISC_CFG_BASE_IDX 0 +#define regXPB_INTF_CFG2 0x004f +#define regXPB_INTF_CFG2_BASE_IDX 0 +#define regXPB_CLG_EXTRA_RD 0x0050 +#define regXPB_CLG_EXTRA_RD_BASE_IDX 0 +#define regXPB_CLG_EXTRA_MSK_RD 0x0051 +#define regXPB_CLG_EXTRA_MSK_RD_BASE_IDX 0 +#define regXPB_CLG_GFX_MATCH 0x0052 +#define regXPB_CLG_GFX_MATCH_BASE_IDX 0 +#define regXPB_CLG_GFX_MATCH_VLD 0x0053 +#define regXPB_CLG_GFX_MATCH_VLD_BASE_IDX 0 +#define regXPB_CLG_GFX_MATCH_MSK 0x0054 +#define regXPB_CLG_GFX_MATCH_MSK_BASE_IDX 0 +#define regXPB_CLG_MM_MATCH 0x0055 +#define regXPB_CLG_MM_MATCH_BASE_IDX 0 +#define regXPB_CLG_MM_MATCH_VLD 0x0056 +#define regXPB_CLG_MM_MATCH_VLD_BASE_IDX 0 +#define regXPB_CLG_MM_MATCH_MSK 0x0057 +#define regXPB_CLG_MM_MATCH_MSK_BASE_IDX 0 +#define regXPB_CLG_GFX_UNITID_MAPPING0 0x005a +#define regXPB_CLG_GFX_UNITID_MAPPING0_BASE_IDX 0 +#define regXPB_CLG_GFX_UNITID_MAPPING1 0x005b +#define regXPB_CLG_GFX_UNITID_MAPPING1_BASE_IDX 0 +#define regXPB_CLG_GFX_UNITID_MAPPING2 0x005c +#define regXPB_CLG_GFX_UNITID_MAPPING2_BASE_IDX 0 +#define regXPB_CLG_GFX_UNITID_MAPPING3 0x005d +#define 
regXPB_CLG_GFX_UNITID_MAPPING3_BASE_IDX 0 +#define regXPB_CLG_GFX_UNITID_MAPPING4 0x005e +#define regXPB_CLG_GFX_UNITID_MAPPING4_BASE_IDX 0 +#define regXPB_CLG_GFX_UNITID_MAPPING5 0x005f +#define regXPB_CLG_GFX_UNITID_MAPPING5_BASE_IDX 0 +#define regXPB_CLG_GFX_UNITID_MAPPING6 0x0060 +#define regXPB_CLG_GFX_UNITID_MAPPING6_BASE_IDX 0 +#define regXPB_CLG_GFX_UNITID_MAPPING7 0x0061 +#define regXPB_CLG_GFX_UNITID_MAPPING7_BASE_IDX 0 +#define regXPB_CLG_MM_UNITID_MAPPING0 0x0062 +#define regXPB_CLG_MM_UNITID_MAPPING0_BASE_IDX 0 +#define regXPB_CLG_MM_UNITID_MAPPING1 0x0063 +#define regXPB_CLG_MM_UNITID_MAPPING1_BASE_IDX 0 +#define regXPB_CLG_MM_UNITID_MAPPING2 0x0064 +#define regXPB_CLG_MM_UNITID_MAPPING2_BASE_IDX 0 +#define regXPB_CLG_MM_UNITID_MAPPING3 0x0065 +#define regXPB_CLG_MM_UNITID_MAPPING3_BASE_IDX 0 + + +// addressBlock: athub_rpbdec +// base address: 0x31d0 +#define regATHUB_SHARED_VIRT_RESET_REQ 0x0074 +#define regATHUB_SHARED_VIRT_RESET_REQ_BASE_IDX 0 +#define regATHUB_MEM_POWER_LS 0x007f +#define regATHUB_MEM_POWER_LS_BASE_IDX 0 +#define regATHUB_MISC_CNTL 0x0080 +#define regATHUB_MISC_CNTL_BASE_IDX 0 +#define regRPB_PASSPW_CONF 0x0081 +#define regRPB_PASSPW_CONF_BASE_IDX 0 +#define regRPB_BLOCKLEVEL_CONF 0x0082 +#define regRPB_BLOCKLEVEL_CONF_BASE_IDX 0 +#define regRPB_TAG_CONF 0x0083 +#define regRPB_TAG_CONF_BASE_IDX 0 +#define regRPB_ARB_CNTL 0x0085 +#define regRPB_ARB_CNTL_BASE_IDX 0 +#define regRPB_ARB_CNTL2 0x0086 +#define regRPB_ARB_CNTL2_BASE_IDX 0 +#define regRPB_BIF_CNTL 0x0087 +#define regRPB_BIF_CNTL_BASE_IDX 0 +#define regRPB_BIF_CNTL2 0x0088 +#define regRPB_BIF_CNTL2_BASE_IDX 0 +#define regRPB_SDPPORT_CNTL 0x0089 +#define regRPB_SDPPORT_CNTL_BASE_IDX 0 +#define regRPB_NBIF_SDPPORT_CNTL 0x008a +#define regRPB_NBIF_SDPPORT_CNTL_BASE_IDX 0 +#define regRPB_DEINTRLV_COMBINE_CNTL 0x008c +#define regRPB_DEINTRLV_COMBINE_CNTL_BASE_IDX 0 +#define regRPB_VC_SWITCH_RDWR 0x008d +#define regRPB_VC_SWITCH_RDWR_BASE_IDX 0 +#define regRPB_ATS_CNTL3 0x008e +#define regRPB_ATS_CNTL3_BASE_IDX 0 +#define regRPB_DF_SDPPORT_CNTL 0x008f +#define regRPB_DF_SDPPORT_CNTL_BASE_IDX 0 +#define regRPB_ATS_CNTL 0x0090 +#define regRPB_ATS_CNTL_BASE_IDX 0 +#define regRPB_ATS_CNTL2 0x0091 +#define regRPB_ATS_CNTL2_BASE_IDX 0 +#define regRPB_PERFCOUNTER0_CFG 0x0092 +#define regRPB_PERFCOUNTER0_CFG_BASE_IDX 0 +#define regRPB_PERFCOUNTER1_CFG 0x0093 +#define regRPB_PERFCOUNTER1_CFG_BASE_IDX 0 +#define regRPB_PERFCOUNTER2_CFG 0x0094 +#define regRPB_PERFCOUNTER2_CFG_BASE_IDX 0 +#define regRPB_PERFCOUNTER3_CFG 0x0095 +#define regRPB_PERFCOUNTER3_CFG_BASE_IDX 0 +#define regRPB_PERFCOUNTER_RSLT_CNTL 0x0096 +#define regRPB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +#define regRPB_PERF_COUNTER_CNTL 0x0097 +#define regRPB_PERF_COUNTER_CNTL_BASE_IDX 0 +#define regRPB_PERFCOUNTER_HI 0x0098 +#define regRPB_PERFCOUNTER_HI_BASE_IDX 0 +#define regRPB_PERFCOUNTER_LO 0x0099 +#define regRPB_PERFCOUNTER_LO_BASE_IDX 0 +#define regRPB_PERF_COUNTER_STATUS 0x009a +#define regRPB_PERF_COUNTER_STATUS_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h new file mode 100644 index 000000000000..56499fd62239 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h @@ -0,0 +1,1348 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _athub_4_1_0_SH_MASK_HEADER +#define _athub_4_1_0_SH_MASK_HEADER + + +// addressBlock: athub_xpbdec +//XPB_RTR_SRC_APRTR0 +#define XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR1 +#define XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR2 +#define XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR3 +#define XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR4 +#define XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR5 +#define XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR6 +#define XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR7 +#define XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR8 +#define XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR9 +#define XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR10 +#define XPB_RTR_SRC_APRTR10__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR10__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR11 +#define XPB_RTR_SRC_APRTR11__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR11__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR12 +#define XPB_RTR_SRC_APRTR12__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR12__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_SRC_APRTR13 +#define XPB_RTR_SRC_APRTR13__BASE_ADDR__SHIFT 0x0 +#define XPB_RTR_SRC_APRTR13__BASE_ADDR_MASK 0x7FFFFFFFL +//XPB_RTR_DEST_MAP0 +#define XPB_RTR_DEST_MAP0__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L 
+#define XPB_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP1 +#define XPB_RTR_DEST_MAP1__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP2 +#define XPB_RTR_DEST_MAP2__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP3 +#define XPB_RTR_DEST_MAP3__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP4 +#define XPB_RTR_DEST_MAP4__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP4__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP4__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP5 +#define XPB_RTR_DEST_MAP5__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP5__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP5__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP6 +#define XPB_RTR_DEST_MAP6__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x18 +#define 
XPB_RTR_DEST_MAP6__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP6__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP7 +#define XPB_RTR_DEST_MAP7__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP7__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP7__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP8 +#define XPB_RTR_DEST_MAP8__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP8__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP8__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP9 +#define XPB_RTR_DEST_MAP9__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP9__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP9__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP10 +#define XPB_RTR_DEST_MAP10__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP10__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP10__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP10__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP10__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP10__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP10__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP10__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP10__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP10__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP10__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP10__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP11 +#define XPB_RTR_DEST_MAP11__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP11__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP11__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP11__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP11__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP11__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP11__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP11__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP11__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP11__DEST_SEL_RPB_MASK 0x01000000L 
+#define XPB_RTR_DEST_MAP11__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP11__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP12 +#define XPB_RTR_DEST_MAP12__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP12__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP12__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP12__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP12__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP12__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP12__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP12__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP12__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP12__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP12__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP12__APRTR_SIZE_MASK 0x7C000000L +//XPB_RTR_DEST_MAP13 +#define XPB_RTR_DEST_MAP13__NMR__SHIFT 0x0 +#define XPB_RTR_DEST_MAP13__DEST_OFFSET__SHIFT 0x1 +#define XPB_RTR_DEST_MAP13__DEST_SEL__SHIFT 0x14 +#define XPB_RTR_DEST_MAP13__DEST_SEL_RPB__SHIFT 0x18 +#define XPB_RTR_DEST_MAP13__SIDE_OK__SHIFT 0x19 +#define XPB_RTR_DEST_MAP13__APRTR_SIZE__SHIFT 0x1a +#define XPB_RTR_DEST_MAP13__NMR_MASK 0x00000001L +#define XPB_RTR_DEST_MAP13__DEST_OFFSET_MASK 0x000FFFFEL +#define XPB_RTR_DEST_MAP13__DEST_SEL_MASK 0x00F00000L +#define XPB_RTR_DEST_MAP13__DEST_SEL_RPB_MASK 0x01000000L +#define XPB_RTR_DEST_MAP13__SIDE_OK_MASK 0x02000000L +#define XPB_RTR_DEST_MAP13__APRTR_SIZE_MASK 0x7C000000L +//XPB_CLG_CFG0 +#define XPB_CLG_CFG0__WCB_NUM__SHIFT 0x0 +#define XPB_CLG_CFG0__LB_TYPE__SHIFT 0x4 +#define XPB_CLG_CFG0__P2P_BAR__SHIFT 0x7 +#define XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0xa +#define XPB_CLG_CFG0__SIDE_FLUSH__SHIFT 0xe +#define XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000FL +#define XPB_CLG_CFG0__LB_TYPE_MASK 0x00000070L +#define XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L +#define XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003C00L +#define XPB_CLG_CFG0__SIDE_FLUSH_MASK 0x0003C000L +//XPB_CLG_CFG1 +#define XPB_CLG_CFG1__WCB_NUM__SHIFT 0x0 +#define XPB_CLG_CFG1__LB_TYPE__SHIFT 0x4 +#define XPB_CLG_CFG1__P2P_BAR__SHIFT 0x7 +#define XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0xa +#define XPB_CLG_CFG1__SIDE_FLUSH__SHIFT 0xe +#define XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000FL +#define XPB_CLG_CFG1__LB_TYPE_MASK 0x00000070L +#define XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L +#define XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003C00L +#define XPB_CLG_CFG1__SIDE_FLUSH_MASK 0x0003C000L +//XPB_CLG_CFG2 +#define XPB_CLG_CFG2__WCB_NUM__SHIFT 0x0 +#define XPB_CLG_CFG2__LB_TYPE__SHIFT 0x4 +#define XPB_CLG_CFG2__P2P_BAR__SHIFT 0x7 +#define XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0xa +#define XPB_CLG_CFG2__SIDE_FLUSH__SHIFT 0xe +#define XPB_CLG_CFG2__WCB_NUM_MASK 0x0000000FL +#define XPB_CLG_CFG2__LB_TYPE_MASK 0x00000070L +#define XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L +#define XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003C00L +#define XPB_CLG_CFG2__SIDE_FLUSH_MASK 0x0003C000L +//XPB_CLG_CFG3 +#define XPB_CLG_CFG3__WCB_NUM__SHIFT 0x0 +#define XPB_CLG_CFG3__LB_TYPE__SHIFT 0x4 +#define XPB_CLG_CFG3__P2P_BAR__SHIFT 0x7 +#define XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0xa +#define XPB_CLG_CFG3__SIDE_FLUSH__SHIFT 0xe +#define XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000FL +#define XPB_CLG_CFG3__LB_TYPE_MASK 0x00000070L +#define XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L +#define XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003C00L +#define XPB_CLG_CFG3__SIDE_FLUSH_MASK 0x0003C000L +//XPB_CLG_CFG4 +#define XPB_CLG_CFG4__WCB_NUM__SHIFT 0x0 +#define XPB_CLG_CFG4__LB_TYPE__SHIFT 0x4 +#define XPB_CLG_CFG4__P2P_BAR__SHIFT 0x7 +#define XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0xa +#define 
XPB_CLG_CFG4__SIDE_FLUSH__SHIFT 0xe +#define XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000FL +#define XPB_CLG_CFG4__LB_TYPE_MASK 0x00000070L +#define XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L +#define XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003C00L +#define XPB_CLG_CFG4__SIDE_FLUSH_MASK 0x0003C000L +//XPB_CLG_CFG5 +#define XPB_CLG_CFG5__WCB_NUM__SHIFT 0x0 +#define XPB_CLG_CFG5__LB_TYPE__SHIFT 0x4 +#define XPB_CLG_CFG5__P2P_BAR__SHIFT 0x7 +#define XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0xa +#define XPB_CLG_CFG5__SIDE_FLUSH__SHIFT 0xe +#define XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000FL +#define XPB_CLG_CFG5__LB_TYPE_MASK 0x00000070L +#define XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L +#define XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003C00L +#define XPB_CLG_CFG5__SIDE_FLUSH_MASK 0x0003C000L +//XPB_CLG_CFG6 +#define XPB_CLG_CFG6__WCB_NUM__SHIFT 0x0 +#define XPB_CLG_CFG6__LB_TYPE__SHIFT 0x4 +#define XPB_CLG_CFG6__P2P_BAR__SHIFT 0x7 +#define XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0xa +#define XPB_CLG_CFG6__SIDE_FLUSH__SHIFT 0xe +#define XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000FL +#define XPB_CLG_CFG6__LB_TYPE_MASK 0x00000070L +#define XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L +#define XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003C00L +#define XPB_CLG_CFG6__SIDE_FLUSH_MASK 0x0003C000L +//XPB_CLG_CFG7 +#define XPB_CLG_CFG7__WCB_NUM__SHIFT 0x0 +#define XPB_CLG_CFG7__LB_TYPE__SHIFT 0x4 +#define XPB_CLG_CFG7__P2P_BAR__SHIFT 0x7 +#define XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0xa +#define XPB_CLG_CFG7__SIDE_FLUSH__SHIFT 0xe +#define XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000FL +#define XPB_CLG_CFG7__LB_TYPE_MASK 0x00000070L +#define XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L +#define XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003C00L +#define XPB_CLG_CFG7__SIDE_FLUSH_MASK 0x0003C000L +//XPB_CLG_EXTRA0 +#define XPB_CLG_EXTRA0__CMP0_HIGH__SHIFT 0x0 +#define XPB_CLG_EXTRA0__CMP0_LOW__SHIFT 0x8 +#define XPB_CLG_EXTRA0__VLD0__SHIFT 0xd +#define XPB_CLG_EXTRA0__CLG0_NUM__SHIFT 0xe +#define XPB_CLG_EXTRA0__CMP0_HIGH_MASK 0x000000FFL +#define XPB_CLG_EXTRA0__CMP0_LOW_MASK 0x00001F00L +#define XPB_CLG_EXTRA0__VLD0_MASK 0x00002000L +#define XPB_CLG_EXTRA0__CLG0_NUM_MASK 0x0001C000L +//XPB_CLG_EXTRA1 +#define XPB_CLG_EXTRA1__CMP1_HIGH__SHIFT 0x0 +#define XPB_CLG_EXTRA1__CMP1_LOW__SHIFT 0x8 +#define XPB_CLG_EXTRA1__VLD1__SHIFT 0xd +#define XPB_CLG_EXTRA1__CLG1_NUM__SHIFT 0xe +#define XPB_CLG_EXTRA1__CMP1_HIGH_MASK 0x000000FFL +#define XPB_CLG_EXTRA1__CMP1_LOW_MASK 0x00001F00L +#define XPB_CLG_EXTRA1__VLD1_MASK 0x00002000L +#define XPB_CLG_EXTRA1__CLG1_NUM_MASK 0x0001C000L +//XPB_CLG_EXTRA_MSK +#define XPB_CLG_EXTRA_MSK__MSK0_HIGH__SHIFT 0x0 +#define XPB_CLG_EXTRA_MSK__MSK0_LOW__SHIFT 0x8 +#define XPB_CLG_EXTRA_MSK__MSK1_HIGH__SHIFT 0xd +#define XPB_CLG_EXTRA_MSK__MSK1_LOW__SHIFT 0x15 +#define XPB_CLG_EXTRA_MSK__MSK0_HIGH_MASK 0x000000FFL +#define XPB_CLG_EXTRA_MSK__MSK0_LOW_MASK 0x00001F00L +#define XPB_CLG_EXTRA_MSK__MSK1_HIGH_MASK 0x001FE000L +#define XPB_CLG_EXTRA_MSK__MSK1_LOW_MASK 0x03E00000L +//XPB_LB_ADDR +#define XPB_LB_ADDR__CMP0__SHIFT 0x0 +#define XPB_LB_ADDR__MASK0__SHIFT 0xa +#define XPB_LB_ADDR__CMP1__SHIFT 0x14 +#define XPB_LB_ADDR__MASK1__SHIFT 0x1a +#define XPB_LB_ADDR__CMP0_MASK 0x000003FFL +#define XPB_LB_ADDR__MASK0_MASK 0x000FFC00L +#define XPB_LB_ADDR__CMP1_MASK 0x03F00000L +#define XPB_LB_ADDR__MASK1_MASK 0xFC000000L +//XPB_HST_CFG +#define XPB_HST_CFG__BAR_UP_WR_CMD__SHIFT 0x0 +#define XPB_HST_CFG__BAR_UP_WR_CMD_MASK 0x00000001L +//XPB_P2P_BAR_CFG +#define XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x0 +#define XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x4 +#define 
XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x6 +#define XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x7 +#define XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x8 +#define XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x9 +#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0xa +#define XPB_P2P_BAR_CFG__RD_EN__SHIFT 0xb +#define XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0xc +#define XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000FL +#define XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L +#define XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L +#define XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L +#define XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L +#define XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L +#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L +#define XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L +#define XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L +//XPB_P2P_BAR0 +#define XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x0 +#define XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x4 +#define XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x8 +#define XPB_P2P_BAR0__VALID__SHIFT 0xc +#define XPB_P2P_BAR0__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR0__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR0__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000FL +#define XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000F0L +#define XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR0__VALID_MASK 0x00001000L +#define XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR0__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR0__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR1 +#define XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x0 +#define XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x4 +#define XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x8 +#define XPB_P2P_BAR1__VALID__SHIFT 0xc +#define XPB_P2P_BAR1__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR1__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR1__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000FL +#define XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000F0L +#define XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR1__VALID_MASK 0x00001000L +#define XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR1__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR1__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR2 +#define XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x0 +#define XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x4 +#define XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x8 +#define XPB_P2P_BAR2__VALID__SHIFT 0xc +#define XPB_P2P_BAR2__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR2__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR2__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000FL +#define XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000F0L +#define XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR2__VALID_MASK 0x00001000L +#define XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR2__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR2__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR3 +#define XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x0 +#define XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x4 +#define XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x8 +#define XPB_P2P_BAR3__VALID__SHIFT 0xc +#define XPB_P2P_BAR3__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR3__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR3__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000FL +#define 
XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000F0L +#define XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR3__VALID_MASK 0x00001000L +#define XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR3__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR3__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR4 +#define XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x0 +#define XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x4 +#define XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x8 +#define XPB_P2P_BAR4__VALID__SHIFT 0xc +#define XPB_P2P_BAR4__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR4__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR4__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000FL +#define XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000F0L +#define XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR4__VALID_MASK 0x00001000L +#define XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR4__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR4__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR5 +#define XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x0 +#define XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x4 +#define XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x8 +#define XPB_P2P_BAR5__VALID__SHIFT 0xc +#define XPB_P2P_BAR5__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR5__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR5__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000FL +#define XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000F0L +#define XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR5__VALID_MASK 0x00001000L +#define XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR5__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR5__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR6 +#define XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x0 +#define XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x4 +#define XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x8 +#define XPB_P2P_BAR6__VALID__SHIFT 0xc +#define XPB_P2P_BAR6__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR6__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR6__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000FL +#define XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000F0L +#define XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR6__VALID_MASK 0x00001000L +#define XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR6__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR6__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR7 +#define XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x0 +#define XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x4 +#define XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x8 +#define XPB_P2P_BAR7__VALID__SHIFT 0xc +#define XPB_P2P_BAR7__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR7__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR7__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000FL +#define XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000F0L +#define XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR7__VALID_MASK 0x00001000L +#define XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR7__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR7__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR_SETUP +#define XPB_P2P_BAR_SETUP__SEL__SHIFT 0x0 +#define XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x8 +#define 
XPB_P2P_BAR_SETUP__VALID__SHIFT 0xc +#define XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0xd +#define XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0xe +#define XPB_P2P_BAR_SETUP__RESERVE__SHIFT 0xf +#define XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x10 +#define XPB_P2P_BAR_SETUP__SEL_MASK 0x000000FFL +#define XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000F00L +#define XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L +#define XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L +#define XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L +#define XPB_P2P_BAR_SETUP__RESERVE_MASK 0x00008000L +#define XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xFFFF0000L +//XPB_P2P_BAR_DELTA_ABOVE +#define XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x0 +#define XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x8 +#define XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000FFL +#define XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0FFFFF00L +//XPB_P2P_BAR_DELTA_BELOW +#define XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x0 +#define XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x8 +#define XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000FFL +#define XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0FFFFF00L +//XPB_PEER_SYS_BAR0 +#define XPB_PEER_SYS_BAR0__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR1 +#define XPB_PEER_SYS_BAR1__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR2 +#define XPB_PEER_SYS_BAR2__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR3 +#define XPB_PEER_SYS_BAR3__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR4 +#define XPB_PEER_SYS_BAR4__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR4__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR5 +#define XPB_PEER_SYS_BAR5__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR5__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR6 +#define XPB_PEER_SYS_BAR6__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR6__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR7 +#define XPB_PEER_SYS_BAR7__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR7__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR8 +#define XPB_PEER_SYS_BAR8__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR8__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR9 +#define XPB_PEER_SYS_BAR9__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR9__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR10 +#define XPB_PEER_SYS_BAR10__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR10__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR10__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR10__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR11 +#define XPB_PEER_SYS_BAR11__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR11__ADDR__SHIFT 0x1 +#define 
XPB_PEER_SYS_BAR11__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR11__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR12 +#define XPB_PEER_SYS_BAR12__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR12__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR12__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR12__ADDR_MASK 0xFFFFFFFEL +//XPB_PEER_SYS_BAR13 +#define XPB_PEER_SYS_BAR13__VALID__SHIFT 0x0 +#define XPB_PEER_SYS_BAR13__ADDR__SHIFT 0x1 +#define XPB_PEER_SYS_BAR13__VALID_MASK 0x00000001L +#define XPB_PEER_SYS_BAR13__ADDR_MASK 0xFFFFFFFEL +//XPB_CLK_GAT +#define XPB_CLK_GAT__ONDLY__SHIFT 0x0 +#define XPB_CLK_GAT__OFFDLY__SHIFT 0x6 +#define XPB_CLK_GAT__RDYDLY__SHIFT 0xc +#define XPB_CLK_GAT__ENABLE__SHIFT 0x12 +#define XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x13 +#define XPB_CLK_GAT__ONDLY_MASK 0x0000003FL +#define XPB_CLK_GAT__OFFDLY_MASK 0x00000FC0L +#define XPB_CLK_GAT__RDYDLY_MASK 0x0003F000L +#define XPB_CLK_GAT__ENABLE_MASK 0x00040000L +#define XPB_CLK_GAT__MEM_LS_ENABLE_MASK 0x00080000L +//XPB_INTF_CFG +#define XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x0 +#define XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x8 +#define XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x10 +#define XPB_INTF_CFG__P2P_WR_CHAIN_BREAK__SHIFT 0x17 +#define XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x1b +#define XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x1d +#define XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x1e +#define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA__SHIFT 0x1f +#define XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000FFL +#define XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000FF00L +#define XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007F0000L +#define XPB_INTF_CFG__P2P_WR_CHAIN_BREAK_MASK 0x00800000L +#define XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L +#define XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L +#define XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L +#define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA_MASK 0x80000000L +//XPB_INTF_STS +#define XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x0 +#define XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x8 +#define XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0xf +#define XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x10 +#define XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x11 +#define XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x12 +#define XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x13 +#define XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000FFL +#define XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007F00L +#define XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L +#define XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L +#define XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L +#define XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L +#define XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07F80000L +//XPB_PIPE_STS +#define XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x0 +#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x1 +#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x8 +#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0xf +#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x10 +#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x11 +#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x12 +#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x13 +#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x14 +#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x15 +#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x16 +#define XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x17 +#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x18 +#define XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L +#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000FEL +#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007F00L +#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 
0x00008000L +#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L +#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L +#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L +#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L +#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L +#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L +#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L +#define XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L +#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xFF000000L +//XPB_WCB_STS +#define XPB_WCB_STS__PBUF_VLD__SHIFT 0x0 +#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x10 +#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x17 +#define XPB_WCB_STS__PBUF_VLD_MASK 0x0000FFFFL +#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007F0000L +#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3F800000L +//XPB_MAP_INVERT_FLUSH_NUM_LSB +#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x0 +#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000FFFFL +//XPB_STICKY +#define XPB_STICKY__BITS__SHIFT 0x0 +#define XPB_STICKY__BITS_MASK 0xFFFFFFFFL +//XPB_STICKY_W1C +#define XPB_STICKY_W1C__BITS__SHIFT 0x0 +#define XPB_STICKY_W1C__BITS_MASK 0xFFFFFFFFL +//XPB_SUB_CTRL +#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x0 +#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x1 +#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x2 +#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x3 +#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x4 +#define XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x5 +#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x6 +#define XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x7 +#define XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x8 +#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x9 +#define XPB_SUB_CTRL__RESET_CNS__SHIFT 0xa +#define XPB_SUB_CTRL__RESET_RTR__SHIFT 0xb +#define XPB_SUB_CTRL__RESET_RET__SHIFT 0xc +#define XPB_SUB_CTRL__RESET_MAP__SHIFT 0xd +#define XPB_SUB_CTRL__RESET_WCB__SHIFT 0xe +#define XPB_SUB_CTRL__RESET_HST__SHIFT 0xf +#define XPB_SUB_CTRL__RESET_HOP__SHIFT 0x10 +#define XPB_SUB_CTRL__RESET_SID__SHIFT 0x11 +#define XPB_SUB_CTRL__RESET_SRB__SHIFT 0x12 +#define XPB_SUB_CTRL__RESET_CGR__SHIFT 0x13 +#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L +#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L +#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L +#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L +#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L +#define XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L +#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L +#define XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L +#define XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L +#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L +#define XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L +#define XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L +#define XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L +#define XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L +#define XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L +#define XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L +#define XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L +#define XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L +#define XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L +#define XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L +//XPB_PERF_KNOBS +#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x0 +#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x6 +#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0xc +#define 
XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003FL +#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000FC0L +#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003F000L +//XPB_MISC_CFG +#define XPB_MISC_CFG__FIELDNAME0__SHIFT 0x0 +#define XPB_MISC_CFG__FIELDNAME1__SHIFT 0x8 +#define XPB_MISC_CFG__FIELDNAME2__SHIFT 0x10 +#define XPB_MISC_CFG__FIELDNAME3__SHIFT 0x18 +#define XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x1f +#define XPB_MISC_CFG__FIELDNAME0_MASK 0x000000FFL +#define XPB_MISC_CFG__FIELDNAME1_MASK 0x0000FF00L +#define XPB_MISC_CFG__FIELDNAME2_MASK 0x00FF0000L +#define XPB_MISC_CFG__FIELDNAME3_MASK 0x7F000000L +#define XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L +//XPB_INTF_CFG2 +#define XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x0 +#define XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000FFL +//XPB_CLG_EXTRA_RD +#define XPB_CLG_EXTRA_RD__CMP0_HIGH__SHIFT 0x0 +#define XPB_CLG_EXTRA_RD__CMP0_LOW__SHIFT 0x6 +#define XPB_CLG_EXTRA_RD__VLD0__SHIFT 0xb +#define XPB_CLG_EXTRA_RD__CLG0_NUM__SHIFT 0xc +#define XPB_CLG_EXTRA_RD__CMP1_HIGH__SHIFT 0xf +#define XPB_CLG_EXTRA_RD__CMP1_LOW__SHIFT 0x15 +#define XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x1a +#define XPB_CLG_EXTRA_RD__CLG1_NUM__SHIFT 0x1b +#define XPB_CLG_EXTRA_RD__CMP0_HIGH_MASK 0x0000003FL +#define XPB_CLG_EXTRA_RD__CMP0_LOW_MASK 0x000007C0L +#define XPB_CLG_EXTRA_RD__VLD0_MASK 0x00000800L +#define XPB_CLG_EXTRA_RD__CLG0_NUM_MASK 0x00007000L +#define XPB_CLG_EXTRA_RD__CMP1_HIGH_MASK 0x001F8000L +#define XPB_CLG_EXTRA_RD__CMP1_LOW_MASK 0x03E00000L +#define XPB_CLG_EXTRA_RD__VLD1_MASK 0x04000000L +#define XPB_CLG_EXTRA_RD__CLG1_NUM_MASK 0x38000000L +//XPB_CLG_EXTRA_MSK_RD +#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH__SHIFT 0x0 +#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW__SHIFT 0x6 +#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH__SHIFT 0xb +#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW__SHIFT 0x11 +#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH_MASK 0x0000003FL +#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW_MASK 0x000007C0L +#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH_MASK 0x0001F800L +#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW_MASK 0x003E0000L +//XPB_CLG_GFX_MATCH +#define XPB_CLG_GFX_MATCH__FARBIRC0_ID__SHIFT 0x0 +#define XPB_CLG_GFX_MATCH__FARBIRC1_ID__SHIFT 0x8 +#define XPB_CLG_GFX_MATCH__FARBIRC2_ID__SHIFT 0x10 +#define XPB_CLG_GFX_MATCH__FARBIRC3_ID__SHIFT 0x18 +#define XPB_CLG_GFX_MATCH__FARBIRC0_ID_MASK 0x000000FFL +#define XPB_CLG_GFX_MATCH__FARBIRC1_ID_MASK 0x0000FF00L +#define XPB_CLG_GFX_MATCH__FARBIRC2_ID_MASK 0x00FF0000L +#define XPB_CLG_GFX_MATCH__FARBIRC3_ID_MASK 0xFF000000L +//XPB_CLG_GFX_MATCH_VLD +#define XPB_CLG_GFX_MATCH_VLD__FARBIRC0_VLD__SHIFT 0x0 +#define XPB_CLG_GFX_MATCH_VLD__FARBIRC1_VLD__SHIFT 0x1 +#define XPB_CLG_GFX_MATCH_VLD__FARBIRC2_VLD__SHIFT 0x2 +#define XPB_CLG_GFX_MATCH_VLD__FARBIRC3_VLD__SHIFT 0x3 +#define XPB_CLG_GFX_MATCH_VLD__FARBIRC0_VLD_MASK 0x00000001L +#define XPB_CLG_GFX_MATCH_VLD__FARBIRC1_VLD_MASK 0x00000002L +#define XPB_CLG_GFX_MATCH_VLD__FARBIRC2_VLD_MASK 0x00000004L +#define XPB_CLG_GFX_MATCH_VLD__FARBIRC3_VLD_MASK 0x00000008L +//XPB_CLG_GFX_MATCH_MSK +#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0 +#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x8 +#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0x10 +#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x18 +#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x000000FFL +#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x0000FF00L +#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x00FF0000L +#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0xFF000000L 
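/*
 * Illustrative aside, not part of the patch above: a minimal sketch of how
 * the paired __SHIFT/__MASK defines in these headers are typically consumed.
 * A field is read by masking and shifting down, and written by clearing the
 * field and or-ing in the shifted value.  FIELD_GET()/FIELD_SET() are
 * hypothetical helper names invented for this sketch; amdgpu's own
 * REG_GET_FIELD()/REG_SET_FIELD() macros follow the same token-pasting
 * pattern.
 */
#include <stdint.h>

#define FIELD_GET(reg, field, val) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET(reg, field, old, fv) \
	(((old) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* e.g. stage a hypothetical 8-bit client ID in match slot 0 and mark it
 * valid, using the XPB_CLG_GFX_MATCH/_VLD fields defined above: */
static const uint32_t gfx_match = FIELD_SET(XPB_CLG_GFX_MATCH, FARBIRC0_ID, 0, 0x2a);
static const uint32_t gfx_vld   = FIELD_SET(XPB_CLG_GFX_MATCH_VLD, FARBIRC0_VLD, 0, 1);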
+//XPB_CLG_MM_MATCH +#define XPB_CLG_MM_MATCH__FARBIRC0_ID__SHIFT 0x0 +#define XPB_CLG_MM_MATCH__FARBIRC1_ID__SHIFT 0x8 +#define XPB_CLG_MM_MATCH__FARBIRC2_ID__SHIFT 0x10 +#define XPB_CLG_MM_MATCH__FARBIRC3_ID__SHIFT 0x18 +#define XPB_CLG_MM_MATCH__FARBIRC0_ID_MASK 0x000000FFL +#define XPB_CLG_MM_MATCH__FARBIRC1_ID_MASK 0x0000FF00L +#define XPB_CLG_MM_MATCH__FARBIRC2_ID_MASK 0x00FF0000L +#define XPB_CLG_MM_MATCH__FARBIRC3_ID_MASK 0xFF000000L +//XPB_CLG_MM_MATCH_VLD +#define XPB_CLG_MM_MATCH_VLD__FARBIRC0_VLD__SHIFT 0x0 +#define XPB_CLG_MM_MATCH_VLD__FARBIRC1_VLD__SHIFT 0x1 +#define XPB_CLG_MM_MATCH_VLD__FARBIRC2_VLD__SHIFT 0x2 +#define XPB_CLG_MM_MATCH_VLD__FARBIRC3_VLD__SHIFT 0x3 +#define XPB_CLG_MM_MATCH_VLD__FARBIRC0_VLD_MASK 0x00000001L +#define XPB_CLG_MM_MATCH_VLD__FARBIRC1_VLD_MASK 0x00000002L +#define XPB_CLG_MM_MATCH_VLD__FARBIRC2_VLD_MASK 0x00000004L +#define XPB_CLG_MM_MATCH_VLD__FARBIRC3_VLD_MASK 0x00000008L +//XPB_CLG_MM_MATCH_MSK +#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0 +#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x8 +#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0x10 +#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x18 +#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x000000FFL +#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x0000FF00L +#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x00FF0000L +#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0xFF000000L +//XPB_CLG_GFX_UNITID_MAPPING0 +#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_GFX_UNITID_MAPPING1 +#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_GFX_UNITID_MAPPING2 +#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_GFX_UNITID_MAPPING3 +#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_GFX_UNITID_MAPPING4 +#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_GFX_UNITID_MAPPING5 +#define 
XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_GFX_UNITID_MAPPING6 +#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_GFX_UNITID_MAPPING7 +#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_MM_UNITID_MAPPING0 +#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_MM_UNITID_MAPPING1 +#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_MM_UNITID_MAPPING2 +#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L +//XPB_CLG_MM_UNITID_MAPPING3 +#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0 +#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5 +#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6 +#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL +#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L +#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L + + +// addressBlock: athub_rpbdec +//ATHUB_SHARED_VIRT_RESET_REQ +#define ATHUB_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define ATHUB_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define ATHUB_SHARED_VIRT_RESET_REQ__VF_MASK 0x7FFFFFFFL +#define ATHUB_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L +//ATHUB_MEM_POWER_LS +#define ATHUB_MEM_POWER_LS__LS_SETUP__SHIFT 0x0 +#define ATHUB_MEM_POWER_LS__LS_HOLD__SHIFT 0x6 +#define ATHUB_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL +#define ATHUB_MEM_POWER_LS__LS_HOLD_MASK 0x0007FFC0L +//ATHUB_MISC_CNTL +#define ATHUB_MISC_CNTL__CG_OFFDLY__SHIFT 0x0 +#define ATHUB_MISC_CNTL__CG_ENABLE__SHIFT 0x6 +#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE__SHIFT 0x7 +#define ATHUB_MISC_CNTL__PG_ENABLE__SHIFT 0x8 
+#define ATHUB_MISC_CNTL__PG_OFFDLY__SHIFT 0x9 +#define ATHUB_MISC_CNTL__ALWAYS_BUSY__SHIFT 0xf +#define ATHUB_MISC_CNTL__CG_STATUS__SHIFT 0x10 +#define ATHUB_MISC_CNTL__PG_STATUS__SHIFT 0x11 +#define ATHUB_MISC_CNTL__RPB_BUSY__SHIFT 0x12 +#define ATHUB_MISC_CNTL__XPB_BUSY__SHIFT 0x13 +#define ATHUB_MISC_CNTL__ATS_BUSY__SHIFT 0x14 +#define ATHUB_MISC_CNTL__SDPNCS_BUSY__SHIFT 0x15 +#define ATHUB_MISC_CNTL__DFPORT_BUSY__SHIFT 0x16 +#define ATHUB_MISC_CNTL__SWITCH_CNTL__SHIFT 0x17 +#define ATHUB_MISC_CNTL__LS_DELAY_ENABLE__SHIFT 0x18 +#define ATHUB_MISC_CNTL__LS_DELAY_TIME__SHIFT 0x19 +#define ATHUB_MISC_CNTL__RESETB_PG_CLK_GATING_ENABLE__SHIFT 0x1e +#define ATHUB_MISC_CNTL__CG_OFFDLY_MASK 0x0000003FL +#define ATHUB_MISC_CNTL__CG_ENABLE_MASK 0x00000040L +#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK 0x00000080L +#define ATHUB_MISC_CNTL__PG_ENABLE_MASK 0x00000100L +#define ATHUB_MISC_CNTL__PG_OFFDLY_MASK 0x00007E00L +#define ATHUB_MISC_CNTL__ALWAYS_BUSY_MASK 0x00008000L +#define ATHUB_MISC_CNTL__CG_STATUS_MASK 0x00010000L +#define ATHUB_MISC_CNTL__PG_STATUS_MASK 0x00020000L +#define ATHUB_MISC_CNTL__RPB_BUSY_MASK 0x00040000L +#define ATHUB_MISC_CNTL__XPB_BUSY_MASK 0x00080000L +#define ATHUB_MISC_CNTL__ATS_BUSY_MASK 0x00100000L +#define ATHUB_MISC_CNTL__SDPNCS_BUSY_MASK 0x00200000L +#define ATHUB_MISC_CNTL__DFPORT_BUSY_MASK 0x00400000L +#define ATHUB_MISC_CNTL__SWITCH_CNTL_MASK 0x00800000L +#define ATHUB_MISC_CNTL__LS_DELAY_ENABLE_MASK 0x01000000L +#define ATHUB_MISC_CNTL__LS_DELAY_TIME_MASK 0x3E000000L +#define ATHUB_MISC_CNTL__RESETB_PG_CLK_GATING_ENABLE_MASK 0x40000000L +//RPB_PASSPW_CONF +#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE__SHIFT 0x0 +#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE__SHIFT 0x1 +#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE__SHIFT 0x2 +#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_EN__SHIFT 0x3 +#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE__SHIFT 0x4 +#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_EN__SHIFT 0x5 +#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE__SHIFT 0x6 +#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_EN__SHIFT 0x7 +#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE__SHIFT 0x8 +#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_EN__SHIFT 0x9 +#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE__SHIFT 0xa +#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN__SHIFT 0xb +#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE__SHIFT 0xc +#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_EN__SHIFT 0xd +#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE__SHIFT 0xe +#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE__SHIFT 0xf +#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE__SHIFT 0x10 +#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE__SHIFT 0x11 +#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE__SHIFT 0x12 +#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE__SHIFT 0x13 +#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE__SHIFT 0x14 +#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN__SHIFT 0x15 +#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE__SHIFT 0x16 +#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN__SHIFT 0x17 +#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE_MASK 0x00000001L +#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE_MASK 0x00000002L +#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_MASK 0x00000004L +#define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_EN_MASK 0x00000008L +#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_MASK 0x00000010L +#define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_EN_MASK 0x00000020L +#define 
RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_MASK 0x00000040L +#define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_EN_MASK 0x00000080L +#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_MASK 0x00000100L +#define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_EN_MASK 0x00000200L +#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_MASK 0x00000400L +#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN_MASK 0x00000800L +#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_MASK 0x00001000L +#define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_EN_MASK 0x00002000L +#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE_MASK 0x00004000L +#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE_MASK 0x00008000L +#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE_MASK 0x00010000L +#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE_MASK 0x00020000L +#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE_MASK 0x00040000L +#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE_MASK 0x00080000L +#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_MASK 0x00100000L +#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN_MASK 0x00200000L +#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_MASK 0x00400000L +#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN_MASK 0x00800000L +//RPB_BLOCKLEVEL_CONF +#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE__SHIFT 0x0 +#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x2 +#define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL__SHIFT 0x3 +#define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL__SHIFT 0x5 +#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL__SHIFT 0x7 +#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL__SHIFT 0x9 +#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE__SHIFT 0xb +#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0xd +#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE__SHIFT 0xe +#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x10 +#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE__SHIFT 0x11 +#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x13 +#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_MASK 0x00000003L +#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00000004L +#define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL_MASK 0x00000018L +#define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL_MASK 0x00000060L +#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL_MASK 0x00000180L +#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL_MASK 0x00000600L +#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_MASK 0x00001800L +#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00002000L +#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_MASK 0x0000C000L +#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00010000L +#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_MASK 0x00060000L +#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00080000L +//RPB_TAG_CONF +#define RPB_TAG_CONF__RPB_IO_RD__SHIFT 0x0 +#define RPB_TAG_CONF__RPB_IO_WR__SHIFT 0xa +#define RPB_TAG_CONF__RPB_IO_MAX_LIMIT__SHIFT 0x14 +#define RPB_TAG_CONF__RPB_IO_RD_MASK 0x000003FFL +#define RPB_TAG_CONF__RPB_IO_WR_MASK 0x000FFC00L +#define RPB_TAG_CONF__RPB_IO_MAX_LIMIT_MASK 0x7FF00000L +//RPB_ARB_CNTL +#define RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x0 +#define RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x8 +#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM__SHIFT 0x10 +#define RPB_ARB_CNTL__ARB_MODE__SHIFT 0x18 +#define RPB_ARB_CNTL__SWITCH_NUM_MODE__SHIFT 0x19 +#define RPB_ARB_CNTL__RPB_VC0_CRD__SHIFT 0x1a +#define 
RPB_ARB_CNTL__DISABLE_FED__SHIFT 0x1f +#define RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x000000FFL +#define RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x0000FF00L +#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM_MASK 0x00FF0000L +#define RPB_ARB_CNTL__ARB_MODE_MASK 0x01000000L +#define RPB_ARB_CNTL__SWITCH_NUM_MODE_MASK 0x02000000L +#define RPB_ARB_CNTL__RPB_VC0_CRD_MASK 0x7C000000L +#define RPB_ARB_CNTL__DISABLE_FED_MASK 0x80000000L +//RPB_ARB_CNTL2 +#define RPB_ARB_CNTL2__P2P_SWITCH_NUM__SHIFT 0x0 +#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM__SHIFT 0x8 +#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM__SHIFT 0x10 +#define RPB_ARB_CNTL2__RPB_VC1_CRD__SHIFT 0x18 +#define RPB_ARB_CNTL2__P2P_SWITCH_NUM_MASK 0x000000FFL +#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM_MASK 0x0000FF00L +#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM_MASK 0x00FF0000L +#define RPB_ARB_CNTL2__RPB_VC1_CRD_MASK 0x1F000000L +//RPB_BIF_CNTL +#define RPB_BIF_CNTL__VC0_SWITCH_NUM__SHIFT 0x0 +#define RPB_BIF_CNTL__VC1_SWITCH_NUM__SHIFT 0x8 +#define RPB_BIF_CNTL__VC2_SWITCH_NUM__SHIFT 0x10 +#define RPB_BIF_CNTL__NBIF_DMA_ORIGCLKCTL_EN__SHIFT 0x18 +#define RPB_BIF_CNTL__TR_QOS_VC__SHIFT 0x19 +#define RPB_BIF_CNTL__RESERVE__SHIFT 0x1c +#define RPB_BIF_CNTL__VC0_SWITCH_NUM_MASK 0x000000FFL +#define RPB_BIF_CNTL__VC1_SWITCH_NUM_MASK 0x0000FF00L +#define RPB_BIF_CNTL__VC2_SWITCH_NUM_MASK 0x00FF0000L +#define RPB_BIF_CNTL__NBIF_DMA_ORIGCLKCTL_EN_MASK 0x01000000L +#define RPB_BIF_CNTL__TR_QOS_VC_MASK 0x0E000000L +#define RPB_BIF_CNTL__RESERVE_MASK 0xF0000000L +//RPB_BIF_CNTL2 +#define RPB_BIF_CNTL2__ARB_MODE__SHIFT 0x0 +#define RPB_BIF_CNTL2__DRAIN_VC_NUM__SHIFT 0x1 +#define RPB_BIF_CNTL2__SWITCH_ENABLE__SHIFT 0x3 +#define RPB_BIF_CNTL2__SWITCH_THRESHOLD__SHIFT 0x4 +#define RPB_BIF_CNTL2__PAGE_PRI_EN__SHIFT 0xc +#define RPB_BIF_CNTL2__VC5_TR_PRI_EN__SHIFT 0xd +#define RPB_BIF_CNTL2__VC0_TR_PRI_EN__SHIFT 0xe +#define RPB_BIF_CNTL2__VC0_CHAINED_OVERRIDE__SHIFT 0xf +#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE__SHIFT 0x10 +#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_EN__SHIFT 0x11 +#define RPB_BIF_CNTL2__NBIF_HST_COMPCLKCTL_EN__SHIFT 0x12 +#define RPB_BIF_CNTL2__ATHUB_NBIF_UNITID__SHIFT 0x13 +#define RPB_BIF_CNTL2__RESERVE__SHIFT 0x1e +#define RPB_BIF_CNTL2__ARB_MODE_MASK 0x00000001L +#define RPB_BIF_CNTL2__DRAIN_VC_NUM_MASK 0x00000006L +#define RPB_BIF_CNTL2__SWITCH_ENABLE_MASK 0x00000008L +#define RPB_BIF_CNTL2__SWITCH_THRESHOLD_MASK 0x00000FF0L +#define RPB_BIF_CNTL2__PAGE_PRI_EN_MASK 0x00001000L +#define RPB_BIF_CNTL2__VC5_TR_PRI_EN_MASK 0x00002000L +#define RPB_BIF_CNTL2__VC0_TR_PRI_EN_MASK 0x00004000L +#define RPB_BIF_CNTL2__VC0_CHAINED_OVERRIDE_MASK 0x00008000L +#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_MASK 0x00010000L +#define RPB_BIF_CNTL2__VC1_CHAINED_OVERRIDE_EN_MASK 0x00020000L +#define RPB_BIF_CNTL2__NBIF_HST_COMPCLKCTL_EN_MASK 0x00040000L +#define RPB_BIF_CNTL2__ATHUB_NBIF_UNITID_MASK 0x3FF80000L +#define RPB_BIF_CNTL2__RESERVE_MASK 0xC0000000L +//RPB_SDPPORT_CNTL +#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE__SHIFT 0x0 +#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE__SHIFT 0x1 +#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT__SHIFT 0x3 +#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER__SHIFT 0x4 +#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS__SHIFT 0x5 +#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD__SHIFT 0x6 +#define RPB_SDPPORT_CNTL__RESERVE1__SHIFT 0xa +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN__SHIFT 0x16 +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV__SHIFT 0x17 +#define 
RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN__SHIFT 0x18 +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x19 +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN__SHIFT 0x1a +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV__SHIFT 0x1b +#define RPB_SDPPORT_CNTL__CG_BUSY_PORT__SHIFT 0x1c +#define RPB_SDPPORT_CNTL__RESERVE__SHIFT 0x1d +#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE_MASK 0x00000001L +#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE_MASK 0x00000006L +#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT_MASK 0x00000008L +#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER_MASK 0x00000010L +#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS_MASK 0x00000020L +#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD_MASK 0x000003C0L +#define RPB_SDPPORT_CNTL__RESERVE1_MASK 0x003FFC00L +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN_MASK 0x00400000L +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV_MASK 0x00800000L +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN_MASK 0x01000000L +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV_MASK 0x02000000L +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN_MASK 0x04000000L +#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV_MASK 0x08000000L +#define RPB_SDPPORT_CNTL__CG_BUSY_PORT_MASK 0x10000000L +#define RPB_SDPPORT_CNTL__RESERVE_MASK 0xE0000000L +//RPB_NBIF_SDPPORT_CNTL +#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_WRRSP_CRD__SHIFT 0x0 +#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_RDRSP_CRD__SHIFT 0x8 +#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_REQ_CRD__SHIFT 0x10 +#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_DATA_CRD__SHIFT 0x18 +#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_WRRSP_CRD_MASK 0x000000FFL +#define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_RDRSP_CRD_MASK 0x0000FF00L +#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_REQ_CRD_MASK 0x00FF0000L +#define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_DATA_CRD_MASK 0xFF000000L +//RPB_DEINTRLV_COMBINE_CNTL +#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER__SHIFT 0x0 +#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN__SHIFT 0x4 +#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE__SHIFT 0x5 +#define RPB_DEINTRLV_COMBINE_CNTL__XPB_WRREQ_CRD__SHIFT 0x6 +#define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN__SHIFT 0xe +#define RPB_DEINTRLV_COMBINE_CNTL__RESERVE__SHIFT 0xf +#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER_MASK 0x0000000FL +#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN_MASK 0x00000010L +#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE_MASK 0x00000020L +#define RPB_DEINTRLV_COMBINE_CNTL__XPB_WRREQ_CRD_MASK 0x00003FC0L +#define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN_MASK 0x00004000L +#define RPB_DEINTRLV_COMBINE_CNTL__RESERVE_MASK 0xFFFF8000L +//RPB_VC_SWITCH_RDWR +#define RPB_VC_SWITCH_RDWR__MODE__SHIFT 0x0 +#define RPB_VC_SWITCH_RDWR__NUM_RD__SHIFT 0x2 +#define RPB_VC_SWITCH_RDWR__NUM_WR__SHIFT 0xa +#define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD__SHIFT 0x12 +#define RPB_VC_SWITCH_RDWR__CENTER_MARGIN__SHIFT 0x1a +#define RPB_VC_SWITCH_RDWR__MODE_MASK 0x00000003L +#define RPB_VC_SWITCH_RDWR__NUM_RD_MASK 0x000003FCL +#define RPB_VC_SWITCH_RDWR__NUM_WR_MASK 0x0003FC00L +#define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD_MASK 0x03FC0000L +#define RPB_VC_SWITCH_RDWR__CENTER_MARGIN_MASK 0xFC000000L +//RPB_ATS_CNTL3 +#define RPB_ATS_CNTL3__RPB_ATS_VC5_TR__SHIFT 0x0 +#define RPB_ATS_CNTL3__RPB_ATS_VC0_TR__SHIFT 0x9 +#define RPB_ATS_CNTL3__RPB_ATS_PR__SHIFT 0x12 +#define RPB_ATS_CNTL3__RPB_ATS_VC5_TR_MASK 0x000001FFL +#define RPB_ATS_CNTL3__RPB_ATS_VC0_TR_MASK 0x0003FE00L +#define 
RPB_ATS_CNTL3__RPB_ATS_PR_MASK 0x07FC0000L +//RPB_DF_SDPPORT_CNTL +#define RPB_DF_SDPPORT_CNTL__DF_REQ_CRD__SHIFT 0x0 +#define RPB_DF_SDPPORT_CNTL__DF_DATA_CRD__SHIFT 0x6 +#define RPB_DF_SDPPORT_CNTL__DF_HALT_THRESHOLD__SHIFT 0xe +#define RPB_DF_SDPPORT_CNTL__DF_RELEASE_CREDIT_MODE__SHIFT 0x12 +#define RPB_DF_SDPPORT_CNTL__DF_ORIG_ACK_TIMER__SHIFT 0x13 +#define RPB_DF_SDPPORT_CNTL__DF_RAW_EA_CHECK_ENABLE__SHIFT 0x1b +#define RPB_DF_SDPPORT_CNTL__DF_RAW_CHECK_ENABLE__SHIFT 0x1c +#define RPB_DF_SDPPORT_CNTL__DF_RAAT_CHECK_ENABLE__SHIFT 0x1d +#define RPB_DF_SDPPORT_CNTL__DF_ATAR_CHECK_ENABLE__SHIFT 0x1e +#define RPB_DF_SDPPORT_CNTL__DF_VC3_READ_CHECK__SHIFT 0x1f +#define RPB_DF_SDPPORT_CNTL__DF_REQ_CRD_MASK 0x0000003FL +#define RPB_DF_SDPPORT_CNTL__DF_DATA_CRD_MASK 0x00003FC0L +#define RPB_DF_SDPPORT_CNTL__DF_HALT_THRESHOLD_MASK 0x0003C000L +#define RPB_DF_SDPPORT_CNTL__DF_RELEASE_CREDIT_MODE_MASK 0x00040000L +#define RPB_DF_SDPPORT_CNTL__DF_ORIG_ACK_TIMER_MASK 0x07F80000L +#define RPB_DF_SDPPORT_CNTL__DF_RAW_EA_CHECK_ENABLE_MASK 0x08000000L +#define RPB_DF_SDPPORT_CNTL__DF_RAW_CHECK_ENABLE_MASK 0x10000000L +#define RPB_DF_SDPPORT_CNTL__DF_RAAT_CHECK_ENABLE_MASK 0x20000000L +#define RPB_DF_SDPPORT_CNTL__DF_ATAR_CHECK_ENABLE_MASK 0x40000000L +#define RPB_DF_SDPPORT_CNTL__DF_VC3_READ_CHECK_MASK 0x80000000L +//RPB_ATS_CNTL +#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE__SHIFT 0x0 +#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE__SHIFT 0x1 +#define RPB_ATS_CNTL__SWITCH_THRESHOLD__SHIFT 0x2 +#define RPB_ATS_CNTL__TIME_SLICE__SHIFT 0x7 +#define RPB_ATS_CNTL__ATCTR_VC0_SWITCH_NUM__SHIFT 0xf +#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM__SHIFT 0x13 +#define RPB_ATS_CNTL__WR_AT__SHIFT 0x17 +#define RPB_ATS_CNTL__MM_TRANS_VC5_ENABLE__SHIFT 0x19 +#define RPB_ATS_CNTL__GC_TRANS_VC5_ENABLE__SHIFT 0x1a +#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE_MASK 0x00000001L +#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE_MASK 0x00000002L +#define RPB_ATS_CNTL__SWITCH_THRESHOLD_MASK 0x0000007CL +#define RPB_ATS_CNTL__TIME_SLICE_MASK 0x00007F80L +#define RPB_ATS_CNTL__ATCTR_VC0_SWITCH_NUM_MASK 0x00078000L +#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM_MASK 0x00780000L +#define RPB_ATS_CNTL__WR_AT_MASK 0x01800000L +#define RPB_ATS_CNTL__MM_TRANS_VC5_ENABLE_MASK 0x02000000L +#define RPB_ATS_CNTL__GC_TRANS_VC5_ENABLE_MASK 0x04000000L +//RPB_ATS_CNTL2 +#define RPB_ATS_CNTL2__INVAL_COM_CMD__SHIFT 0x0 +#define RPB_ATS_CNTL2__TRANS_CMD__SHIFT 0x6 +#define RPB_ATS_CNTL2__PAGE_REQ_CMD__SHIFT 0xc +#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE__SHIFT 0x12 +#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE__SHIFT 0x15 +#define RPB_ATS_CNTL2__VENDOR_ID__SHIFT 0x18 +#define RPB_ATS_CNTL2__RPB_VC5_CRD__SHIFT 0x1a +#define RPB_ATS_CNTL2__INVAL_COM_CMD_MASK 0x0000003FL +#define RPB_ATS_CNTL2__TRANS_CMD_MASK 0x00000FC0L +#define RPB_ATS_CNTL2__PAGE_REQ_CMD_MASK 0x0003F000L +#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE_MASK 0x001C0000L +#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE_MASK 0x00E00000L +#define RPB_ATS_CNTL2__VENDOR_ID_MASK 0x03000000L +#define RPB_ATS_CNTL2__RPB_VC5_CRD_MASK 0x7C000000L +//RPB_PERFCOUNTER0_CFG +#define RPB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define RPB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define RPB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define RPB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define RPB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define RPB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define 
RPB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define RPB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//RPB_PERFCOUNTER1_CFG +#define RPB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define RPB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define RPB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define RPB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define RPB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define RPB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define RPB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define RPB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//RPB_PERFCOUNTER2_CFG +#define RPB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define RPB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define RPB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define RPB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define RPB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define RPB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define RPB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define RPB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//RPB_PERFCOUNTER3_CFG +#define RPB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0 +#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8 +#define RPB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18 +#define RPB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c +#define RPB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d +#define RPB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL +#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define RPB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L +#define RPB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L +#define RPB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L +//RPB_PERFCOUNTER_RSLT_CNTL +#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//RPB_PERF_COUNTER_CNTL +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER__SHIFT 0x2 +#define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS__SHIFT 0x3 +#define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION__SHIFT 0x4 +#define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS__SHIFT 0x5 +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0__SHIFT 0x9 +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1__SHIFT 0xe +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2__SHIFT 0x13 +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3__SHIFT 0x18 +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT_MASK 0x00000003L +#define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER_MASK 0x00000004L +#define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS_MASK 0x00000008L +#define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION_MASK 0x00000010L +#define 
RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS_MASK 0x000001E0L +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0_MASK 0x00003E00L +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1_MASK 0x0007C000L +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2_MASK 0x00F80000L +#define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3_MASK 0x1F000000L +//RPB_PERFCOUNTER_HI +#define RPB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define RPB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define RPB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define RPB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//RPB_PERFCOUNTER_LO +#define RPB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define RPB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//RPB_PERF_COUNTER_STATUS +#define RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE__SHIFT 0x0 +#define RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE_MASK 0xFFFFFFFFL + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h index 222fa8d13269..a05bf8e4f58d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h @@ -626,6 +626,8 @@ #define regDTBCLK_DTO2_MODULO_BASE_IDX 2 #define regDTBCLK_DTO3_MODULO 0x0022 #define regDTBCLK_DTO3_MODULO_BASE_IDX 2 +#define regHDMICHARCLK0_CLOCK_CNTL 0x004a +#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2 #define regPHYASYMCLK_CLOCK_CNTL 0x0052 #define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regPHYBSYMCLK_CLOCK_CNTL 0x0053 @@ -638,6 +640,8 @@ #define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regPHYFSYMCLK_CLOCK_CNTL 0x0057 #define regPHYFSYMCLK_CLOCK_CNTL_BASE_IDX 2 +#define regHDMISTREAMCLK_CNTL 0x0059 +#define regHDMISTREAMCLK_CNTL_BASE_IDX 2 #define regDCCG_GATE_DISABLE_CNTL3 0x005a #define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2 #define regHDMISTREAMCLK0_DTO_PARAM 0x005b diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h index 8ddb03a1dc39..df84941bbe5b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h @@ -1933,6 +1933,11 @@ //DTBCLK_DTO3_MODULO #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0 #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL +//HDMICHARCLK0_CLOCK_CNTL +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L //PHYASYMCLK_CLOCK_CNTL #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0 #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4 @@ -1967,6 +1972,11 @@ #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL__SHIFT 0x4 #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_EN_MASK 0x00000001L #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL_MASK 0x00000030L +//HDMISTREAMCLK_CNTL +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x10 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000003L +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00010000L //DCCG_GATE_DISABLE_CNTL3 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1 diff 
--git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h index 7cf0a625277b..33b5d9be06b1 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h @@ -4802,6 +4802,10 @@ #define regCM0_CM_DEALPHA_BASE_IDX 2 #define regCM0_CM_COEF_FORMAT 0x0d8c #define regCM0_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d +#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e +#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5210,6 +5214,10 @@ #define regCM1_CM_DEALPHA_BASE_IDX 2 #define regCM1_CM_COEF_FORMAT 0x0ef7 #define regCM1_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8 +#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9 +#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5618,6 +5626,10 @@ #define regCM2_CM_DEALPHA_BASE_IDX 2 #define regCM2_CM_COEF_FORMAT 0x1062 #define regCM2_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_INDEX 0x1063 +#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_DATA 0x1064 +#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -6026,6 +6038,10 @@ #define regCM3_CM_DEALPHA_BASE_IDX 2 #define regCM3_CM_COEF_FORMAT 0x11cd #define regCM3_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce +#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_DATA 0x11cf +#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -10568,6 +10584,8 @@ #define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 // addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec @@ -10697,6 +10715,8 @@ #define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 // addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec @@ -10827,6 +10847,8 @@ #define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2 +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 // addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec @@ -10957,6 +10979,8 @@ #define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 // addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h index fca72e2ec929..ff77b71167eb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h @@ -16556,6 +16556,13 @@ #define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L #define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L #define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L + +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L + #define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0 #define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9 #define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc @@ -27176,6 +27183,23 @@ #define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8 #define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L #define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L + +//DIG0_DIG_BE_CLK_CNTL +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0 +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4 +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5 +#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET__SHIFT 0x6 +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON__SHIFT 0xc +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L +#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET_MASK 0x00000040L +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON_MASK 0x00001000L +#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L + #define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0 #define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1 #define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2 @@ -36716,6 +36740,17 @@ #define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0 #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL + +//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L + #define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0 #define 
DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9 #define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc @@ -38488,6 +38523,18 @@ #define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX_MASK 0x000001FFL #define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA__SHIFT 0x0 #define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA_MASK 0x0003FFFFL +//DWB_OGAM_LUT_CONTROL +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0 +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4 +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG__SHIFT 0x8 +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE__SHIFT 0x10 +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000030L +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG_MASK 0x00000100L +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL_MASK 0x00001000L +#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE_MASK 0x00010000L + #define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0 #define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4 #define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc @@ -52008,6 +52055,14 @@ #define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS__SHIFT 0x10 #define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS__SHIFT 0x11 #define DIO_CLK_CNTL__DIO_FGCG_REP_DIS__SHIFT 0x14 +#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS__SHIFT 0x15 +#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS__SHIFT 0x16 +#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS__SHIFT 0x17 +#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS__SHIFT 0x18 +#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS__SHIFT 0x19 +#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS__SHIFT 0x1a +#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS__SHIFT 0x1b +#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS__SHIFT 0x1c #define DIO_CLK_CNTL__DIO_TEST_CLK_SEL_MASK 0x0000007FL #define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS_MASK 0x00000200L #define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS_MASK 0x00000400L @@ -52019,6 +52074,16 @@ #define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS_MASK 0x00010000L #define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS_MASK 0x00020000L #define DIO_CLK_CNTL__DIO_FGCG_REP_DIS_MASK 0x00100000L + +#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS_MASK 0x00200000L +#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS_MASK 0x00400000L +#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS_MASK 0x00800000L +#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS_MASK 0x01000000L +#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS_MASK 0x02000000L +#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS_MASK 0x04000000L +#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS_MASK 0x08000000L +#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS_MASK 0x10000000L + #define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS__SHIFT 0x0 #define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1 #define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS_MASK 0x00000001L diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h new file mode 100644 index 000000000000..9c16611af06b --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h @@ -0,0 +1,219 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _hdp_7_0_0_OFFSET_HEADER +#define _hdp_7_0_0_OFFSET_HEADER + + + +// addressBlock: hdp_hdpdec +// base address: 0x3c80 +#define regHDP_MMHUB_TLVL 0x0008 +#define regHDP_MMHUB_TLVL_BASE_IDX 0 +#define regHDP_MMHUB_UNITID 0x0009 +#define regHDP_MMHUB_UNITID_BASE_IDX 0 +#define regHDP_NONSURFACE_BASE 0x0040 +#define regHDP_NONSURFACE_BASE_BASE_IDX 0 +#define regHDP_NONSURFACE_INFO 0x0041 +#define regHDP_NONSURFACE_INFO_BASE_IDX 0 +#define regHDP_NONSURFACE_BASE_HI 0x0042 +#define regHDP_NONSURFACE_BASE_HI_BASE_IDX 0 +#define regHDP_SURFACE_WRITE_FLAGS 0x00c4 +#define regHDP_SURFACE_WRITE_FLAGS_BASE_IDX 0 +#define regHDP_SURFACE_READ_FLAGS 0x00c5 +#define regHDP_SURFACE_READ_FLAGS_BASE_IDX 0 +#define regHDP_SURFACE_WRITE_FLAGS_CLR 0x00c6 +#define regHDP_SURFACE_WRITE_FLAGS_CLR_BASE_IDX 0 +#define regHDP_SURFACE_READ_FLAGS_CLR 0x00c7 +#define regHDP_SURFACE_READ_FLAGS_CLR_BASE_IDX 0 +#define regHDP_NONSURF_FLAGS 0x00c8 +#define regHDP_NONSURF_FLAGS_BASE_IDX 0 +#define regHDP_NONSURF_FLAGS_CLR 0x00c9 +#define regHDP_NONSURF_FLAGS_CLR_BASE_IDX 0 +#define regHDP_SW_SEMAPHORE 0x00cd +#define regHDP_SW_SEMAPHORE_BASE_IDX 0 +#define regHDP_DEBUG0 0x00ce +#define regHDP_DEBUG0_BASE_IDX 0 +#define regHDP_LAST_SURFACE_HIT 0x00d0 +#define regHDP_LAST_SURFACE_HIT_BASE_IDX 0 +#define regHDP_OUTSTANDING_REQ 0x00d1 +#define regHDP_OUTSTANDING_REQ_BASE_IDX 0 +#define regHDP_HOST_PATH_CNTL 0x00d2 +#define regHDP_HOST_PATH_CNTL_BASE_IDX 0 +#define regHDP_MISC_CNTL 0x00d3 +#define regHDP_MISC_CNTL_BASE_IDX 0 +#define regHDP_MEM_POWER_CTRL 0x00d4 +#define regHDP_MEM_POWER_CTRL_BASE_IDX 0 +#define regHDP_CLK_CNTL 0x00d5 +#define regHDP_CLK_CNTL_BASE_IDX 0 +#define regHDP_MMHUB_CNTL 0x00d6 +#define regHDP_MMHUB_CNTL_BASE_IDX 0 +#define regHDP_XDP_BUSY_STS 0x00d7 +#define regHDP_XDP_BUSY_STS_BASE_IDX 0 +#define regHDP_XDP_MMHUB_ERROR 0x00d8 +#define regHDP_XDP_MMHUB_ERROR_BASE_IDX 0 +#define regHDP_XDP_MMHUB_ERROR_CLR 0x00da +#define regHDP_XDP_MMHUB_ERROR_CLR_BASE_IDX 0 +#define regHDP_VERSION 0x00db +#define regHDP_VERSION_BASE_IDX 0 +#define regHDP_MEMIO_CNTL 0x00f6 +#define regHDP_MEMIO_CNTL_BASE_IDX 0 +#define regHDP_MEMIO_ADDR 0x00f7 +#define regHDP_MEMIO_ADDR_BASE_IDX 0 +#define regHDP_MEMIO_STATUS 0x00f8 +#define regHDP_MEMIO_STATUS_BASE_IDX 0 +#define regHDP_MEMIO_WR_DATA 0x00f9 +#define regHDP_MEMIO_WR_DATA_BASE_IDX 0 +#define regHDP_MEMIO_RD_DATA 0x00fa +#define regHDP_MEMIO_RD_DATA_BASE_IDX 0 +#define 
regHDP_XDP_DIRECT2HDP_FIRST 0x0100 +#define regHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0 +#define regHDP_XDP_D2H_FLUSH 0x0101 +#define regHDP_XDP_D2H_FLUSH_BASE_IDX 0 +#define regHDP_XDP_D2H_BAR_UPDATE 0x0102 +#define regHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_3 0x0103 +#define regHDP_XDP_D2H_RSVD_3_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_4 0x0104 +#define regHDP_XDP_D2H_RSVD_4_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_5 0x0105 +#define regHDP_XDP_D2H_RSVD_5_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_6 0x0106 +#define regHDP_XDP_D2H_RSVD_6_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_7 0x0107 +#define regHDP_XDP_D2H_RSVD_7_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_8 0x0108 +#define regHDP_XDP_D2H_RSVD_8_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_9 0x0109 +#define regHDP_XDP_D2H_RSVD_9_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_10 0x010a +#define regHDP_XDP_D2H_RSVD_10_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_11 0x010b +#define regHDP_XDP_D2H_RSVD_11_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_12 0x010c +#define regHDP_XDP_D2H_RSVD_12_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_13 0x010d +#define regHDP_XDP_D2H_RSVD_13_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_14 0x010e +#define regHDP_XDP_D2H_RSVD_14_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_15 0x010f +#define regHDP_XDP_D2H_RSVD_15_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_16 0x0110 +#define regHDP_XDP_D2H_RSVD_16_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_17 0x0111 +#define regHDP_XDP_D2H_RSVD_17_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_18 0x0112 +#define regHDP_XDP_D2H_RSVD_18_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_19 0x0113 +#define regHDP_XDP_D2H_RSVD_19_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_20 0x0114 +#define regHDP_XDP_D2H_RSVD_20_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_21 0x0115 +#define regHDP_XDP_D2H_RSVD_21_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_22 0x0116 +#define regHDP_XDP_D2H_RSVD_22_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_23 0x0117 +#define regHDP_XDP_D2H_RSVD_23_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_24 0x0118 +#define regHDP_XDP_D2H_RSVD_24_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_25 0x0119 +#define regHDP_XDP_D2H_RSVD_25_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_26 0x011a +#define regHDP_XDP_D2H_RSVD_26_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_27 0x011b +#define regHDP_XDP_D2H_RSVD_27_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_28 0x011c +#define regHDP_XDP_D2H_RSVD_28_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_29 0x011d +#define regHDP_XDP_D2H_RSVD_29_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_30 0x011e +#define regHDP_XDP_D2H_RSVD_30_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_31 0x011f +#define regHDP_XDP_D2H_RSVD_31_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_32 0x0120 +#define regHDP_XDP_D2H_RSVD_32_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_33 0x0121 +#define regHDP_XDP_D2H_RSVD_33_BASE_IDX 0 +#define regHDP_XDP_D2H_RSVD_34 0x0122 +#define regHDP_XDP_D2H_RSVD_34_BASE_IDX 0 +#define regHDP_XDP_DIRECT2HDP_LAST 0x0123 +#define regHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR_CFG 0x0124 +#define regHDP_XDP_P2P_BAR_CFG_BASE_IDX 0 +#define regHDP_XDP_P2P_MBX_OFFSET 0x0125 +#define regHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0 +#define regHDP_XDP_P2P_MBX_ADDR0 0x0126 +#define regHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0 +#define regHDP_XDP_P2P_MBX_ADDR1 0x0127 +#define regHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0 +#define regHDP_XDP_P2P_MBX_ADDR2 0x0128 +#define regHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0 +#define regHDP_XDP_P2P_MBX_ADDR3 0x0129 +#define regHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0 +#define regHDP_XDP_P2P_MBX_ADDR4 0x012a +#define regHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0 
+#define regHDP_XDP_P2P_MBX_ADDR5 0x012b +#define regHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0 +#define regHDP_XDP_P2P_MBX_ADDR6 0x012c +#define regHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0 +#define regHDP_XDP_HDP_MBX_MC_CFG 0x012d +#define regHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0 +#define regHDP_XDP_HDP_MC_CFG 0x012e +#define regHDP_XDP_HDP_MC_CFG_BASE_IDX 0 +#define regHDP_XDP_HST_CFG 0x012f +#define regHDP_XDP_HST_CFG_BASE_IDX 0 +#define regHDP_XDP_HDP_IPH_CFG 0x0131 +#define regHDP_XDP_HDP_IPH_CFG_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR0 0x0134 +#define regHDP_XDP_P2P_BAR0_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR1 0x0135 +#define regHDP_XDP_P2P_BAR1_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR2 0x0136 +#define regHDP_XDP_P2P_BAR2_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR3 0x0137 +#define regHDP_XDP_P2P_BAR3_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR4 0x0138 +#define regHDP_XDP_P2P_BAR4_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR5 0x0139 +#define regHDP_XDP_P2P_BAR5_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR6 0x013a +#define regHDP_XDP_P2P_BAR6_BASE_IDX 0 +#define regHDP_XDP_P2P_BAR7 0x013b +#define regHDP_XDP_P2P_BAR7_BASE_IDX 0 +#define regHDP_XDP_FLUSH_ARMED_STS 0x013c +#define regHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0 +#define regHDP_XDP_FLUSH_CNTR0_STS 0x013d +#define regHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0 +#define regHDP_XDP_STICKY 0x013f +#define regHDP_XDP_STICKY_BASE_IDX 0 +#define regHDP_XDP_CHKN 0x0140 +#define regHDP_XDP_CHKN_BASE_IDX 0 +#define regHDP_XDP_BARS_ADDR_39_36 0x0144 +#define regHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0 +#define regHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145 +#define regHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0 +#define regHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148 +#define regHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0 +#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2 0x0149 +#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h new file mode 100644 index 000000000000..afb73c5a4018 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h @@ -0,0 +1,735 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _hdp_7_0_0_SH_MASK_HEADER +#define _hdp_7_0_0_SH_MASK_HEADER + + +// addressBlock: hdp_hdpdec +//HDP_MMHUB_TLVL +#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0 +#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4 +#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8 +#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc +#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10 +#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x0000000FL +#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x000000F0L +#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000F00L +#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x0000F000L +#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x000F0000L +//HDP_MMHUB_UNITID +#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0 +#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8 +#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10 +#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL +#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L +#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L +//HDP_NONSURFACE_BASE +#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0 +#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL +//HDP_NONSURFACE_INFO +#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4 +#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8 +#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L +#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L +//HDP_NONSURFACE_BASE_HI +#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0 +#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL +//HDP_SURFACE_WRITE_FLAGS +#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG__SHIFT 0x0 +#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG__SHIFT 0x1 +#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG_MASK 0x00000001L +#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG_MASK 0x00000002L +//HDP_SURFACE_READ_FLAGS +#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG__SHIFT 0x0 +#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG__SHIFT 0x1 +#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG_MASK 0x00000001L +#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG_MASK 0x00000002L +//HDP_SURFACE_WRITE_FLAGS_CLR +#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR__SHIFT 0x0 +#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR__SHIFT 0x1 +#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR_MASK 0x00000001L +#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR_MASK 0x00000002L +//HDP_SURFACE_READ_FLAGS_CLR +#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR__SHIFT 0x0 +#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR__SHIFT 0x1 +#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR_MASK 0x00000001L +#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR_MASK 0x00000002L +//HDP_NONSURF_FLAGS +#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0 +#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1 +#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L +#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L +//HDP_NONSURF_FLAGS_CLR +#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0 +#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1 +#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L +#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L +//HDP_SW_SEMAPHORE +#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0 +#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL +//HDP_DEBUG0 +#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0 +#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL 
+//HDP_LAST_SURFACE_HIT +#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0 +#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L +//HDP_OUTSTANDING_REQ +#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0 +#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8 +#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL +#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L +//HDP_HOST_PATH_CNTL +#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9 +#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb +#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12 +#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13 +#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15 +#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16 +#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d +#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L +#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L +#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L +#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L +#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L +#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L +#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L +//HDP_MISC_CNTL +#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2 +#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5 +#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE__SHIFT 0x8 +#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE__SHIFT 0x9 +#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb +#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK__SHIFT 0xe +#define HDP_MISC_CNTL__NACK_ENABLE__SHIFT 0x13 +#define HDP_MISC_CNTL__ATOMIC_NACK_ENABLE__SHIFT 0x14 +#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15 +#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE__SHIFT 0x16 +#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0x17 +#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18 +#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e +#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL +#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L +#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE_MASK 0x00000100L +#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE_MASK 0x00000200L +#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L +#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK_MASK 0x0000C000L +#define HDP_MISC_CNTL__NACK_ENABLE_MASK 0x00080000L +#define HDP_MISC_CNTL__ATOMIC_NACK_ENABLE_MASK 0x00100000L +#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L +#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE_MASK 0x00400000L +#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00800000L +#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L +#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L +//HDP_MEM_POWER_CTRL +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_CTRL_EN__SHIFT 0x0 +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN__SHIFT 0x1 +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN__SHIFT 0x2 +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN__SHIFT 0x3 +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_IDLE_HYSTERESIS__SHIFT 0x4 +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8 +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN__SHIFT 0x10 +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN__SHIFT 0x11 +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN__SHIFT 0x12 +#define 
HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN__SHIFT 0x13 +#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS__SHIFT 0x14 +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18 +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_CTRL_EN_MASK 0x00000001L +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK 0x00000002L +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK 0x00000004L +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK 0x00000008L +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_IDLE_HYSTERESIS_MASK 0x00000070L +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L +#define HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN_MASK 0x00040000L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN_MASK 0x00080000L +#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS_MASK 0x00700000L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L +//HDP_CLK_CNTL +#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0 +#define HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a +#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1b +#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c +#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d +#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e +#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f +#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL +#define HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L +#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK 0x08000000L +#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L +#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L +#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L +#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L +//HDP_MMHUB_CNTL +#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0 +#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1 +#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2 +#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE__SHIFT 0x4 +#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE__SHIFT 0x5 +#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE__SHIFT 0x6 +#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L +#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L +#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L +#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE_MASK 0x00000010L +#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE_MASK 0x00000020L +#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE_MASK 0x00000040L +//HDP_XDP_BUSY_STS +#define HDP_XDP_BUSY_STS__BUSY_BITS_0__SHIFT 0x0 +#define HDP_XDP_BUSY_STS__BUSY_BITS_1__SHIFT 0x1 +#define HDP_XDP_BUSY_STS__BUSY_BITS_2__SHIFT 0x2 +#define HDP_XDP_BUSY_STS__BUSY_BITS_3__SHIFT 0x3 +#define HDP_XDP_BUSY_STS__BUSY_BITS_4__SHIFT 0x4 +#define HDP_XDP_BUSY_STS__BUSY_BITS_5__SHIFT 0x5 +#define HDP_XDP_BUSY_STS__BUSY_BITS_6__SHIFT 0x6 +#define HDP_XDP_BUSY_STS__BUSY_BITS_7__SHIFT 0x7 +#define HDP_XDP_BUSY_STS__BUSY_BITS_8__SHIFT 0x8 +#define HDP_XDP_BUSY_STS__BUSY_BITS_9__SHIFT 0x9 +#define HDP_XDP_BUSY_STS__BUSY_BITS_10__SHIFT 0xa +#define HDP_XDP_BUSY_STS__BUSY_BITS_11__SHIFT 0xb +#define HDP_XDP_BUSY_STS__BUSY_BITS_12__SHIFT 0xc +#define 
HDP_XDP_BUSY_STS__BUSY_BITS_13__SHIFT 0xd +#define HDP_XDP_BUSY_STS__BUSY_BITS_14__SHIFT 0xe +#define HDP_XDP_BUSY_STS__BUSY_BITS_15__SHIFT 0xf +#define HDP_XDP_BUSY_STS__BUSY_BITS_16__SHIFT 0x10 +#define HDP_XDP_BUSY_STS__BUSY_BITS_17__SHIFT 0x11 +#define HDP_XDP_BUSY_STS__BUSY_BITS_18__SHIFT 0x12 +#define HDP_XDP_BUSY_STS__BUSY_BITS_19__SHIFT 0x13 +#define HDP_XDP_BUSY_STS__BUSY_BITS_20__SHIFT 0x14 +#define HDP_XDP_BUSY_STS__BUSY_BITS_21__SHIFT 0x15 +#define HDP_XDP_BUSY_STS__BUSY_BITS_22__SHIFT 0x16 +#define HDP_XDP_BUSY_STS__BUSY_BITS_23__SHIFT 0x17 +#define HDP_XDP_BUSY_STS__Z_FENCE_BIT__SHIFT 0x18 +#define HDP_XDP_BUSY_STS__BUSY_BITS_0_MASK 0x00000001L +#define HDP_XDP_BUSY_STS__BUSY_BITS_1_MASK 0x00000002L +#define HDP_XDP_BUSY_STS__BUSY_BITS_2_MASK 0x00000004L +#define HDP_XDP_BUSY_STS__BUSY_BITS_3_MASK 0x00000008L +#define HDP_XDP_BUSY_STS__BUSY_BITS_4_MASK 0x00000010L +#define HDP_XDP_BUSY_STS__BUSY_BITS_5_MASK 0x00000020L +#define HDP_XDP_BUSY_STS__BUSY_BITS_6_MASK 0x00000040L +#define HDP_XDP_BUSY_STS__BUSY_BITS_7_MASK 0x00000080L +#define HDP_XDP_BUSY_STS__BUSY_BITS_8_MASK 0x00000100L +#define HDP_XDP_BUSY_STS__BUSY_BITS_9_MASK 0x00000200L +#define HDP_XDP_BUSY_STS__BUSY_BITS_10_MASK 0x00000400L +#define HDP_XDP_BUSY_STS__BUSY_BITS_11_MASK 0x00000800L +#define HDP_XDP_BUSY_STS__BUSY_BITS_12_MASK 0x00001000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_13_MASK 0x00002000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_14_MASK 0x00004000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_15_MASK 0x00008000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_16_MASK 0x00010000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_17_MASK 0x00020000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_18_MASK 0x00040000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_19_MASK 0x00080000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_20_MASK 0x00100000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_21_MASK 0x00200000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_22_MASK 0x00400000L +#define HDP_XDP_BUSY_STS__BUSY_BITS_23_MASK 0x00800000L +#define HDP_XDP_BUSY_STS__Z_FENCE_BIT_MASK 0x01000000L +//HDP_XDP_MMHUB_ERROR +#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1 +#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2 +#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3 +#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5 +#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6 +#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7 +#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9 +#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa +#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb +#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd +#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe +#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf +#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11 +#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12 +#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13 +#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15 +#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16 +#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17 +#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L +#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L +#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L +#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L +#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L +#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L +#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L +#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 
0x00000400L +#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L +#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L +#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L +#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L +#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L +#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L +#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L +#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L +#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L +#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L +//HDP_XDP_MMHUB_ERROR_CLR +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_01_CLR__SHIFT 0x1 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_10_CLR__SHIFT 0x2 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_11_CLR__SHIFT 0x3 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_FED_CLR__SHIFT 0x4 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_01_CLR__SHIFT 0x5 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_10_CLR__SHIFT 0x6 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_11_CLR__SHIFT 0x7 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_01_CLR__SHIFT 0x9 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_10_CLR__SHIFT 0xa +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_11_CLR__SHIFT 0xb +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_FED_CLR__SHIFT 0xc +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_01_CLR__SHIFT 0xd +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_10_CLR__SHIFT 0xe +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_11_CLR__SHIFT 0xf +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_WUSER_FED_CLR__SHIFT 0x10 +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_01_CLR__SHIFT 0x11 +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_10_CLR__SHIFT 0x12 +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_11_CLR__SHIFT 0x13 +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_01_CLR__SHIFT 0x15 +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_10_CLR__SHIFT 0x16 +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_11_CLR__SHIFT 0x17 +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_01_CLR_MASK 0x00000002L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_10_CLR_MASK 0x00000004L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BRESP_11_CLR_MASK 0x00000008L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_FED_CLR_MASK 0x00000010L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_01_CLR_MASK 0x00000020L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_10_CLR_MASK 0x00000040L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_BUSER_NACK_11_CLR_MASK 0x00000080L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_01_CLR_MASK 0x00000200L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_10_CLR_MASK 0x00000400L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RRESP_11_CLR_MASK 0x00000800L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_FED_CLR_MASK 0x00001000L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_01_CLR_MASK 0x00002000L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_10_CLR_MASK 0x00004000L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_RUSER_NACK_11_CLR_MASK 0x00008000L +#define HDP_XDP_MMHUB_ERROR_CLR__HDP_WUSER_FED_CLR_MASK 0x00010000L +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_01_CLR_MASK 0x00020000L +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_10_CLR_MASK 0x00040000L +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BRESP_11_CLR_MASK 0x00080000L +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_01_CLR_MASK 0x00200000L +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_10_CLR_MASK 0x00400000L +#define HDP_XDP_MMHUB_ERROR_CLR__XDP_BUSER_NACK_11_CLR_MASK 0x00800000L +//HDP_VERSION +#define HDP_VERSION__MINVER__SHIFT 
0x0 +#define HDP_VERSION__MAJVER__SHIFT 0x8 +#define HDP_VERSION__REV__SHIFT 0x10 +#define HDP_VERSION__MINVER_MASK 0x000000FFL +#define HDP_VERSION__MAJVER_MASK 0x0000FF00L +#define HDP_VERSION__REV_MASK 0x00FF0000L +//HDP_MEMIO_CNTL +#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0 +#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1 +#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2 +#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6 +#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7 +#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8 +#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe +#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf +#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10 +#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11 +#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L +#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L +#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL +#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L +#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L +#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L +#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L +#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L +#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L +#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L +//HDP_MEMIO_ADDR +#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0 +#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL +//HDP_MEMIO_STATUS +#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0 +#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1 +#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2 +#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3 +#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L +#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L +#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L +#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L +//HDP_MEMIO_WR_DATA +#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0 +#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL +//HDP_MEMIO_RD_DATA +#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0 +#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL +//HDP_XDP_DIRECT2HDP_FIRST +#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0 +#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_FLUSH +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0 +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4 +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8 +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10 +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12 +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13 +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14 +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L +#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L +//HDP_XDP_D2H_BAR_UPDATE +#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0 +#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10 +#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14 
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL +#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L +#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L +//HDP_XDP_D2H_RSVD_3 +#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_4 +#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_5 +#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_6 +#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_7 +#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_8 +#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_9 +#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_10 +#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_11 +#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_12 +#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_13 +#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_14 +#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_15 +#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_16 +#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_17 +#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_18 +#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_19 +#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_20 +#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_21 +#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_22 +#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_23 +#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_24 +#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_25 +#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_26 +#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_27 +#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_28 +#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL 
+//HDP_XDP_D2H_RSVD_29 +#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_30 +#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_31 +#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_32 +#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_33 +#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_D2H_RSVD_34 +#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0 +#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_DIRECT2HDP_LAST +#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0 +#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL +//HDP_XDP_P2P_BAR_CFG +#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0 +#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4 +#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL +#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L +//HDP_XDP_P2P_MBX_OFFSET +#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0 +#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL +//HDP_XDP_P2P_MBX_ADDR0 +#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0 +#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3 +#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14 +#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18 +#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L +#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L +#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L +#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L +//HDP_XDP_P2P_MBX_ADDR1 +#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0 +#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3 +#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14 +#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18 +#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L +#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L +#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L +#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L +//HDP_XDP_P2P_MBX_ADDR2 +#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0 +#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3 +#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14 +#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18 +#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L +#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L +#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L +#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L +//HDP_XDP_P2P_MBX_ADDR3 +#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0 +#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3 +#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14 +#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18 +#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L +#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L +#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L +#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L +//HDP_XDP_P2P_MBX_ADDR4 +#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0 +#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3 +#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14 +#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18 +#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L +#define 
HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L +#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L +#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L +//HDP_XDP_P2P_MBX_ADDR5 +#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0 +#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3 +#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14 +#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18 +#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L +#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L +#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L +#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L +//HDP_XDP_P2P_MBX_ADDR6 +#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0 +#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3 +#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14 +#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18 +#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L +#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L +#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L +#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L +//HDP_XDP_HDP_MBX_MC_CFG +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0 +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4 +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8 +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L +#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L +//HDP_XDP_HDP_MC_CFG +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE__SHIFT 0x0 +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE__SHIFT 0x1 +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE__SHIFT 0x2 +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3 +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4 +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8 +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE_MASK 0x00000001L +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE_MASK 0x00000002L +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE_MASK 0x00000004L +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L +#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L +//HDP_XDP_HST_CFG +#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0 +#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 
0x1 +#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3 +#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4 +#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5 +#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L +#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L +#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L +#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L +#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L +//HDP_XDP_HDP_IPH_CFG +#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc +#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd +#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L +#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L +//HDP_XDP_P2P_BAR0 +#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0 +#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10 +#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14 +#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL +#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L +#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L +//HDP_XDP_P2P_BAR1 +#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0 +#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10 +#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14 +#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL +#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L +#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L +//HDP_XDP_P2P_BAR2 +#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0 +#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10 +#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14 +#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL +#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L +#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L +//HDP_XDP_P2P_BAR3 +#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0 +#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10 +#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14 +#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL +#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L +#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L +//HDP_XDP_P2P_BAR4 +#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0 +#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10 +#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14 +#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL +#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L +#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L +//HDP_XDP_P2P_BAR5 +#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0 +#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10 +#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14 +#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL +#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L +#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L +//HDP_XDP_P2P_BAR6 +#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0 +#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10 +#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14 +#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL +#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L +#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L +//HDP_XDP_P2P_BAR7 +#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0 +#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10 +#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14 +#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL +#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L +#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L +//HDP_XDP_FLUSH_ARMED_STS +#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0 +#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL +//HDP_XDP_FLUSH_CNTR0_STS +#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0 +#define 
HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL +//HDP_XDP_STICKY +#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0 +#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10 +#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL +#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L +//HDP_XDP_CHKN +#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0 +#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8 +#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10 +#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18 +#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL +#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L +#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L +#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L +//HDP_XDP_BARS_ADDR_39_36 +#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0 +#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4 +#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8 +#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc +#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10 +#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14 +#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18 +#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c +#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL +#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L +#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L +#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L +#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L +#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L +#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L +#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L +//HDP_XDP_MC_VM_FB_LOCATION_BASE +#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0 +#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL +//HDP_XDP_GPU_IOV_VIOLATION_LOG +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12 +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13 +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14 +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L +#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x01F00000L +//HDP_XDP_GPU_IOV_VIOLATION_LOG2 +#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h new file mode 100644 index 000000000000..c783b8ea4698 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h @@ -0,0 +1,388 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _lsdma_7_0_0_OFFSET_HEADER +#define _lsdma_7_0_0_OFFSET_HEADER + + + +// addressBlock: lsdma0_lsdma0dec +// base address: 0x45000 +#define regLSDMA_UCODE_ADDR 0x0000 +#define regLSDMA_UCODE_ADDR_BASE_IDX 0 +#define regLSDMA_UCODE_DATA 0x0001 +#define regLSDMA_UCODE_DATA_BASE_IDX 0 +#define regLSDMA_ERROR_INJECT_CNTL 0x0004 +#define regLSDMA_ERROR_INJECT_CNTL_BASE_IDX 0 +#define regLSDMA_ERROR_INJECT_SELECT 0x0005 +#define regLSDMA_ERROR_INJECT_SELECT_BASE_IDX 0 +#define regLSDMA_CONTEXT_GROUP_BOUNDARY 0x001f +#define regLSDMA_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0 +#define regLSDMA_RB_RPTR_FETCH_HI 0x0020 +#define regLSDMA_RB_RPTR_FETCH_HI_BASE_IDX 0 +#define regLSDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define regLSDMA_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0 +#define regLSDMA_RB_RPTR_FETCH 0x0022 +#define regLSDMA_RB_RPTR_FETCH_BASE_IDX 0 +#define regLSDMA_IB_OFFSET_FETCH 0x0023 +#define regLSDMA_IB_OFFSET_FETCH_BASE_IDX 0 +#define regLSDMA_PROGRAM 0x0024 +#define regLSDMA_PROGRAM_BASE_IDX 0 +#define regLSDMA_STATUS_REG 0x0025 +#define regLSDMA_STATUS_REG_BASE_IDX 0 +#define regLSDMA_STATUS1_REG 0x0026 +#define regLSDMA_STATUS1_REG_BASE_IDX 0 +#define regLSDMA_RD_BURST_CNTL 0x0027 +#define regLSDMA_RD_BURST_CNTL_BASE_IDX 0 +#define regLSDMA_HBM_PAGE_CONFIG 0x0028 +#define regLSDMA_HBM_PAGE_CONFIG_BASE_IDX 0 +#define regLSDMA_UCODE_CHECKSUM 0x0029 +#define regLSDMA_UCODE_CHECKSUM_BASE_IDX 0 +#define regLSDMA_FREEZE 0x002b +#define regLSDMA_FREEZE_BASE_IDX 0 +#define regLSDMA_DCC_CNTL 0x002d +#define regLSDMA_DCC_CNTL_BASE_IDX 0 +#define regLSDMA_POWER_GATING 0x002e +#define regLSDMA_POWER_GATING_BASE_IDX 0 +#define regLSDMA_PGFSM_CONFIG 0x002f +#define regLSDMA_PGFSM_CONFIG_BASE_IDX 0 +#define regLSDMA_PGFSM_WRITE 0x0030 +#define regLSDMA_PGFSM_WRITE_BASE_IDX 0 +#define regLSDMA_PGFSM_READ 0x0031 +#define regLSDMA_PGFSM_READ_BASE_IDX 0 +#define regLSDMA_BA_THRESHOLD 0x0033 +#define regLSDMA_BA_THRESHOLD_BASE_IDX 0 +#define regLSDMA_ID 0x0034 +#define regLSDMA_ID_BASE_IDX 0 +#define regLSDMA_VERSION 0x0035 +#define regLSDMA_VERSION_BASE_IDX 0 +#define regLSDMA_EDC_COUNTER 0x0036 +#define regLSDMA_EDC_COUNTER_BASE_IDX 0 +#define regLSDMA_EDC_COUNTER2 0x0037 +#define regLSDMA_EDC_COUNTER2_BASE_IDX 0 +#define regLSDMA_STATUS2_REG 0x0038 +#define regLSDMA_STATUS2_REG_BASE_IDX 0 +#define regLSDMA_ATOMIC_CNTL 0x0039 +#define regLSDMA_ATOMIC_CNTL_BASE_IDX 0 +#define regLSDMA_ATOMIC_PREOP_LO 0x003a +#define 
regLSDMA_ATOMIC_PREOP_LO_BASE_IDX 0 +#define regLSDMA_ATOMIC_PREOP_HI 0x003b +#define regLSDMA_ATOMIC_PREOP_HI_BASE_IDX 0 +#define regLSDMA_UTCL1_CNTL 0x003c +#define regLSDMA_UTCL1_CNTL_BASE_IDX 0 +#define regLSDMA_UTCL1_WATERMK 0x003d +#define regLSDMA_UTCL1_WATERMK_BASE_IDX 0 +#define regLSDMA_UTCL1_RD_STATUS 0x003e +#define regLSDMA_UTCL1_RD_STATUS_BASE_IDX 0 +#define regLSDMA_UTCL1_WR_STATUS 0x003f +#define regLSDMA_UTCL1_WR_STATUS_BASE_IDX 0 +#define regLSDMA_UTCL1_INV0 0x0040 +#define regLSDMA_UTCL1_INV0_BASE_IDX 0 +#define regLSDMA_UTCL1_INV1 0x0041 +#define regLSDMA_UTCL1_INV1_BASE_IDX 0 +#define regLSDMA_UTCL1_INV2 0x0042 +#define regLSDMA_UTCL1_INV2_BASE_IDX 0 +#define regLSDMA_UTCL1_RD_XNACK0 0x0043 +#define regLSDMA_UTCL1_RD_XNACK0_BASE_IDX 0 +#define regLSDMA_UTCL1_RD_XNACK1 0x0044 +#define regLSDMA_UTCL1_RD_XNACK1_BASE_IDX 0 +#define regLSDMA_UTCL1_WR_XNACK0 0x0045 +#define regLSDMA_UTCL1_WR_XNACK0_BASE_IDX 0 +#define regLSDMA_UTCL1_WR_XNACK1 0x0046 +#define regLSDMA_UTCL1_WR_XNACK1_BASE_IDX 0 +#define regLSDMA_UTCL1_TIMEOUT 0x0047 +#define regLSDMA_UTCL1_TIMEOUT_BASE_IDX 0 +#define regLSDMA_UTCL1_PAGE 0x0048 +#define regLSDMA_UTCL1_PAGE_BASE_IDX 0 +#define regLSDMA_RELAX_ORDERING_LUT 0x004a +#define regLSDMA_RELAX_ORDERING_LUT_BASE_IDX 0 +#define regLSDMA_CHICKEN_BITS_2 0x004b +#define regLSDMA_CHICKEN_BITS_2_BASE_IDX 0 +#define regLSDMA_STATUS3_REG 0x004c +#define regLSDMA_STATUS3_REG_BASE_IDX 0 +#define regLSDMA_PHYSICAL_ADDR_LO 0x004d +#define regLSDMA_PHYSICAL_ADDR_LO_BASE_IDX 0 +#define regLSDMA_PHYSICAL_ADDR_HI 0x004e +#define regLSDMA_PHYSICAL_ADDR_HI_BASE_IDX 0 +#define regLSDMA_ECC_CNTL 0x004f +#define regLSDMA_ECC_CNTL_BASE_IDX 0 +#define regLSDMA_ERROR_LOG 0x0050 +#define regLSDMA_ERROR_LOG_BASE_IDX 0 +#define regLSDMA_PUB_DUMMY0 0x0051 +#define regLSDMA_PUB_DUMMY0_BASE_IDX 0 +#define regLSDMA_PUB_DUMMY1 0x0052 +#define regLSDMA_PUB_DUMMY1_BASE_IDX 0 +#define regLSDMA_PUB_DUMMY2 0x0053 +#define regLSDMA_PUB_DUMMY2_BASE_IDX 0 +#define regLSDMA_PUB_DUMMY3 0x0054 +#define regLSDMA_PUB_DUMMY3_BASE_IDX 0 +#define regLSDMA_F32_COUNTER 0x0055 +#define regLSDMA_F32_COUNTER_BASE_IDX 0 +#define regLSDMA_PERFCNT_PERFCOUNTER0_CFG 0x0057 +#define regLSDMA_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 0 +#define regLSDMA_PERFCNT_PERFCOUNTER1_CFG 0x0058 +#define regLSDMA_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 0 +#define regLSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x0059 +#define regLSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +#define regLSDMA_PERFCNT_MISC_CNTL 0x005a +#define regLSDMA_PERFCNT_MISC_CNTL_BASE_IDX 0 +#define regLSDMA_PERFCNT_PERFCOUNTER_LO 0x005b +#define regLSDMA_PERFCNT_PERFCOUNTER_LO_BASE_IDX 0 +#define regLSDMA_PERFCNT_PERFCOUNTER_HI 0x005c +#define regLSDMA_PERFCNT_PERFCOUNTER_HI_BASE_IDX 0 +#define regLSDMA_CRD_CNTL 0x005d +#define regLSDMA_CRD_CNTL_BASE_IDX 0 +#define regLSDMA_GPU_IOV_VIOLATION_LOG 0x005e +#define regLSDMA_GPU_IOV_VIOLATION_LOG_BASE_IDX 0 +#define regLSDMA_ULV_CNTL 0x005f +#define regLSDMA_ULV_CNTL_BASE_IDX 0 +#define regLSDMA_EA_DBIT_ADDR_DATA 0x0060 +#define regLSDMA_EA_DBIT_ADDR_DATA_BASE_IDX 0 +#define regLSDMA_EA_DBIT_ADDR_INDEX 0x0061 +#define regLSDMA_EA_DBIT_ADDR_INDEX_BASE_IDX 0 +#define regLSDMA_STATUS4_REG 0x0063 +#define regLSDMA_STATUS4_REG_BASE_IDX 0 +#define regLSDMA_CE_CTRL 0x0066 +#define regLSDMA_CE_CTRL_BASE_IDX 0 +#define regLSDMA_EXCEPTION_STATUS 0x0067 +#define regLSDMA_EXCEPTION_STATUS_BASE_IDX 0 +#define regLSDMA_INT_CNTL 0x0069 +#define regLSDMA_INT_CNTL_BASE_IDX 0 +#define regLSDMA_MEM_POWER_CTRL 0x006a +#define regLSDMA_MEM_POWER_CTRL_BASE_IDX 0 +#define regLSDMA_CLK_CTRL 0x006b
+#define regLSDMA_CLK_CTRL_BASE_IDX 0 +#define regLSDMA_CNTL 0x006c +#define regLSDMA_CNTL_BASE_IDX 0 +#define regLSDMA_CHICKEN_BITS 0x006d +#define regLSDMA_CHICKEN_BITS_BASE_IDX 0 +#define regLSDMA_PIO_SRC_ADDR_LO 0x0070 +#define regLSDMA_PIO_SRC_ADDR_LO_BASE_IDX 0 +#define regLSDMA_PIO_SRC_ADDR_HI 0x0071 +#define regLSDMA_PIO_SRC_ADDR_HI_BASE_IDX 0 +#define regLSDMA_PIO_DST_ADDR_LO 0x0072 +#define regLSDMA_PIO_DST_ADDR_LO_BASE_IDX 0 +#define regLSDMA_PIO_DST_ADDR_HI 0x0073 +#define regLSDMA_PIO_DST_ADDR_HI_BASE_IDX 0 +#define regLSDMA_PIO_COMMAND 0x0074 +#define regLSDMA_PIO_COMMAND_BASE_IDX 0 +#define regLSDMA_PIO_CONSTFILL_DATA 0x0075 +#define regLSDMA_PIO_CONSTFILL_DATA_BASE_IDX 0 +#define regLSDMA_PIO_CONTROL 0x0076 +#define regLSDMA_PIO_CONTROL_BASE_IDX 0 +#define regLSDMA_PIO_STATUS 0x007a +#define regLSDMA_PIO_STATUS_BASE_IDX 0 +#define regLSDMA_PF_PIO_STATUS 0x007b +#define regLSDMA_PF_PIO_STATUS_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_CNTL 0x0080 +#define regLSDMA_QUEUE0_RB_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_BASE 0x0081 +#define regLSDMA_QUEUE0_RB_BASE_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_BASE_HI 0x0082 +#define regLSDMA_QUEUE0_RB_BASE_HI_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_RPTR 0x0083 +#define regLSDMA_QUEUE0_RB_RPTR_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_RPTR_HI 0x0084 +#define regLSDMA_QUEUE0_RB_RPTR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_WPTR 0x0085 +#define regLSDMA_QUEUE0_RB_WPTR_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_WPTR_HI 0x0086 +#define regLSDMA_QUEUE0_RB_WPTR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_WPTR_POLL_CNTL 0x0087 +#define regLSDMA_QUEUE0_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x0088 +#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x0089 +#define regLSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_RPTR_ADDR_HI 0x008a +#define regLSDMA_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_RPTR_ADDR_LO 0x008b +#define regLSDMA_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define regLSDMA_QUEUE0_IB_CNTL 0x008c +#define regLSDMA_QUEUE0_IB_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE0_IB_RPTR 0x008d +#define regLSDMA_QUEUE0_IB_RPTR_BASE_IDX 0 +#define regLSDMA_QUEUE0_IB_OFFSET 0x008e +#define regLSDMA_QUEUE0_IB_OFFSET_BASE_IDX 0 +#define regLSDMA_QUEUE0_IB_BASE_LO 0x008f +#define regLSDMA_QUEUE0_IB_BASE_LO_BASE_IDX 0 +#define regLSDMA_QUEUE0_IB_BASE_HI 0x0090 +#define regLSDMA_QUEUE0_IB_BASE_HI_BASE_IDX 0 +#define regLSDMA_QUEUE0_IB_SIZE 0x0091 +#define regLSDMA_QUEUE0_IB_SIZE_BASE_IDX 0 +#define regLSDMA_QUEUE0_SKIP_CNTL 0x0092 +#define regLSDMA_QUEUE0_SKIP_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE0_CSA_ADDR_LO 0x0093 +#define regLSDMA_QUEUE0_CSA_ADDR_LO_BASE_IDX 0 +#define regLSDMA_QUEUE0_CSA_ADDR_HI 0x0094 +#define regLSDMA_QUEUE0_CSA_ADDR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_AQL_CNTL 0x0095 +#define regLSDMA_QUEUE0_RB_AQL_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE0_MINOR_PTR_UPDATE 0x0096 +#define regLSDMA_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0 +#define regLSDMA_QUEUE0_CNTL 0x0097 +#define regLSDMA_QUEUE0_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE0_RB_PREEMPT 0x0098 +#define regLSDMA_QUEUE0_RB_PREEMPT_BASE_IDX 0 +#define regLSDMA_QUEUE0_IB_SUB_REMAIN 0x0099 +#define regLSDMA_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0 +#define regLSDMA_QUEUE0_PREEMPT 0x009a +#define regLSDMA_QUEUE0_PREEMPT_BASE_IDX 0 +#define regLSDMA_QUEUE0_CONTEXT_STATUS 0x009b +#define regLSDMA_QUEUE0_CONTEXT_STATUS_BASE_IDX 0 +#define regLSDMA_QUEUE0_STATUS 0x009c +#define 
regLSDMA_QUEUE0_STATUS_BASE_IDX 0 +#define regLSDMA_QUEUE0_DOORBELL 0x009d +#define regLSDMA_QUEUE0_DOORBELL_BASE_IDX 0 +#define regLSDMA_QUEUE0_DOORBELL_OFFSET 0x009e +#define regLSDMA_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0 +#define regLSDMA_QUEUE0_DOORBELL_LOG 0x009f +#define regLSDMA_QUEUE0_DOORBELL_LOG_BASE_IDX 0 +#define regLSDMA_QUEUE0_WATERMARK 0x00a0 +#define regLSDMA_QUEUE0_WATERMARK_BASE_IDX 0 +#define regLSDMA_QUEUE0_DUMMY0 0x00a1 +#define regLSDMA_QUEUE0_DUMMY0_BASE_IDX 0 +#define regLSDMA_QUEUE0_DUMMY1 0x00a2 +#define regLSDMA_QUEUE0_DUMMY1_BASE_IDX 0 +#define regLSDMA_QUEUE0_DUMMY2 0x00a3 +#define regLSDMA_QUEUE0_DUMMY2_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA0 0x00c0 +#define regLSDMA_QUEUE0_MIDCMD_DATA0_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA1 0x00c1 +#define regLSDMA_QUEUE0_MIDCMD_DATA1_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA2 0x00c2 +#define regLSDMA_QUEUE0_MIDCMD_DATA2_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA3 0x00c3 +#define regLSDMA_QUEUE0_MIDCMD_DATA3_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA4 0x00c4 +#define regLSDMA_QUEUE0_MIDCMD_DATA4_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA5 0x00c5 +#define regLSDMA_QUEUE0_MIDCMD_DATA5_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA6 0x00c6 +#define regLSDMA_QUEUE0_MIDCMD_DATA6_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA7 0x00c7 +#define regLSDMA_QUEUE0_MIDCMD_DATA7_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA8 0x00c8 +#define regLSDMA_QUEUE0_MIDCMD_DATA8_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA9 0x00c9 +#define regLSDMA_QUEUE0_MIDCMD_DATA9_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_DATA10 0x00ca +#define regLSDMA_QUEUE0_MIDCMD_DATA10_BASE_IDX 0 +#define regLSDMA_QUEUE0_MIDCMD_CNTL 0x00cb +#define regLSDMA_QUEUE0_MIDCMD_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_CNTL 0x00d8 +#define regLSDMA_QUEUE1_RB_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_BASE 0x00d9 +#define regLSDMA_QUEUE1_RB_BASE_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_BASE_HI 0x00da +#define regLSDMA_QUEUE1_RB_BASE_HI_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_RPTR 0x00db +#define regLSDMA_QUEUE1_RB_RPTR_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_RPTR_HI 0x00dc +#define regLSDMA_QUEUE1_RB_RPTR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_WPTR 0x00dd +#define regLSDMA_QUEUE1_RB_WPTR_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_WPTR_HI 0x00de +#define regLSDMA_QUEUE1_RB_WPTR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_WPTR_POLL_CNTL 0x00df +#define regLSDMA_QUEUE1_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x00e0 +#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x00e1 +#define regLSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_RPTR_ADDR_HI 0x00e2 +#define regLSDMA_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_RPTR_ADDR_LO 0x00e3 +#define regLSDMA_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define regLSDMA_QUEUE1_IB_CNTL 0x00e4 +#define regLSDMA_QUEUE1_IB_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE1_IB_RPTR 0x00e5 +#define regLSDMA_QUEUE1_IB_RPTR_BASE_IDX 0 +#define regLSDMA_QUEUE1_IB_OFFSET 0x00e6 +#define regLSDMA_QUEUE1_IB_OFFSET_BASE_IDX 0 +#define regLSDMA_QUEUE1_IB_BASE_LO 0x00e7 +#define regLSDMA_QUEUE1_IB_BASE_LO_BASE_IDX 0 +#define regLSDMA_QUEUE1_IB_BASE_HI 0x00e8 +#define regLSDMA_QUEUE1_IB_BASE_HI_BASE_IDX 0 +#define regLSDMA_QUEUE1_IB_SIZE 0x00e9 +#define regLSDMA_QUEUE1_IB_SIZE_BASE_IDX 0 +#define regLSDMA_QUEUE1_SKIP_CNTL 0x00ea +#define regLSDMA_QUEUE1_SKIP_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE1_CSA_ADDR_LO 
0x00eb +#define regLSDMA_QUEUE1_CSA_ADDR_LO_BASE_IDX 0 +#define regLSDMA_QUEUE1_CSA_ADDR_HI 0x00ec +#define regLSDMA_QUEUE1_CSA_ADDR_HI_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_AQL_CNTL 0x00ed +#define regLSDMA_QUEUE1_RB_AQL_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE1_MINOR_PTR_UPDATE 0x00ee +#define regLSDMA_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0 +#define regLSDMA_QUEUE1_CNTL 0x00ef +#define regLSDMA_QUEUE1_CNTL_BASE_IDX 0 +#define regLSDMA_QUEUE1_RB_PREEMPT 0x00f0 +#define regLSDMA_QUEUE1_RB_PREEMPT_BASE_IDX 0 +#define regLSDMA_QUEUE1_IB_SUB_REMAIN 0x00f1 +#define regLSDMA_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0 +#define regLSDMA_QUEUE1_PREEMPT 0x00f2 +#define regLSDMA_QUEUE1_PREEMPT_BASE_IDX 0 +#define regLSDMA_QUEUE1_CONTEXT_STATUS 0x00f3 +#define regLSDMA_QUEUE1_CONTEXT_STATUS_BASE_IDX 0 +#define regLSDMA_QUEUE1_STATUS 0x00f4 +#define regLSDMA_QUEUE1_STATUS_BASE_IDX 0 +#define regLSDMA_QUEUE1_DOORBELL 0x00f5 +#define regLSDMA_QUEUE1_DOORBELL_BASE_IDX 0 +#define regLSDMA_QUEUE1_DOORBELL_OFFSET 0x00f6 +#define regLSDMA_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0 +#define regLSDMA_QUEUE1_DOORBELL_LOG 0x00f7 +#define regLSDMA_QUEUE1_DOORBELL_LOG_BASE_IDX 0 +#define regLSDMA_QUEUE1_WATERMARK 0x00f8 +#define regLSDMA_QUEUE1_WATERMARK_BASE_IDX 0 +#define regLSDMA_QUEUE1_DUMMY0 0x00f9 +#define regLSDMA_QUEUE1_DUMMY0_BASE_IDX 0 +#define regLSDMA_QUEUE1_DUMMY1 0x00fa +#define regLSDMA_QUEUE1_DUMMY1_BASE_IDX 0 +#define regLSDMA_QUEUE1_DUMMY2 0x00fb +#define regLSDMA_QUEUE1_DUMMY2_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA0 0x0118 +#define regLSDMA_QUEUE1_MIDCMD_DATA0_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA1 0x0119 +#define regLSDMA_QUEUE1_MIDCMD_DATA1_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA2 0x011a +#define regLSDMA_QUEUE1_MIDCMD_DATA2_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA3 0x011b +#define regLSDMA_QUEUE1_MIDCMD_DATA3_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA4 0x011c +#define regLSDMA_QUEUE1_MIDCMD_DATA4_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA5 0x011d +#define regLSDMA_QUEUE1_MIDCMD_DATA5_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA6 0x011e +#define regLSDMA_QUEUE1_MIDCMD_DATA6_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA7 0x011f +#define regLSDMA_QUEUE1_MIDCMD_DATA7_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA8 0x0120 +#define regLSDMA_QUEUE1_MIDCMD_DATA8_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA9 0x0121 +#define regLSDMA_QUEUE1_MIDCMD_DATA9_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_DATA10 0x0122 +#define regLSDMA_QUEUE1_MIDCMD_DATA10_BASE_IDX 0 +#define regLSDMA_QUEUE1_MIDCMD_CNTL 0x0123 +#define regLSDMA_QUEUE1_MIDCMD_CNTL_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h new file mode 100644 index 000000000000..644a5d066ab2 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h @@ -0,0 +1,1411 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _lsdma_7_0_0_SH_MASK_HEADER +#define _lsdma_7_0_0_SH_MASK_HEADER + + +// addressBlock: lsdma0_lsdma0dec +//LSDMA_UCODE_ADDR +#define LSDMA_UCODE_ADDR__VALUE__SHIFT 0x0 +#define LSDMA_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//LSDMA_UCODE_DATA +#define LSDMA_UCODE_DATA__VALUE__SHIFT 0x0 +#define LSDMA_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//LSDMA_ERROR_INJECT_CNTL +#define LSDMA_ERROR_INJECT_CNTL__ENABLE_IRRITATION__SHIFT 0x0 +#define LSDMA_ERROR_INJECT_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x1 +#define LSDMA_ERROR_INJECT_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x2 +#define LSDMA_ERROR_INJECT_CNTL__ENABLE_MEMHUB_READ_POISON_INJECT__SHIFT 0x8 +#define LSDMA_ERROR_INJECT_CNTL__ENABLE_MEMHUB_ATOMIC_POISON_INJECT__SHIFT 0x9 +#define LSDMA_ERROR_INJECT_CNTL__ENABLE_IRRITATION_MASK 0x00000001L +#define LSDMA_ERROR_INJECT_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000002L +#define LSDMA_ERROR_INJECT_CNTL__ENABLE_ERROR_INJECT_MASK 0x0000000CL +//LSDMA_ERROR_INJECT_SELECT +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF0__SHIFT 0x0 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF1__SHIFT 0x1 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF2__SHIFT 0x2 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF3__SHIFT 0x3 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF4__SHIFT 0x4 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF5__SHIFT 0x5 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF6__SHIFT 0x6 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF7__SHIFT 0x7 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF8__SHIFT 0x8 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF9__SHIFT 0x9 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF10__SHIFT 0xa +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF11__SHIFT 0xb +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF12__SHIFT 0xc +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF13__SHIFT 0xd +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF14__SHIFT 0xe +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF15__SHIFT 0xf +#define LSDMA_ERROR_INJECT_SELECT__UCODE_BUF__SHIFT 0x10 +#define LSDMA_ERROR_INJECT_SELECT__RB_CMD_BUF__SHIFT 0x11 +#define LSDMA_ERROR_INJECT_SELECT__IB_CMD_BUF__SHIFT 0x12 +#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RD_FIFO__SHIFT 0x13 +#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RDBST_FIFO__SHIFT 0x14 +#define LSDMA_ERROR_INJECT_SELECT__UTCL1_WR_FIFO__SHIFT 0x15 +#define LSDMA_ERROR_INJECT_SELECT__DATA_LUT_FIFO__SHIFT 0x16 +#define 
LSDMA_ERROR_INJECT_SELECT__SPLIT_DATA_FIFO__SHIFT 0x17 +#define LSDMA_ERROR_INJECT_SELECT__MC_WR_ADDR_FIFO__SHIFT 0x18 +#define LSDMA_ERROR_INJECT_SELECT__MC_RDRET_BUF__SHIFT 0x19 +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF0_MASK 0x00000001L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF1_MASK 0x00000002L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF2_MASK 0x00000004L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF3_MASK 0x00000008L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF4_MASK 0x00000010L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF5_MASK 0x00000020L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF6_MASK 0x00000040L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF7_MASK 0x00000080L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF8_MASK 0x00000100L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF9_MASK 0x00000200L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF10_MASK 0x00000400L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF11_MASK 0x00000800L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF12_MASK 0x00001000L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF13_MASK 0x00002000L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF14_MASK 0x00004000L +#define LSDMA_ERROR_INJECT_SELECT__MBANK_DATA_BUF15_MASK 0x00008000L +#define LSDMA_ERROR_INJECT_SELECT__UCODE_BUF_MASK 0x00010000L +#define LSDMA_ERROR_INJECT_SELECT__RB_CMD_BUF_MASK 0x00020000L +#define LSDMA_ERROR_INJECT_SELECT__IB_CMD_BUF_MASK 0x00040000L +#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RD_FIFO_MASK 0x00080000L +#define LSDMA_ERROR_INJECT_SELECT__UTCL1_RDBST_FIFO_MASK 0x00100000L +#define LSDMA_ERROR_INJECT_SELECT__UTCL1_WR_FIFO_MASK 0x00200000L +#define LSDMA_ERROR_INJECT_SELECT__DATA_LUT_FIFO_MASK 0x00400000L +#define LSDMA_ERROR_INJECT_SELECT__SPLIT_DATA_FIFO_MASK 0x00800000L +#define LSDMA_ERROR_INJECT_SELECT__MC_WR_ADDR_FIFO_MASK 0x01000000L +#define LSDMA_ERROR_INJECT_SELECT__MC_RDRET_BUF_MASK 0x02000000L +#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_ADDR__SHIFT 0x0 +#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_DATA__SHIFT 0x1 +#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_ADDR_MASK 0x00000001L +#define LSDMA_PUB_REG_TYPE0__LSDMA_UCODE_DATA_MASK 0x00000002L +#define LSDMA_PUB_REG_TYPE3__LSDMA_CLK_CTRL__SHIFT 0xb +#define LSDMA_PUB_REG_TYPE3__LSDMA_CLK_CTRL_MASK 0x00000800L +//LSDMA_CONTEXT_GROUP_BOUNDARY +#define LSDMA_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define LSDMA_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//LSDMA_RB_RPTR_FETCH_HI +#define LSDMA_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define LSDMA_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_SEM_WAIT_FAIL_TIMER_CNTL +#define LSDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define LSDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//LSDMA_RB_RPTR_FETCH +#define LSDMA_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define LSDMA_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//LSDMA_IB_OFFSET_FETCH +#define LSDMA_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define LSDMA_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//LSDMA_PROGRAM +#define LSDMA_PROGRAM__STREAM__SHIFT 0x0 +#define LSDMA_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//LSDMA_STATUS_REG +#define LSDMA_STATUS_REG__IDLE__SHIFT 0x0 +#define LSDMA_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define LSDMA_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define LSDMA_STATUS_REG__RB_FULL__SHIFT 0x3 +#define LSDMA_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define LSDMA_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define LSDMA_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define LSDMA_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define 
LSDMA_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define LSDMA_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define LSDMA_STATUS_REG__EX_IDLE__SHIFT 0xa +#define LSDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define LSDMA_STATUS_REG__PACKET_READY__SHIFT 0xc +#define LSDMA_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define LSDMA_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define LSDMA_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define LSDMA_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define LSDMA_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define LSDMA_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define LSDMA_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define LSDMA_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define LSDMA_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define LSDMA_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define LSDMA_STATUS_REG__DRM_IDLE__SHIFT 0x17 +#define LSDMA_STATUS_REG__Reserved__SHIFT 0x18 +#define LSDMA_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define LSDMA_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define LSDMA_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define LSDMA_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define LSDMA_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define LSDMA_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define LSDMA_STATUS_REG__IDLE_MASK 0x00000001L +#define LSDMA_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define LSDMA_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define LSDMA_STATUS_REG__RB_FULL_MASK 0x00000008L +#define LSDMA_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define LSDMA_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define LSDMA_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define LSDMA_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define LSDMA_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define LSDMA_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define LSDMA_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define LSDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define LSDMA_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define LSDMA_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define LSDMA_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define LSDMA_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define LSDMA_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define LSDMA_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define LSDMA_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define LSDMA_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define LSDMA_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define LSDMA_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define LSDMA_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define LSDMA_STATUS_REG__Reserved_MASK 0x01000000L +#define LSDMA_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define LSDMA_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define LSDMA_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define LSDMA_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define LSDMA_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define LSDMA_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//LSDMA_STATUS1_REG +#define LSDMA_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define LSDMA_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define LSDMA_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define LSDMA_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define LSDMA_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define LSDMA_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define LSDMA_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define LSDMA_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7 +#define LSDMA_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8 +#define LSDMA_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define LSDMA_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define LSDMA_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb +#define 
LSDMA_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc +#define LSDMA_STATUS1_REG__EX_START__SHIFT 0xd +#define LSDMA_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe +#define LSDMA_STATUS1_REG__CE_RD_STALL__SHIFT 0xf +#define LSDMA_STATUS1_REG__CE_WR_STALL__SHIFT 0x10 +#define LSDMA_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define LSDMA_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define LSDMA_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define LSDMA_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define LSDMA_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define LSDMA_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define LSDMA_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define LSDMA_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define LSDMA_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define LSDMA_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L +#define LSDMA_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L +#define LSDMA_STATUS1_REG__EX_START_MASK 0x00002000L +#define LSDMA_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L +#define LSDMA_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L +//LSDMA_RD_BURST_CNTL +#define LSDMA_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define LSDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define LSDMA_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define LSDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//LSDMA_HBM_PAGE_CONFIG +#define LSDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define LSDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L +//LSDMA_UCODE_CHECKSUM +#define LSDMA_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define LSDMA_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//LSDMA_FREEZE +#define LSDMA_FREEZE__PREEMPT__SHIFT 0x0 +#define LSDMA_FREEZE__FREEZE__SHIFT 0x4 +#define LSDMA_FREEZE__FROZEN__SHIFT 0x5 +#define LSDMA_FREEZE__F32_FREEZE__SHIFT 0x6 +#define LSDMA_FREEZE__PREEMPT_MASK 0x00000001L +#define LSDMA_FREEZE__FREEZE_MASK 0x00000010L +#define LSDMA_FREEZE__FROZEN_MASK 0x00000020L +#define LSDMA_FREEZE__F32_FREEZE_MASK 0x00000040L +//LSDMA_DCC_CNTL +#define LSDMA_DCC_CNTL__DCC_FORCE_BYPASS__SHIFT 0x0 +#define LSDMA_DCC_CNTL__DCC_FORCE_BYPASS_MASK 0x00000001L +//LSDMA_POWER_GATING +#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_CONDITION__SHIFT 0x0 +#define LSDMA_POWER_GATING__LSDMA_POWER_ON_CONDITION__SHIFT 0x1 +#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_REQ__SHIFT 0x2 +#define LSDMA_POWER_GATING__LSDMA_POWER_ON_REQ__SHIFT 0x3 +#define LSDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4 +#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_CONDITION_MASK 0x00000001L +#define LSDMA_POWER_GATING__LSDMA_POWER_ON_CONDITION_MASK 0x00000002L +#define LSDMA_POWER_GATING__LSDMA_POWER_OFF_REQ_MASK 0x00000004L +#define LSDMA_POWER_GATING__LSDMA_POWER_ON_REQ_MASK 0x00000008L +#define LSDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L +//LSDMA_PGFSM_CONFIG +#define LSDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0 +#define LSDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8 +#define LSDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9 +#define LSDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa +#define LSDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb +#define LSDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc +#define LSDMA_PGFSM_CONFIG__READ__SHIFT 0xd +#define LSDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b +#define LSDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c +#define LSDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL +#define LSDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L +#define LSDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L +#define LSDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L +#define LSDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L +#define LSDMA_PGFSM_CONFIG__WRITE_MASK 
0x00001000L +#define LSDMA_PGFSM_CONFIG__READ_MASK 0x00002000L +#define LSDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L +#define LSDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L +//LSDMA_PGFSM_WRITE +#define LSDMA_PGFSM_WRITE__VALUE__SHIFT 0x0 +#define LSDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL +//LSDMA_PGFSM_READ +#define LSDMA_PGFSM_READ__VALUE__SHIFT 0x0 +#define LSDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL +//LSDMA_BA_THRESHOLD +#define LSDMA_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define LSDMA_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define LSDMA_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define LSDMA_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//LSDMA_ID +#define LSDMA_ID__DEVICE_ID__SHIFT 0x0 +#define LSDMA_ID__DEVICE_ID_MASK 0x000000FFL +//LSDMA_VERSION +#define LSDMA_VERSION__MINVER__SHIFT 0x0 +#define LSDMA_VERSION__MAJVER__SHIFT 0x8 +#define LSDMA_VERSION__REV__SHIFT 0x10 +#define LSDMA_VERSION__MINVER_MASK 0x0000007FL +#define LSDMA_VERSION__MAJVER_MASK 0x00007F00L +#define LSDMA_VERSION__REV_MASK 0x003F0000L +//LSDMA_EDC_COUNTER +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF0_SED__SHIFT 0x0 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF1_SED__SHIFT 0x2 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF2_SED__SHIFT 0x4 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF3_SED__SHIFT 0x6 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF4_SED__SHIFT 0x8 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF5_SED__SHIFT 0xa +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF6_SED__SHIFT 0xc +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF8_SED__SHIFT 0x10 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF9_SED__SHIFT 0x12 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF10_SED__SHIFT 0x14 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF11_SED__SHIFT 0x16 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF12_SED__SHIFT 0x18 +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF13_SED__SHIFT 0x1a +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF14_SED__SHIFT 0x1c +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF15_SED__SHIFT 0x1e +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF0_SED_MASK 0x00000003L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF1_SED_MASK 0x0000000CL +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF2_SED_MASK 0x00000030L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF3_SED_MASK 0x000000C0L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF4_SED_MASK 0x00000300L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF5_SED_MASK 0x00000C00L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF6_SED_MASK 0x00003000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF7_SED_MASK 0x0000C000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF8_SED_MASK 0x00030000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF9_SED_MASK 0x000C0000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF10_SED_MASK 0x00300000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF11_SED_MASK 0x00C00000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF12_SED_MASK 0x03000000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF13_SED_MASK 0x0C000000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF14_SED_MASK 0x30000000L +#define LSDMA_EDC_COUNTER__LSDMA_MBANK_DATA_BUF15_SED_MASK 0xC0000000L +//LSDMA_EDC_COUNTER2 +#define LSDMA_EDC_COUNTER2__LSDMA_UCODE_BUF_SED__SHIFT 0x0 +#define LSDMA_EDC_COUNTER2__LSDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define LSDMA_EDC_COUNTER2__LSDMA_IB_CMD_BUF_SED__SHIFT 0x4 +#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RD_FIFO_SED__SHIFT 0x6 +#define 
LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x8 +#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_WR_FIFO_SED__SHIFT 0xa +#define LSDMA_EDC_COUNTER2__LSDMA_DATA_LUT_FIFO_SED__SHIFT 0xc +#define LSDMA_EDC_COUNTER2__LSDMA_SPLIT_DATA_BUF_SED__SHIFT 0xe +#define LSDMA_EDC_COUNTER2__LSDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10 +#define LSDMA_EDC_COUNTER2__LSDMA_MC_RDRET_BUF_SED__SHIFT 0x12 +#define LSDMA_EDC_COUNTER2__LSDMA_UCODE_BUF_SED_MASK 0x00000003L +#define LSDMA_EDC_COUNTER2__LSDMA_RB_CMD_BUF_SED_MASK 0x0000000CL +#define LSDMA_EDC_COUNTER2__LSDMA_IB_CMD_BUF_SED_MASK 0x00000030L +#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RD_FIFO_SED_MASK 0x000000C0L +#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000300L +#define LSDMA_EDC_COUNTER2__LSDMA_UTCL1_WR_FIFO_SED_MASK 0x00000C00L +#define LSDMA_EDC_COUNTER2__LSDMA_DATA_LUT_FIFO_SED_MASK 0x00003000L +#define LSDMA_EDC_COUNTER2__LSDMA_SPLIT_DATA_BUF_SED_MASK 0x0000C000L +#define LSDMA_EDC_COUNTER2__LSDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00030000L +#define LSDMA_EDC_COUNTER2__LSDMA_MC_RDRET_BUF_SED_MASK 0x000C0000L +//LSDMA_STATUS2_REG +#define LSDMA_STATUS2_REG__ID__SHIFT 0x0 +#define LSDMA_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define LSDMA_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define LSDMA_STATUS2_REG__ID_MASK 0x00000007L +#define LSDMA_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define LSDMA_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//LSDMA_ATOMIC_CNTL +#define LSDMA_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define LSDMA_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +//LSDMA_ATOMIC_PREOP_LO +#define LSDMA_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define LSDMA_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//LSDMA_ATOMIC_PREOP_HI +#define LSDMA_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define LSDMA_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//LSDMA_UTCL1_CNTL +#define LSDMA_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define LSDMA_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define LSDMA_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define LSDMA_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define LSDMA_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define LSDMA_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define LSDMA_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define LSDMA_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define LSDMA_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define LSDMA_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define LSDMA_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define LSDMA_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//LSDMA_UTCL1_WATERMK +#define LSDMA_UTCL1_WATERMK__REQ_WATERMK__SHIFT 0x0 +#define LSDMA_UTCL1_WATERMK__REQ_DEPTH__SHIFT 0x3 +#define LSDMA_UTCL1_WATERMK__PAGE_WATERMK__SHIFT 0x5 +#define LSDMA_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x8 +#define LSDMA_UTCL1_WATERMK__RESERVED__SHIFT 0x10 +#define LSDMA_UTCL1_WATERMK__REQ_WATERMK_MASK 0x00000007L +#define LSDMA_UTCL1_WATERMK__REQ_DEPTH_MASK 0x00000018L +#define LSDMA_UTCL1_WATERMK__PAGE_WATERMK_MASK 0x000000E0L +#define LSDMA_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x0000FF00L +#define LSDMA_UTCL1_WATERMK__RESERVED_MASK 0xFFFF0000L +//LSDMA_UTCL1_RD_STATUS +#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define 
LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define LSDMA_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define LSDMA_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define LSDMA_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define LSDMA_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define LSDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define LSDMA_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define LSDMA_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define LSDMA_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define LSDMA_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define LSDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define LSDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define LSDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define LSDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define LSDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define LSDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define LSDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define LSDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define LSDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define LSDMA_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define LSDMA_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define LSDMA_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define LSDMA_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define LSDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define LSDMA_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define LSDMA_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define LSDMA_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define LSDMA_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//LSDMA_UTCL1_WR_STATUS +#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define 
LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define LSDMA_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define LSDMA_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define LSDMA_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define LSDMA_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define LSDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define LSDMA_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define LSDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define LSDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define LSDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define LSDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define LSDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define LSDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define LSDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define LSDMA_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define LSDMA_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define LSDMA_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define LSDMA_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define LSDMA_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define LSDMA_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define LSDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define LSDMA_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define LSDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define LSDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//LSDMA_UTCL1_INV0 +#define LSDMA_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define LSDMA_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define LSDMA_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define LSDMA_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define LSDMA_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define LSDMA_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define 
LSDMA_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define LSDMA_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define LSDMA_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define LSDMA_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define LSDMA_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define LSDMA_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define LSDMA_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define LSDMA_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define LSDMA_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define LSDMA_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define LSDMA_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define LSDMA_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define LSDMA_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define LSDMA_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define LSDMA_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define LSDMA_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define LSDMA_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define LSDMA_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define LSDMA_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define LSDMA_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define LSDMA_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define LSDMA_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//LSDMA_UTCL1_INV1 +#define LSDMA_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define LSDMA_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//LSDMA_UTCL1_INV2 +#define LSDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define LSDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//LSDMA_UTCL1_RD_XNACK0 +#define LSDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define LSDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//LSDMA_UTCL1_RD_XNACK1 +#define LSDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define LSDMA_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define LSDMA_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define LSDMA_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define LSDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define LSDMA_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define LSDMA_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define LSDMA_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//LSDMA_UTCL1_WR_XNACK0 +#define LSDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define LSDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//LSDMA_UTCL1_WR_XNACK1 +#define LSDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define LSDMA_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define LSDMA_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define LSDMA_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define LSDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define LSDMA_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define LSDMA_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define LSDMA_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//LSDMA_UTCL1_TIMEOUT +#define LSDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define LSDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define LSDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define LSDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//LSDMA_UTCL1_PAGE +#define LSDMA_UTCL1_PAGE__INVALID_ADDR__SHIFT 0x0 +#define LSDMA_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define LSDMA_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5 +#define LSDMA_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define LSDMA_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define LSDMA_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define LSDMA_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L +#define LSDMA_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define LSDMA_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//LSDMA_RELAX_ORDERING_LUT +#define 
LSDMA_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define LSDMA_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define LSDMA_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define LSDMA_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define LSDMA_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define LSDMA_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define LSDMA_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define LSDMA_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define LSDMA_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define LSDMA_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define LSDMA_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define LSDMA_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define LSDMA_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define LSDMA_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define LSDMA_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define LSDMA_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define LSDMA_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define LSDMA_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define LSDMA_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define LSDMA_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define LSDMA_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define LSDMA_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define LSDMA_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define LSDMA_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define LSDMA_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define LSDMA_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define LSDMA_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define LSDMA_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define LSDMA_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define LSDMA_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define LSDMA_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define LSDMA_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define LSDMA_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define LSDMA_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define LSDMA_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define LSDMA_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define LSDMA_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define LSDMA_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//LSDMA_CHICKEN_BITS_2 +#define LSDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define LSDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4 +#define LSDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +#define LSDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L +//LSDMA_STATUS3_REG +#define LSDMA_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define LSDMA_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define LSDMA_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define LSDMA_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define LSDMA_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define LSDMA_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define LSDMA_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define LSDMA_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define LSDMA_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define LSDMA_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//LSDMA_PHYSICAL_ADDR_LO +#define LSDMA_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define LSDMA_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define LSDMA_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define LSDMA_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define LSDMA_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define LSDMA_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define LSDMA_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define LSDMA_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//LSDMA_PHYSICAL_ADDR_HI 
+#define LSDMA_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define LSDMA_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//LSDMA_ECC_CNTL +#define LSDMA_ECC_CNTL__ECC_DISABLE__SHIFT 0x0 +#define LSDMA_ECC_CNTL__ECC_DISABLE_MASK 0x00000001L +//LSDMA_ERROR_LOG +#define LSDMA_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define LSDMA_ERROR_LOG__STATUS__SHIFT 0x10 +#define LSDMA_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define LSDMA_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//LSDMA_PUB_DUMMY0 +#define LSDMA_PUB_DUMMY0__DUMMY__SHIFT 0x0 +#define LSDMA_PUB_DUMMY0__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_PUB_DUMMY1 +#define LSDMA_PUB_DUMMY1__DUMMY__SHIFT 0x0 +#define LSDMA_PUB_DUMMY1__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_PUB_DUMMY2 +#define LSDMA_PUB_DUMMY2__DUMMY__SHIFT 0x0 +#define LSDMA_PUB_DUMMY2__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_PUB_DUMMY3 +#define LSDMA_PUB_DUMMY3__DUMMY__SHIFT 0x0 +#define LSDMA_PUB_DUMMY3__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_F32_COUNTER +#define LSDMA_F32_COUNTER__VALUE__SHIFT 0x0 +#define LSDMA_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//LSDMA_PERFCNT_PERFCOUNTER0_CFG +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define LSDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//LSDMA_PERFCNT_PERFCOUNTER1_CFG +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define LSDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define LSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//LSDMA_PERFCNT_MISC_CNTL +#define LSDMA_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0 +#define LSDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT__SHIFT 0x10 +#define LSDMA_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL +#define 
LSDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT_MASK 0x00010000L +//LSDMA_PERFCNT_PERFCOUNTER_LO +#define LSDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define LSDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//LSDMA_PERFCNT_PERFCOUNTER_HI +#define LSDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define LSDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define LSDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define LSDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//LSDMA_CRD_CNTL +#define LSDMA_CRD_CNTL__DRM_CREDIT__SHIFT 0x0 +#define LSDMA_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define LSDMA_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define LSDMA_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define LSDMA_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//LSDMA_ULV_CNTL +#define LSDMA_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define LSDMA_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define LSDMA_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define LSDMA_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define LSDMA_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define LSDMA_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define LSDMA_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define LSDMA_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define LSDMA_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define LSDMA_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define LSDMA_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define LSDMA_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//LSDMA_EA_DBIT_ADDR_DATA +#define LSDMA_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define LSDMA_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//LSDMA_EA_DBIT_ADDR_INDEX +#define LSDMA_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define LSDMA_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//LSDMA_STATUS4_REG +#define LSDMA_STATUS4_REG__IDLE__SHIFT 0x0 +#define LSDMA_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2 +#define LSDMA_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3 +#define LSDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING__SHIFT 0x4 +#define LSDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING__SHIFT 0x5 +#define LSDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x6 +#define LSDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x7 +#define LSDMA_STATUS4_REG__REG_POLLING__SHIFT 0x8 +#define LSDMA_STATUS4_REG__MEM_POLLING__SHIFT 0x9 +#define LSDMA_STATUS4_REG__UTCL2_RD_XNACK__SHIFT 0xa +#define LSDMA_STATUS4_REG__UTCL2_WR_XNACK__SHIFT 0xc +#define LSDMA_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0xe +#define LSDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x12 +#define LSDMA_STATUS4_REG__SRIOV_LSDMA_EXECUTING_CMD__SHIFT 0x13 +#define LSDMA_STATUS4_REG__IDLE_MASK 0x00000001L +#define LSDMA_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L +#define LSDMA_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L +#define LSDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING_MASK 0x00000010L +#define LSDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING_MASK 0x00000020L +#define LSDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000040L +#define LSDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000080L +#define LSDMA_STATUS4_REG__REG_POLLING_MASK 0x00000100L +#define LSDMA_STATUS4_REG__MEM_POLLING_MASK 0x00000200L +#define LSDMA_STATUS4_REG__UTCL2_RD_XNACK_MASK 0x00000C00L +#define LSDMA_STATUS4_REG__UTCL2_WR_XNACK_MASK 0x00003000L +#define LSDMA_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x0003C000L +#define LSDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00040000L +#define LSDMA_STATUS4_REG__SRIOV_LSDMA_EXECUTING_CMD_MASK 0x00080000L +//LSDMA_CE_CTRL +#define LSDMA_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0 +#define LSDMA_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3 +#define 
LSDMA_CE_CTRL__RESERVED_7_5__SHIFT 0x5 +#define LSDMA_CE_CTRL__RESERVED__SHIFT 0x8 +#define LSDMA_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L +#define LSDMA_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L +#define LSDMA_CE_CTRL__RESERVED_7_5_MASK 0x000000E0L +#define LSDMA_CE_CTRL__RESERVED_MASK 0xFFFFFF00L +//LSDMA_EXCEPTION_STATUS +#define LSDMA_EXCEPTION_STATUS__RB_FETCH_ECC__SHIFT 0x0 +#define LSDMA_EXCEPTION_STATUS__IB_FETCH_ECC__SHIFT 0x1 +#define LSDMA_EXCEPTION_STATUS__COPY_CMD_ECC__SHIFT 0x2 +#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_ECC__SHIFT 0x3 +#define LSDMA_EXCEPTION_STATUS__SRAM_ECC__SHIFT 0x6 +#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_GEN_ERR__SHIFT 0x8 +#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_GEN_ERR__SHIFT 0x9 +#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_GEN_ERR__SHIFT 0xa +#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_GEN_ERR__SHIFT 0xb +#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_GEN_ERR__SHIFT 0xd +#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_PRT__SHIFT 0x10 +#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_PRT__SHIFT 0x11 +#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_PRT__SHIFT 0x12 +#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_PRT__SHIFT 0x13 +#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_PRT__SHIFT 0x15 +#define LSDMA_EXCEPTION_STATUS__INVALID_ADDR__SHIFT 0x18 +#define LSDMA_EXCEPTION_STATUS__RB_FETCH_ECC_MASK 0x00000001L +#define LSDMA_EXCEPTION_STATUS__IB_FETCH_ECC_MASK 0x00000002L +#define LSDMA_EXCEPTION_STATUS__COPY_CMD_ECC_MASK 0x00000004L +#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_ECC_MASK 0x00000008L +#define LSDMA_EXCEPTION_STATUS__SRAM_ECC_MASK 0x00000040L +#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_GEN_ERR_MASK 0x00000100L +#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_GEN_ERR_MASK 0x00000200L +#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_GEN_ERR_MASK 0x00000400L +#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_GEN_ERR_MASK 0x00000800L +#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_GEN_ERR_MASK 0x00002000L +#define LSDMA_EXCEPTION_STATUS__RB_FETCH_NACK_PRT_MASK 0x00010000L +#define LSDMA_EXCEPTION_STATUS__IB_FETCH_NACK_PRT_MASK 0x00020000L +#define LSDMA_EXCEPTION_STATUS__COPY_CMD_NACK_PRT_MASK 0x00040000L +#define LSDMA_EXCEPTION_STATUS__NON_COPY_CMD_NACK_PRT_MASK 0x00080000L +#define LSDMA_EXCEPTION_STATUS__RPTR_WB_NACK_PRT_MASK 0x00200000L +//LSDMA_INT_CNTL +#define LSDMA_INT_CNTL__ATOMIC_RTN_DONE_INT_ENABLE__SHIFT 0x0 +#define LSDMA_INT_CNTL__TRAP_INT_ENABLE__SHIFT 0x1 +#define LSDMA_INT_CNTL__SRBM_WRITE_INT_ENABLE__SHIFT 0x2 +#define LSDMA_INT_CNTL__CTX_EMPTY_INT_ENABLE__SHIFT 0x3 +#define LSDMA_INT_CNTL__FROZEN_INT_ENABLE__SHIFT 0x4 +#define LSDMA_INT_CNTL__PREEMPT_INT_ENABLE__SHIFT 0x5 +#define LSDMA_INT_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x6 +#define LSDMA_INT_CNTL__ATOMIC_TIMEOUT_INT_ENABLE__SHIFT 0x7 +#define LSDMA_INT_CNTL__POLL_TIMEOUT_INT_ENABLE__SHIFT 0x8 +#define LSDMA_INT_CNTL__INVALID_ADDR_INT_ENABLE__SHIFT 0x9 +#define LSDMA_INT_CNTL__NACK_GEN_ERR_INT_ENABLE__SHIFT 0xa +#define LSDMA_INT_CNTL__NACK_PRT_INT_ENABLE__SHIFT 0xb +#define LSDMA_INT_CNTL__ECC_INT_ENABLE__SHIFT 0xc +#define LSDMA_INT_CNTL__ATOMIC_RTN_DONE_INT_ENABLE_MASK 0x00000001L +#define LSDMA_INT_CNTL__TRAP_INT_ENABLE_MASK 0x00000002L +#define LSDMA_INT_CNTL__SRBM_WRITE_INT_ENABLE_MASK 0x00000004L +#define LSDMA_INT_CNTL__CTX_EMPTY_INT_ENABLE_MASK 0x00000008L +#define LSDMA_INT_CNTL__FROZEN_INT_ENABLE_MASK 0x00000010L +#define LSDMA_INT_CNTL__PREEMPT_INT_ENABLE_MASK 0x00000020L +#define LSDMA_INT_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x00000040L +#define 
LSDMA_INT_CNTL__ATOMIC_TIMEOUT_INT_ENABLE_MASK 0x00000080L +#define LSDMA_INT_CNTL__POLL_TIMEOUT_INT_ENABLE_MASK 0x00000100L +#define LSDMA_INT_CNTL__NACK_GEN_ERR_INT_ENABLE_MASK 0x00000400L +#define LSDMA_INT_CNTL__NACK_PRT_INT_ENABLE_MASK 0x00000800L +#define LSDMA_INT_CNTL__ECC_INT_ENABLE_MASK 0x00001000L +//LSDMA_MEM_POWER_CTRL +#define LSDMA_MEM_POWER_CTRL__MEM_POWER_CTRL_EN__SHIFT 0x0 +#define LSDMA_MEM_POWER_CTRL__MEM_POWER_CTRL_EN_MASK 0x00000001L +//LSDMA_CLK_CTRL +#define LSDMA_CLK_CTRL__RESERVED__SHIFT 0x1 +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define LSDMA_CLK_CTRL__RESERVED_MASK 0x00FFFFFEL +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define LSDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//LSDMA_CNTL +#define LSDMA_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define LSDMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define LSDMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define LSDMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define LSDMA_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define LSDMA_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x6 +#define LSDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define LSDMA_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define LSDMA_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13 +#define LSDMA_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define LSDMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define LSDMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define LSDMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define LSDMA_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define LSDMA_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000040L +#define LSDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define LSDMA_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +//LSDMA_CHICKEN_BITS +#define LSDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define LSDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define LSDMA_CHICKEN_BITS__F32_MGCG_ENABLE__SHIFT 0x3 +#define LSDMA_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define LSDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define LSDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define LSDMA_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define LSDMA_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12 +#define LSDMA_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define LSDMA_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define LSDMA_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x18 +#define LSDMA_CHICKEN_BITS__DRAM_ECC_NACK_F32_RESET_ENABLE__SHIFT 0x19 +#define LSDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define LSDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define LSDMA_CHICKEN_BITS__F32_MGCG_ENABLE_MASK 0x00000008L +#define LSDMA_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define LSDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L +#define 
LSDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define LSDMA_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define LSDMA_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L +#define LSDMA_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define LSDMA_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +//LSDMA_PIO_SRC_ADDR_LO +#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO__SHIFT 0x0 +#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO_MASK 0xFFFFFFFFL +//LSDMA_PIO_SRC_ADDR_HI +#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0 +#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0xFFFFFFFFL +//LSDMA_PIO_DST_ADDR_LO +#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO__SHIFT 0x0 +#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO_MASK 0xFFFFFFFFL +//LSDMA_PIO_DST_ADDR_HI +#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0 +#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI_MASK 0xFFFFFFFFL +//LSDMA_PIO_COMMAND +#define LSDMA_PIO_COMMAND__BYTE_COUNT__SHIFT 0x0 +#define LSDMA_PIO_COMMAND__SRC_LOCATION__SHIFT 0x1a +#define LSDMA_PIO_COMMAND__DST_LOCATION__SHIFT 0x1b +#define LSDMA_PIO_COMMAND__SRC_ADDR_INC__SHIFT 0x1c +#define LSDMA_PIO_COMMAND__DST_ADDR_INC__SHIFT 0x1d +#define LSDMA_PIO_COMMAND__OVERLAP_DISABLE__SHIFT 0x1e +#define LSDMA_PIO_COMMAND__CONSTANT_FILL__SHIFT 0x1f +#define LSDMA_PIO_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL +#define LSDMA_PIO_COMMAND__SRC_LOCATION_MASK 0x04000000L +#define LSDMA_PIO_COMMAND__DST_LOCATION_MASK 0x08000000L +#define LSDMA_PIO_COMMAND__SRC_ADDR_INC_MASK 0x10000000L +#define LSDMA_PIO_COMMAND__DST_ADDR_INC_MASK 0x20000000L +#define LSDMA_PIO_COMMAND__OVERLAP_DISABLE_MASK 0x40000000L +#define LSDMA_PIO_COMMAND__CONSTANT_FILL_MASK 0x80000000L +//LSDMA_PIO_CONSTFILL_DATA +#define LSDMA_PIO_CONSTFILL_DATA__DATA__SHIFT 0x0 +#define LSDMA_PIO_CONSTFILL_DATA__DATA_MASK 0xFFFFFFFFL +//LSDMA_PIO_CONTROL +#define LSDMA_PIO_CONTROL__VMID__SHIFT 0x0 +#define LSDMA_PIO_CONTROL__DST_GPA__SHIFT 0x4 +#define LSDMA_PIO_CONTROL__DST_SYS__SHIFT 0x5 +#define LSDMA_PIO_CONTROL__DST_GCC__SHIFT 0x6 +#define LSDMA_PIO_CONTROL__DST_SNOOP__SHIFT 0x7 +#define LSDMA_PIO_CONTROL__DST_REUSE_HINT__SHIFT 0x8 +#define LSDMA_PIO_CONTROL__DST_COMP_EN__SHIFT 0xa +#define LSDMA_PIO_CONTROL__SRC_GPA__SHIFT 0x14 +#define LSDMA_PIO_CONTROL__SRC_SYS__SHIFT 0x15 +#define LSDMA_PIO_CONTROL__SRC_SNOOP__SHIFT 0x17 +#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT__SHIFT 0x18 +#define LSDMA_PIO_CONTROL__SRC_COMP_EN__SHIFT 0x1a +#define LSDMA_PIO_CONTROL__VMID_MASK 0x0000000FL +#define LSDMA_PIO_CONTROL__DST_GPA_MASK 0x00000010L +#define LSDMA_PIO_CONTROL__DST_SYS_MASK 0x00000020L +#define LSDMA_PIO_CONTROL__DST_GCC_MASK 0x00000040L +#define LSDMA_PIO_CONTROL__DST_SNOOP_MASK 0x00000080L +#define LSDMA_PIO_CONTROL__DST_REUSE_HINT_MASK 0x00000300L +#define LSDMA_PIO_CONTROL__DST_COMP_EN_MASK 0x00000400L +#define LSDMA_PIO_CONTROL__SRC_GPA_MASK 0x00100000L +#define LSDMA_PIO_CONTROL__SRC_SYS_MASK 0x00200000L +#define LSDMA_PIO_CONTROL__SRC_SNOOP_MASK 0x00800000L +#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT_MASK 0x03000000L +#define LSDMA_PIO_CONTROL__SRC_COMP_EN_MASK 0x04000000L +//LSDMA_PIO_STATUS +#define LSDMA_PIO_STATUS__CMD_IN_FIFO__SHIFT 0x0 +#define LSDMA_PIO_STATUS__CMD_PROCESSING__SHIFT 0x3 +#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR__SHIFT 0x8 +#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT__SHIFT 0x9 +#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC__SHIFT 0xa +#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC__SHIFT 0xb +#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR__SHIFT 0xf +#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR__SHIFT 0x10 
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT__SHIFT 0x11 +#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT__SHIFT 0x12 +#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY__SHIFT 0x1c +#define LSDMA_PIO_STATUS__PIO_FIFO_FULL__SHIFT 0x1d +#define LSDMA_PIO_STATUS__PIO_IDLE__SHIFT 0x1f +#define LSDMA_PIO_STATUS__CMD_IN_FIFO_MASK 0x00000007L +#define LSDMA_PIO_STATUS__CMD_PROCESSING_MASK 0x000000F8L +#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR_MASK 0x00000100L +#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT_MASK 0x00000200L +#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC_MASK 0x00000400L +#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC_MASK 0x00000800L +#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR_MASK 0x00008000L +#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR_MASK 0x00010000L +#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT_MASK 0x00020000L +#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT_MASK 0x00040000L +#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK 0x10000000L +#define LSDMA_PIO_STATUS__PIO_FIFO_FULL_MASK 0x20000000L +#define LSDMA_PIO_STATUS__PIO_IDLE_MASK 0x80000000L +//LSDMA_PF_PIO_STATUS +#define LSDMA_PF_PIO_STATUS__CMD_IN_FIFO__SHIFT 0x0 +#define LSDMA_PF_PIO_STATUS__CMD_PROCESSING__SHIFT 0x3 +#define LSDMA_PF_PIO_STATUS__ERROR_INVALID_ADDR__SHIFT 0x8 +#define LSDMA_PF_PIO_STATUS__ERROR_ZERO_COUNT__SHIFT 0x9 +#define LSDMA_PF_PIO_STATUS__ERROR_DRAM_ECC__SHIFT 0xa +#define LSDMA_PF_PIO_STATUS__ERROR_SRAM_ECC__SHIFT 0xb +#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR__SHIFT 0xf +#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR__SHIFT 0x10 +#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_PRT__SHIFT 0x11 +#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_PRT__SHIFT 0x12 +#define LSDMA_PF_PIO_STATUS__PIO_FIFO_EMPTY__SHIFT 0x1c +#define LSDMA_PF_PIO_STATUS__PIO_FIFO_FULL__SHIFT 0x1d +#define LSDMA_PF_PIO_STATUS__PIO_IDLE__SHIFT 0x1f +#define LSDMA_PF_PIO_STATUS__CMD_IN_FIFO_MASK 0x00000007L +#define LSDMA_PF_PIO_STATUS__CMD_PROCESSING_MASK 0x000000F8L +#define LSDMA_PF_PIO_STATUS__ERROR_ZERO_COUNT_MASK 0x00000200L +#define LSDMA_PF_PIO_STATUS__ERROR_DRAM_ECC_MASK 0x00000400L +#define LSDMA_PF_PIO_STATUS__ERROR_SRAM_ECC_MASK 0x00000800L +#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR_MASK 0x00008000L +#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR_MASK 0x00010000L +#define LSDMA_PF_PIO_STATUS__ERROR_WRRET_NACK_PRT_MASK 0x00020000L +#define LSDMA_PF_PIO_STATUS__ERROR_RDRET_NACK_PRT_MASK 0x00040000L +#define LSDMA_PF_PIO_STATUS__PIO_FIFO_EMPTY_MASK 0x10000000L +#define LSDMA_PF_PIO_STATUS__PIO_FIFO_FULL_MASK 0x20000000L +#define LSDMA_PF_PIO_STATUS__PIO_IDLE_MASK 0x80000000L +//LSDMA_QUEUE0_RB_CNTL +#define LSDMA_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define LSDMA_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define LSDMA_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define LSDMA_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define LSDMA_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define LSDMA_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define LSDMA_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define LSDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define LSDMA_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L 
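Every field in these headers comes as a __SHIFT/_MASK pair, and drivers combine the two to pack and unpack register fields without magic numbers. Below is a minimal standalone sketch using a few of the LSDMA_QUEUE0_RB_CNTL definitions from this hunk (values copied verbatim); the SET_FIELD/GET_FIELD helpers are illustrative only — in-tree amdgpu code uses its REG_SET_FIELD()/REG_GET_FIELD() macros, which expand to the same shift-and-mask arithmetic.

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from the LSDMA_QUEUE0_RB_CNTL block above. */
#define LSDMA_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define LSDMA_QUEUE0_RB_CNTL__RB_SIZE__SHIFT   0x1
#define LSDMA_QUEUE0_RB_CNTL__RB_VMID__SHIFT   0x18
#define LSDMA_QUEUE0_RB_CNTL__RB_ENABLE_MASK   0x00000001L
#define LSDMA_QUEUE0_RB_CNTL__RB_SIZE_MASK     0x0000003EL
#define LSDMA_QUEUE0_RB_CNTL__RB_VMID_MASK     0x0F000000L

/* Illustrative helpers (not the kernel's): clear the field, then OR in the
 * new value shifted into place and clamped to the field's mask. */
#define SET_FIELD(reg, field, val) \
	(((reg) & ~field##_MASK) | (((uint32_t)(val) << field##__SHIFT) & field##_MASK))
#define GET_FIELD(reg, field) \
	(((reg) & field##_MASK) >> field##__SHIFT)

int main(void)
{
	uint32_t rb_cntl = 0;

	rb_cntl = SET_FIELD(rb_cntl, LSDMA_QUEUE0_RB_CNTL__RB_ENABLE, 1);
	rb_cntl = SET_FIELD(rb_cntl, LSDMA_QUEUE0_RB_CNTL__RB_SIZE, 10);
	rb_cntl = SET_FIELD(rb_cntl, LSDMA_QUEUE0_RB_CNTL__RB_VMID, 3);

	/* Prints RB_CNTL = 0x03000015, size field = 10 */
	printf("RB_CNTL = 0x%08x, size field = %u\n",
	       rb_cntl, (unsigned)GET_FIELD(rb_cntl, LSDMA_QUEUE0_RB_CNTL__RB_SIZE));
	return 0;
}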
+//LSDMA_QUEUE0_RB_BASE +#define LSDMA_QUEUE0_RB_BASE__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_RB_BASE_HI +#define LSDMA_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//LSDMA_QUEUE0_RB_RPTR +#define LSDMA_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_RB_RPTR_HI +#define LSDMA_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_RB_WPTR +#define LSDMA_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_RB_WPTR_HI +#define LSDMA_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_RB_WPTR_POLL_CNTL +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define LSDMA_QUEUE0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI +#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO +#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define LSDMA_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//LSDMA_QUEUE0_RB_RPTR_ADDR_HI +#define LSDMA_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_RB_RPTR_ADDR_LO +#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define LSDMA_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//LSDMA_QUEUE0_IB_CNTL +#define LSDMA_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define LSDMA_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define LSDMA_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define LSDMA_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f +#define LSDMA_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define LSDMA_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define LSDMA_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define LSDMA_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//LSDMA_QUEUE0_IB_RPTR +#define LSDMA_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2 +#define LSDMA_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//LSDMA_QUEUE0_IB_OFFSET +#define LSDMA_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define LSDMA_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//LSDMA_QUEUE0_IB_BASE_LO +#define LSDMA_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define LSDMA_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//LSDMA_QUEUE0_IB_BASE_HI +#define LSDMA_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_IB_SIZE +#define LSDMA_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0 +#define LSDMA_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//LSDMA_QUEUE0_SKIP_CNTL +#define LSDMA_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define 
LSDMA_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//LSDMA_QUEUE0_CSA_ADDR_LO +#define LSDMA_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define LSDMA_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//LSDMA_QUEUE0_CSA_ADDR_HI +#define LSDMA_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_RB_AQL_CNTL +#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define LSDMA_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define LSDMA_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define LSDMA_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//LSDMA_QUEUE0_MINOR_PTR_UPDATE +#define LSDMA_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//LSDMA_QUEUE0_CNTL +#define LSDMA_QUEUE0_CNTL__QUANTUM__SHIFT 0x0 +#define LSDMA_QUEUE0_CNTL__QUANTUM_MASK 0x000000FFL +//LSDMA_QUEUE0_RB_PREEMPT +#define LSDMA_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0 +#define LSDMA_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L +//LSDMA_QUEUE0_IB_SUB_REMAIN +#define LSDMA_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define LSDMA_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//LSDMA_QUEUE0_PREEMPT +#define LSDMA_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define LSDMA_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//LSDMA_QUEUE0_CONTEXT_STATUS +#define LSDMA_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define LSDMA_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define LSDMA_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define LSDMA_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define LSDMA_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define LSDMA_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define LSDMA_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define LSDMA_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define LSDMA_QUEUE0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define LSDMA_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//LSDMA_QUEUE0_STATUS +#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define LSDMA_QUEUE0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//LSDMA_QUEUE0_DOORBELL +#define LSDMA_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c +#define LSDMA_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e +#define LSDMA_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L +#define LSDMA_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L +//LSDMA_QUEUE0_DOORBELL_OFFSET +#define LSDMA_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define LSDMA_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//LSDMA_QUEUE0_DOORBELL_LOG +#define LSDMA_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define LSDMA_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define LSDMA_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define LSDMA_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//LSDMA_QUEUE0_WATERMARK +#define LSDMA_QUEUE0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define LSDMA_QUEUE0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define 
LSDMA_QUEUE0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define LSDMA_QUEUE0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//LSDMA_QUEUE0_DUMMY0 +#define LSDMA_QUEUE0_DUMMY0__DUMMY__SHIFT 0x0 +#define LSDMA_QUEUE0_DUMMY0__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_DUMMY1 +#define LSDMA_QUEUE0_DUMMY1__DUMMY__SHIFT 0x0 +#define LSDMA_QUEUE0_DUMMY1__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_DUMMY2 +#define LSDMA_QUEUE0_DUMMY2__DUMMY__SHIFT 0x0 +#define LSDMA_QUEUE0_DUMMY2__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA0 +#define LSDMA_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA1 +#define LSDMA_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA2 +#define LSDMA_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA3 +#define LSDMA_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA4 +#define LSDMA_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA5 +#define LSDMA_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA6 +#define LSDMA_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA7 +#define LSDMA_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA8 +#define LSDMA_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA9 +#define LSDMA_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_DATA10 +#define LSDMA_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL +//LSDMA_QUEUE0_MIDCMD_CNTL +#define LSDMA_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define LSDMA_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define LSDMA_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define LSDMA_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define LSDMA_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define LSDMA_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define LSDMA_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define LSDMA_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//LSDMA_QUEUE1_RB_CNTL +#define LSDMA_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define LSDMA_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define LSDMA_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define LSDMA_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define LSDMA_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define LSDMA_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define LSDMA_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define LSDMA_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//LSDMA_QUEUE1_RB_BASE +#define 
LSDMA_QUEUE1_RB_BASE__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_RB_BASE_HI +#define LSDMA_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//LSDMA_QUEUE1_RB_RPTR +#define LSDMA_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_RB_RPTR_HI +#define LSDMA_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_RB_WPTR +#define LSDMA_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_RB_WPTR_HI +#define LSDMA_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_RB_WPTR_POLL_CNTL +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define LSDMA_QUEUE1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI +#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO +#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define LSDMA_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//LSDMA_QUEUE1_RB_RPTR_ADDR_HI +#define LSDMA_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_RB_RPTR_ADDR_LO +#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define LSDMA_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//LSDMA_QUEUE1_IB_CNTL +#define LSDMA_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define LSDMA_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define LSDMA_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define LSDMA_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f +#define LSDMA_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define LSDMA_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define LSDMA_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define LSDMA_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//LSDMA_QUEUE1_IB_RPTR +#define LSDMA_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2 +#define LSDMA_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//LSDMA_QUEUE1_IB_OFFSET +#define LSDMA_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define LSDMA_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//LSDMA_QUEUE1_IB_BASE_LO +#define LSDMA_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define LSDMA_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//LSDMA_QUEUE1_IB_BASE_HI +#define LSDMA_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_IB_SIZE +#define LSDMA_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0 +#define LSDMA_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//LSDMA_QUEUE1_SKIP_CNTL +#define LSDMA_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define LSDMA_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL 
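The LSDMA_QUEUE1_* block here is a field-for-field mirror of the QUEUE0 block, i.e. one ring-buffer register layout instantiated per queue. Elsewhere in the SDMA family the RB_SIZE field holds log2 of the ring length in dwords (the SDMA ring setup programs it from order_base_2(ring_size / 4)); assuming LSDMA follows the same convention — an assumption, since this hunk defines only the bit layout — sizing the field might look like the sketch below.

#include <stdint.h>

#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
#define LSDMA_QUEUE1_RB_CNTL__RB_SIZE_MASK   0x0000003EL

/* log2 of the ring size in dwords, shifted into the RB_SIZE field.
 * Assumes LSDMA uses the SDMA convention for this field. */
static uint32_t lsdma_rb_size_field(uint32_t ring_bytes)
{
	uint32_t dwords = ring_bytes / 4;
	uint32_t log2 = 0;

	while ((1u << (log2 + 1)) <= dwords)
		log2++;
	/* e.g. a 256 KiB ring -> 65536 dwords -> log2 = 16 -> field = 0x20 */
	return (log2 << LSDMA_QUEUE1_RB_CNTL__RB_SIZE__SHIFT) &
	       LSDMA_QUEUE1_RB_CNTL__RB_SIZE_MASK;
}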
+//LSDMA_QUEUE1_CSA_ADDR_LO +#define LSDMA_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define LSDMA_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//LSDMA_QUEUE1_CSA_ADDR_HI +#define LSDMA_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define LSDMA_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_RB_AQL_CNTL +#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define LSDMA_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define LSDMA_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define LSDMA_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//LSDMA_QUEUE1_MINOR_PTR_UPDATE +#define LSDMA_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define LSDMA_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//LSDMA_QUEUE1_CNTL +#define LSDMA_QUEUE1_CNTL__QUANTUM__SHIFT 0x0 +#define LSDMA_QUEUE1_CNTL__QUANTUM_MASK 0x000000FFL +//LSDMA_QUEUE1_RB_PREEMPT +#define LSDMA_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0 +#define LSDMA_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L +//LSDMA_QUEUE1_IB_SUB_REMAIN +#define LSDMA_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define LSDMA_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//LSDMA_QUEUE1_PREEMPT +#define LSDMA_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define LSDMA_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//LSDMA_QUEUE1_CONTEXT_STATUS +#define LSDMA_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define LSDMA_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define LSDMA_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define LSDMA_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define LSDMA_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define LSDMA_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define LSDMA_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define LSDMA_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define LSDMA_QUEUE1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define LSDMA_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//LSDMA_QUEUE1_STATUS +#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define LSDMA_QUEUE1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//LSDMA_QUEUE1_DOORBELL +#define LSDMA_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c +#define LSDMA_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e +#define LSDMA_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L +#define LSDMA_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L +//LSDMA_QUEUE1_DOORBELL_OFFSET +#define LSDMA_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define LSDMA_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//LSDMA_QUEUE1_DOORBELL_LOG +#define LSDMA_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define LSDMA_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define LSDMA_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define LSDMA_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//LSDMA_QUEUE1_WATERMARK +#define LSDMA_QUEUE1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define LSDMA_QUEUE1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define LSDMA_QUEUE1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define 
LSDMA_QUEUE1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//LSDMA_QUEUE1_DUMMY0 +#define LSDMA_QUEUE1_DUMMY0__DUMMY__SHIFT 0x0 +#define LSDMA_QUEUE1_DUMMY0__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_DUMMY1 +#define LSDMA_QUEUE1_DUMMY1__DUMMY__SHIFT 0x0 +#define LSDMA_QUEUE1_DUMMY1__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_DUMMY2 +#define LSDMA_QUEUE1_DUMMY2__DUMMY__SHIFT 0x0 +#define LSDMA_QUEUE1_DUMMY2__DUMMY_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA0 +#define LSDMA_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA1 +#define LSDMA_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA2 +#define LSDMA_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA3 +#define LSDMA_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA4 +#define LSDMA_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA5 +#define LSDMA_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA6 +#define LSDMA_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA7 +#define LSDMA_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA8 +#define LSDMA_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA9 +#define LSDMA_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_DATA10 +#define LSDMA_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL +//LSDMA_QUEUE1_MIDCMD_CNTL +#define LSDMA_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define LSDMA_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define LSDMA_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define LSDMA_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define LSDMA_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define LSDMA_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define LSDMA_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define LSDMA_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h new file mode 100644 index 000000000000..6a1b7b524809 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h @@ -0,0 +1,468 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _mp_14_0_2_OFFSET_HEADER +#define _mp_14_0_2_OFFSET_HEADER + + +// addressBlock: mp_SmuMp1_SmnDec +// base address: 0x0 +#define regMP1_SMN_C2PMSG_0 0x0040 +#define regMP1_SMN_C2PMSG_0_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_1 0x0041 +#define regMP1_SMN_C2PMSG_1_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_2 0x0042 +#define regMP1_SMN_C2PMSG_2_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_3 0x0043 +#define regMP1_SMN_C2PMSG_3_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_4 0x0044 +#define regMP1_SMN_C2PMSG_4_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_5 0x0045 +#define regMP1_SMN_C2PMSG_5_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_6 0x0046 +#define regMP1_SMN_C2PMSG_6_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_7 0x0047 +#define regMP1_SMN_C2PMSG_7_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_8 0x0048 +#define regMP1_SMN_C2PMSG_8_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_9 0x0049 +#define regMP1_SMN_C2PMSG_9_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_10 0x004a +#define regMP1_SMN_C2PMSG_10_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_11 0x004b +#define regMP1_SMN_C2PMSG_11_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_12 0x004c +#define regMP1_SMN_C2PMSG_12_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_13 0x004d +#define regMP1_SMN_C2PMSG_13_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_14 0x004e +#define regMP1_SMN_C2PMSG_14_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_15 0x004f +#define regMP1_SMN_C2PMSG_15_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_16 0x0050 +#define regMP1_SMN_C2PMSG_16_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_17 0x0051 +#define regMP1_SMN_C2PMSG_17_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_18 0x0052 +#define regMP1_SMN_C2PMSG_18_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_19 0x0053 +#define regMP1_SMN_C2PMSG_19_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_20 0x0054 +#define regMP1_SMN_C2PMSG_20_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_21 0x0055 +#define regMP1_SMN_C2PMSG_21_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_22 0x0056 +#define regMP1_SMN_C2PMSG_22_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_23 0x0057 +#define regMP1_SMN_C2PMSG_23_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_24 0x0058 +#define regMP1_SMN_C2PMSG_24_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_25 0x0059 +#define regMP1_SMN_C2PMSG_25_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_26 0x005a +#define regMP1_SMN_C2PMSG_26_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_27 0x005b +#define regMP1_SMN_C2PMSG_27_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_28 0x005c +#define regMP1_SMN_C2PMSG_28_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_29 0x005d +#define regMP1_SMN_C2PMSG_29_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_30 0x005e +#define regMP1_SMN_C2PMSG_30_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_31 0x005f +#define regMP1_SMN_C2PMSG_31_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_32 0x0060 +#define regMP1_SMN_C2PMSG_32_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_33 0x0061 +#define regMP1_SMN_C2PMSG_33_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_34 0x0062 +#define regMP1_SMN_C2PMSG_34_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_35 0x0063 +#define regMP1_SMN_C2PMSG_35_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_36 0x0064 +#define regMP1_SMN_C2PMSG_36_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_37 0x0065 +#define 
regMP1_SMN_C2PMSG_37_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_38 0x0066 +#define regMP1_SMN_C2PMSG_38_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_39 0x0067 +#define regMP1_SMN_C2PMSG_39_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_40 0x0068 +#define regMP1_SMN_C2PMSG_40_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_41 0x0069 +#define regMP1_SMN_C2PMSG_41_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_42 0x006a +#define regMP1_SMN_C2PMSG_42_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_43 0x006b +#define regMP1_SMN_C2PMSG_43_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_44 0x006c +#define regMP1_SMN_C2PMSG_44_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_45 0x006d +#define regMP1_SMN_C2PMSG_45_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_46 0x006e +#define regMP1_SMN_C2PMSG_46_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_47 0x006f +#define regMP1_SMN_C2PMSG_47_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_48 0x0070 +#define regMP1_SMN_C2PMSG_48_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_49 0x0071 +#define regMP1_SMN_C2PMSG_49_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_50 0x0072 +#define regMP1_SMN_C2PMSG_50_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_51 0x0073 +#define regMP1_SMN_C2PMSG_51_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_52 0x0074 +#define regMP1_SMN_C2PMSG_52_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_53 0x0075 +#define regMP1_SMN_C2PMSG_53_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_54 0x0076 +#define regMP1_SMN_C2PMSG_54_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_55 0x0077 +#define regMP1_SMN_C2PMSG_55_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_56 0x0078 +#define regMP1_SMN_C2PMSG_56_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_57 0x0079 +#define regMP1_SMN_C2PMSG_57_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_58 0x007a +#define regMP1_SMN_C2PMSG_58_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_59 0x007b +#define regMP1_SMN_C2PMSG_59_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_60 0x007c +#define regMP1_SMN_C2PMSG_60_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_61 0x007d +#define regMP1_SMN_C2PMSG_61_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_62 0x007e +#define regMP1_SMN_C2PMSG_62_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_63 0x007f +#define regMP1_SMN_C2PMSG_63_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_64 0x0080 +#define regMP1_SMN_C2PMSG_64_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_65 0x0081 +#define regMP1_SMN_C2PMSG_65_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_66 0x0082 +#define regMP1_SMN_C2PMSG_66_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_67 0x0083 +#define regMP1_SMN_C2PMSG_67_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_68 0x0084 +#define regMP1_SMN_C2PMSG_68_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_69 0x0085 +#define regMP1_SMN_C2PMSG_69_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_70 0x0086 +#define regMP1_SMN_C2PMSG_70_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_71 0x0087 +#define regMP1_SMN_C2PMSG_71_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_72 0x0088 +#define regMP1_SMN_C2PMSG_72_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_73 0x0089 +#define regMP1_SMN_C2PMSG_73_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_74 0x008a +#define regMP1_SMN_C2PMSG_74_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_75 0x008b +#define regMP1_SMN_C2PMSG_75_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_76 0x008c +#define regMP1_SMN_C2PMSG_76_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_77 0x008d +#define regMP1_SMN_C2PMSG_77_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_78 0x008e +#define regMP1_SMN_C2PMSG_78_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_79 0x008f +#define regMP1_SMN_C2PMSG_79_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_80 0x0090 +#define regMP1_SMN_C2PMSG_80_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_81 0x0091 +#define regMP1_SMN_C2PMSG_81_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_82 0x0092 +#define regMP1_SMN_C2PMSG_82_BASE_IDX 1 +#define 
regMP1_SMN_C2PMSG_83 0x0093 +#define regMP1_SMN_C2PMSG_83_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_84 0x0094 +#define regMP1_SMN_C2PMSG_84_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_85 0x0095 +#define regMP1_SMN_C2PMSG_85_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_86 0x0096 +#define regMP1_SMN_C2PMSG_86_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_87 0x0097 +#define regMP1_SMN_C2PMSG_87_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_88 0x0098 +#define regMP1_SMN_C2PMSG_88_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_89 0x0099 +#define regMP1_SMN_C2PMSG_89_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_90 0x009a +#define regMP1_SMN_C2PMSG_90_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_91 0x009b +#define regMP1_SMN_C2PMSG_91_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_92 0x009c +#define regMP1_SMN_C2PMSG_92_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_93 0x009d +#define regMP1_SMN_C2PMSG_93_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_94 0x009e +#define regMP1_SMN_C2PMSG_94_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_95 0x009f +#define regMP1_SMN_C2PMSG_95_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_96 0x00a0 +#define regMP1_SMN_C2PMSG_96_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_97 0x00a1 +#define regMP1_SMN_C2PMSG_97_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_98 0x00a2 +#define regMP1_SMN_C2PMSG_98_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_99 0x00a3 +#define regMP1_SMN_C2PMSG_99_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_100 0x00a4 +#define regMP1_SMN_C2PMSG_100_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_101 0x00a5 +#define regMP1_SMN_C2PMSG_101_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_102 0x00a6 +#define regMP1_SMN_C2PMSG_102_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_103 0x00a7 +#define regMP1_SMN_C2PMSG_103_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_104 0x00a8 +#define regMP1_SMN_C2PMSG_104_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_105 0x00a9 +#define regMP1_SMN_C2PMSG_105_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_106 0x00aa +#define regMP1_SMN_C2PMSG_106_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_107 0x00ab +#define regMP1_SMN_C2PMSG_107_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_108 0x00ac +#define regMP1_SMN_C2PMSG_108_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_109 0x00ad +#define regMP1_SMN_C2PMSG_109_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_110 0x00ae +#define regMP1_SMN_C2PMSG_110_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_111 0x00af +#define regMP1_SMN_C2PMSG_111_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_112 0x00b0 +#define regMP1_SMN_C2PMSG_112_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_113 0x00b1 +#define regMP1_SMN_C2PMSG_113_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_114 0x00b2 +#define regMP1_SMN_C2PMSG_114_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_115 0x00b3 +#define regMP1_SMN_C2PMSG_115_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_116 0x00b4 +#define regMP1_SMN_C2PMSG_116_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_117 0x00b5 +#define regMP1_SMN_C2PMSG_117_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_118 0x00b6 +#define regMP1_SMN_C2PMSG_118_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_119 0x00b7 +#define regMP1_SMN_C2PMSG_119_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_120 0x00b8 +#define regMP1_SMN_C2PMSG_120_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_121 0x00b9 +#define regMP1_SMN_C2PMSG_121_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_122 0x00ba +#define regMP1_SMN_C2PMSG_122_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_123 0x00bb +#define regMP1_SMN_C2PMSG_123_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_124 0x00bc +#define regMP1_SMN_C2PMSG_124_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_125 0x00bd +#define regMP1_SMN_C2PMSG_125_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_126 0x00be +#define regMP1_SMN_C2PMSG_126_BASE_IDX 1 +#define regMP1_SMN_C2PMSG_127 0x00bf +#define regMP1_SMN_C2PMSG_127_BASE_IDX 
1 +#define regMP1_SMN_IH_CREDIT 0x0140 +#define regMP1_SMN_IH_CREDIT_BASE_IDX 1 +#define regMP1_SMN_IH_SW_INT 0x0141 +#define regMP1_SMN_IH_SW_INT_BASE_IDX 1 +#define regMP1_SMN_IH_SW_INT_CTRL 0x0142 +#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 1 +#define regMP1_SMN_FPS_CNT 0x0143 +#define regMP1_SMN_FPS_CNT_BASE_IDX 1 +#define regMP1_SMN_PUB_CTRL 0x0144 +#define regMP1_SMN_PUB_CTRL_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH0 0x01c0 +#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH1 0x01c1 +#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH2 0x01c2 +#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH3 0x01c3 +#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH4 0x01c4 +#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH5 0x01c5 +#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH6 0x01c6 +#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH7 0x01c7 +#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH8 0x01c8 +#define regMP1_SMN_EXT_SCRATCH8_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH9 0x01c9 +#define regMP1_SMN_EXT_SCRATCH9_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH10 0x01ca +#define regMP1_SMN_EXT_SCRATCH10_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH11 0x01cb +#define regMP1_SMN_EXT_SCRATCH11_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH12 0x01cc +#define regMP1_SMN_EXT_SCRATCH12_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH13 0x01cd +#define regMP1_SMN_EXT_SCRATCH13_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH14 0x01ce +#define regMP1_SMN_EXT_SCRATCH14_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH15 0x01cf +#define regMP1_SMN_EXT_SCRATCH15_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH16 0x01d0 +#define regMP1_SMN_EXT_SCRATCH16_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH17 0x01d1 +#define regMP1_SMN_EXT_SCRATCH17_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH18 0x01d2 +#define regMP1_SMN_EXT_SCRATCH18_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH19 0x01d3 +#define regMP1_SMN_EXT_SCRATCH19_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH20 0x01d4 +#define regMP1_SMN_EXT_SCRATCH20_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH21 0x01d5 +#define regMP1_SMN_EXT_SCRATCH21_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH22 0x01d6 +#define regMP1_SMN_EXT_SCRATCH22_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH23 0x01d7 +#define regMP1_SMN_EXT_SCRATCH23_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH24 0x01d8 +#define regMP1_SMN_EXT_SCRATCH24_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH25 0x01d9 +#define regMP1_SMN_EXT_SCRATCH25_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH26 0x01da +#define regMP1_SMN_EXT_SCRATCH26_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH27 0x01db +#define regMP1_SMN_EXT_SCRATCH27_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH28 0x01dc +#define regMP1_SMN_EXT_SCRATCH28_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH29 0x01dd +#define regMP1_SMN_EXT_SCRATCH29_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH30 0x01de +#define regMP1_SMN_EXT_SCRATCH30_BASE_IDX 1 +#define regMP1_SMN_EXT_SCRATCH31 0x01df +#define regMP1_SMN_EXT_SCRATCH31_BASE_IDX 1 + + +// addressBlock: mp_SmuMpASP_SmnDec +// base address: 0x0 +#define regMPASP_SMN_C2PMSG_32 0x0060 +#define regMPASP_SMN_C2PMSG_32_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_33 0x0061 +#define regMPASP_SMN_C2PMSG_33_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_34 0x0062 +#define regMPASP_SMN_C2PMSG_34_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_35 0x0063 +#define regMPASP_SMN_C2PMSG_35_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_36 
0x0064 +#define regMPASP_SMN_C2PMSG_36_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_37 0x0065 +#define regMPASP_SMN_C2PMSG_37_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_38 0x0066 +#define regMPASP_SMN_C2PMSG_38_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_39 0x0067 +#define regMPASP_SMN_C2PMSG_39_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_60 0x007c +#define regMPASP_SMN_C2PMSG_60_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_61 0x007d +#define regMPASP_SMN_C2PMSG_61_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_62 0x007e +#define regMPASP_SMN_C2PMSG_62_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_63 0x007f +#define regMPASP_SMN_C2PMSG_63_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_64 0x0080 +#define regMPASP_SMN_C2PMSG_64_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_65 0x0081 +#define regMPASP_SMN_C2PMSG_65_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_66 0x0082 +#define regMPASP_SMN_C2PMSG_66_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_67 0x0083 +#define regMPASP_SMN_C2PMSG_67_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_68 0x0084 +#define regMPASP_SMN_C2PMSG_68_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_69 0x0085 +#define regMPASP_SMN_C2PMSG_69_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_70 0x0086 +#define regMPASP_SMN_C2PMSG_70_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_71 0x0087 +#define regMPASP_SMN_C2PMSG_71_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_72 0x0088 +#define regMPASP_SMN_C2PMSG_72_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_73 0x0089 +#define regMPASP_SMN_C2PMSG_73_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_74 0x008a +#define regMPASP_SMN_C2PMSG_74_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_75 0x008b +#define regMPASP_SMN_C2PMSG_75_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_76 0x008c +#define regMPASP_SMN_C2PMSG_76_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_77 0x008d +#define regMPASP_SMN_C2PMSG_77_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_78 0x008e +#define regMPASP_SMN_C2PMSG_78_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_79 0x008f +#define regMPASP_SMN_C2PMSG_79_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_80 0x0090 +#define regMPASP_SMN_C2PMSG_80_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_81 0x0091 +#define regMPASP_SMN_C2PMSG_81_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_82 0x0092 +#define regMPASP_SMN_C2PMSG_82_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_83 0x0093 +#define regMPASP_SMN_C2PMSG_83_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_84 0x0094 +#define regMPASP_SMN_C2PMSG_84_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_85 0x0095 +#define regMPASP_SMN_C2PMSG_85_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_86 0x0096 +#define regMPASP_SMN_C2PMSG_86_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_87 0x0097 +#define regMPASP_SMN_C2PMSG_87_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_88 0x0098 +#define regMPASP_SMN_C2PMSG_88_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_89 0x0099 +#define regMPASP_SMN_C2PMSG_89_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_100 0x00a4 +#define regMPASP_SMN_C2PMSG_100_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_101 0x00a5 +#define regMPASP_SMN_C2PMSG_101_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_102 0x00a6 +#define regMPASP_SMN_C2PMSG_102_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_103 0x00a7 +#define regMPASP_SMN_C2PMSG_103_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_109 0x00ad +#define regMPASP_SMN_C2PMSG_109_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_115 0x00b3 +#define regMPASP_SMN_C2PMSG_115_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_116 0x00b4 +#define regMPASP_SMN_C2PMSG_116_BASE_IDX 0 +#define regMPASP_SMN_C2PMSG_119_BASE_IDX 0 +#define regMPASP_SMN_IH_CREDIT 0x0140 +#define regMPASP_SMN_IH_CREDIT_BASE_IDX 0 +#define regMPASP_SMN_IH_SW_INT 0x0141 +#define regMPASP_SMN_IH_SW_INT_BASE_IDX 0 
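Unlike the flat LSDMA mask list, every register in this offset header is paired with a _BASE_IDX define (1 for the MP1 SMN block, 0 for the MPASP block above). In amdgpu the pair is resolved through the per-IP segment table kept in adev->reg_offset[], via the SOC15_REG_OFFSET() lookup that adds the segment base selected by BASE_IDX to the register offset. A hedged standalone sketch of that addressing scheme follows; the segment-base values in the table are invented for the demo, and MP_REG_OFFSET() is a stand-in for the driver's macro.

#include <stdint.h>
#include <stdio.h>

/* Copied from the MPASP offset block above. */
#define regMPASP_SMN_C2PMSG_64          0x0080
#define regMPASP_SMN_C2PMSG_64_BASE_IDX 0

/* Illustrative stand-in for the per-IP segment table the driver discovers
 * at init time; these base values are made up for the example. */
static const uint32_t mp_segment_base[] = { 0x00016000, 0x0243FC00 };

/* Same shape as the driver's lookup: segment base picked by BASE_IDX,
 * plus the register's offset within that segment. */
#define MP_REG_OFFSET(reg) (mp_segment_base[reg##_BASE_IDX] + reg)

int main(void)
{
	printf("MPASP C2PMSG_64 dword offset: 0x%08x\n",
	       (uint32_t)MP_REG_OFFSET(regMPASP_SMN_C2PMSG_64));
	return 0;
}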
+#define regMPASP_SMN_IH_SW_INT_CTRL 0x0142 +#define regMPASP_SMN_IH_SW_INT_CTRL_BASE_IDX 0 + + +// addressBlock: Mp1MmioPublic_SmuMp1Pub_CruDec +// base address: 0x3b00000 +#define regMP1_CRU1_MP1_FIRMWARE_FLAGS 0x4009 +#define regMP1_CRU1_MP1_FIRMWARE_FLAGS_BASE_IDX 7 + + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h new file mode 100644 index 000000000000..3ba269da1463 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h @@ -0,0 +1,692 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _mp_14_0_2_SH_MASK_HEADER +#define _mp_14_0_2_SH_MASK_HEADER + + +// addressBlock: mp_SmuMp1_SmnDec +//MP1_SMN_C2PMSG_0 +#define MP1_SMN_C2PMSG_0__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_1 +#define MP1_SMN_C2PMSG_1__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_2 +#define MP1_SMN_C2PMSG_2__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_3 +#define MP1_SMN_C2PMSG_3__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_4 +#define MP1_SMN_C2PMSG_4__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_5 +#define MP1_SMN_C2PMSG_5__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_6 +#define MP1_SMN_C2PMSG_6__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_7 +#define MP1_SMN_C2PMSG_7__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_8 +#define MP1_SMN_C2PMSG_8__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_9 +#define MP1_SMN_C2PMSG_9__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_10 +#define MP1_SMN_C2PMSG_10__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_11 +#define MP1_SMN_C2PMSG_11__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_12 +#define MP1_SMN_C2PMSG_12__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_13 +#define MP1_SMN_C2PMSG_13__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_14 +#define 
MP1_SMN_C2PMSG_14__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_15 +#define MP1_SMN_C2PMSG_15__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_16 +#define MP1_SMN_C2PMSG_16__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_17 +#define MP1_SMN_C2PMSG_17__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_18 +#define MP1_SMN_C2PMSG_18__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_19 +#define MP1_SMN_C2PMSG_19__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_20 +#define MP1_SMN_C2PMSG_20__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_21 +#define MP1_SMN_C2PMSG_21__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_22 +#define MP1_SMN_C2PMSG_22__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_23 +#define MP1_SMN_C2PMSG_23__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_24 +#define MP1_SMN_C2PMSG_24__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_25 +#define MP1_SMN_C2PMSG_25__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_26 +#define MP1_SMN_C2PMSG_26__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_27 +#define MP1_SMN_C2PMSG_27__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_28 +#define MP1_SMN_C2PMSG_28__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_29 +#define MP1_SMN_C2PMSG_29__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_30 +#define MP1_SMN_C2PMSG_30__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_31 +#define MP1_SMN_C2PMSG_31__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_32 +#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_33 +#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_34 +#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_35 +#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_36 +#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_37 +#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_38 +#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_39 +#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_40 +#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_41 +#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_42 +#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_43 +#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_44 +#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_45 +#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_46 +#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_47 +#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_48 +#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_49 +#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_50 +#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_51 +#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_52 +#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_53 +#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_54 +#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_55 +#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_56 +#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_57 +#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_58 +#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_59 +#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_60 +#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_61 +#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_62 +#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_63 +#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_64 +#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_65 +#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_66 +#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_67 +#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_68 +#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_69 +#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_70 +#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_71 +#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_72 +#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_72__CONTENT_MASK 
0xFFFFFFFFL +//MP1_SMN_C2PMSG_73 +#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_74 +#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_75 +#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_76 +#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_77 +#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_78 +#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_79 +#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_80 +#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_81 +#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_82 +#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_83 +#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_84 +#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_85 +#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_86 +#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_87 +#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_88 +#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_89 +#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_90 +#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_91 +#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_92 +#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_93 +#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_94 +#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_95 +#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_96 +#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_97 +#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_98 +#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_99 +#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_100 +#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_101 +#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_102 
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_103 +#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_104 +#define MP1_SMN_C2PMSG_104__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_104__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_105 +#define MP1_SMN_C2PMSG_105__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_105__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_106 +#define MP1_SMN_C2PMSG_106__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_106__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_107 +#define MP1_SMN_C2PMSG_107__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_107__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_108 +#define MP1_SMN_C2PMSG_108__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_108__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_109 +#define MP1_SMN_C2PMSG_109__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_110 +#define MP1_SMN_C2PMSG_110__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_110__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_111 +#define MP1_SMN_C2PMSG_111__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_111__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_112 +#define MP1_SMN_C2PMSG_112__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_112__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_113 +#define MP1_SMN_C2PMSG_113__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_113__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_114 +#define MP1_SMN_C2PMSG_114__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_114__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_115 +#define MP1_SMN_C2PMSG_115__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_116 +#define MP1_SMN_C2PMSG_116__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_117 +#define MP1_SMN_C2PMSG_117__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_117__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_118 +#define MP1_SMN_C2PMSG_118__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_118__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_119 +#define MP1_SMN_C2PMSG_119__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_119__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_120 +#define MP1_SMN_C2PMSG_120__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_120__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_121 +#define MP1_SMN_C2PMSG_121__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_121__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_122 +#define MP1_SMN_C2PMSG_122__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_122__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_123 +#define MP1_SMN_C2PMSG_123__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_123__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_124 +#define MP1_SMN_C2PMSG_124__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_124__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_125 +#define MP1_SMN_C2PMSG_125__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_125__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_126 +#define MP1_SMN_C2PMSG_126__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_126__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_127 +#define MP1_SMN_C2PMSG_127__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_127__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_IH_CREDIT +#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 +#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 +#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L +#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L +//MP1_SMN_IH_SW_INT +#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0 +#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8 +#define 
MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL +#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L +//MP1_SMN_IH_SW_INT_CTRL +#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 +#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 +#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L +#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L +//MP1_SMN_FPS_CNT +#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0 +#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL +//MP1_SMN_PUB_CTRL +#define MP1_SMN_PUB_CTRL__LX3_RESET__SHIFT 0x0 +#define MP1_SMN_PUB_CTRL__LX3_RESET_MASK 0x00000001L +//MP1_SMN_EXT_SCRATCH0 +#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH1 +#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH2 +#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH3 +#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH4 +#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH5 +#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH6 +#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH7 +#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH8 +#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH9 +#define MP1_SMN_EXT_SCRATCH9__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH9__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH10 +#define MP1_SMN_EXT_SCRATCH10__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH10__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH11 +#define MP1_SMN_EXT_SCRATCH11__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH11__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH12 +#define MP1_SMN_EXT_SCRATCH12__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH12__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH13 +#define MP1_SMN_EXT_SCRATCH13__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH13__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH14 +#define MP1_SMN_EXT_SCRATCH14__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH14__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH15 +#define MP1_SMN_EXT_SCRATCH15__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH15__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH16 +#define MP1_SMN_EXT_SCRATCH16__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH16__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH17 +#define MP1_SMN_EXT_SCRATCH17__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH17__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH18 +#define MP1_SMN_EXT_SCRATCH18__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH18__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH19 +#define MP1_SMN_EXT_SCRATCH19__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH19__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH20 +#define MP1_SMN_EXT_SCRATCH20__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH20__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH21 +#define MP1_SMN_EXT_SCRATCH21__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH21__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH22 +#define MP1_SMN_EXT_SCRATCH22__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH22__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH23 +#define MP1_SMN_EXT_SCRATCH23__DATA__SHIFT 0x0 +#define 
MP1_SMN_EXT_SCRATCH23__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH24 +#define MP1_SMN_EXT_SCRATCH24__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH24__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH25 +#define MP1_SMN_EXT_SCRATCH25__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH25__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH26 +#define MP1_SMN_EXT_SCRATCH26__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH26__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH27 +#define MP1_SMN_EXT_SCRATCH27__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH27__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH28 +#define MP1_SMN_EXT_SCRATCH28__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH28__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH29 +#define MP1_SMN_EXT_SCRATCH29__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH29__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH30 +#define MP1_SMN_EXT_SCRATCH30__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH30__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH31 +#define MP1_SMN_EXT_SCRATCH31__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH31__DATA_MASK 0xFFFFFFFFL + + +// addressBlock: mp_SmuMpASP_SmnDec +//MPASP_SMN_C2PMSG_32 +#define MPASP_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_33 +#define MPASP_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_34 +#define MPASP_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_35 +#define MPASP_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_36 +#define MPASP_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_37 +#define MPASP_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_38 +#define MPASP_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_39 +#define MPASP_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_60 +#define MPASP_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_61 +#define MPASP_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_62 +#define MPASP_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_63 +#define MPASP_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_64 +#define MPASP_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_65 +#define MPASP_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_66 +#define MPASP_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_67 +#define MPASP_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_68 +#define MPASP_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_69 +#define MPASP_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_70 +#define MPASP_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_71 +#define 
MPASP_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_72 +#define MPASP_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_73 +#define MPASP_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_74 +#define MPASP_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_75 +#define MPASP_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_76 +#define MPASP_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_77 +#define MPASP_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_78 +#define MPASP_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_79 +#define MPASP_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_80 +#define MPASP_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_81 +#define MPASP_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_82 +#define MPASP_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_83 +#define MPASP_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_84 +#define MPASP_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_85 +#define MPASP_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_86 +#define MPASP_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_87 +#define MPASP_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_88 +#define MPASP_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_89 +#define MPASP_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_100 +#define MPASP_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_101 +#define MPASP_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_102 +#define MPASP_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_103 +#define MPASP_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_109 +#define MPASP_SMN_C2PMSG_109__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_115 +#define MPASP_SMN_C2PMSG_115__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_C2PMSG_116 +#define MPASP_SMN_C2PMSG_116__CONTENT__SHIFT 0x0 +#define MPASP_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL +//MPASP_SMN_IH_CREDIT +#define MPASP_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 +#define MPASP_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 +#define MPASP_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L +#define MPASP_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L 
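These generated headers pair every register field with a __SHIFT and a _MASK constant so driver code never hard-codes bit positions. A minimal sketch of how such a pair is consumed, using the MPASP_SMN_IH_CREDIT fields defined just above — the helper names are illustrative only and not part of this patch (amdgpu's own REG_GET_FIELD()/REG_SET_FIELD() macros encode the same pattern):

#include <stdint.h>

/* Extract CLIENT_ID: mask off the field, then shift it down to bit 0. */
static inline uint32_t ih_credit_client_id(uint32_t reg)
{
	return (reg & MPASP_SMN_IH_CREDIT__CLIENT_ID_MASK) >>
	       MPASP_SMN_IH_CREDIT__CLIENT_ID__SHIFT;
}

/* Insert CREDIT_VALUE: clear the field, shift the new value into place. */
static inline uint32_t ih_credit_set_value(uint32_t reg, uint32_t val)
{
	reg &= ~MPASP_SMN_IH_CREDIT__CREDIT_VALUE_MASK;
	reg |= (val << MPASP_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT) &
	       MPASP_SMN_IH_CREDIT__CREDIT_VALUE_MASK;
	return reg;
}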
+//MPASP_SMN_IH_SW_INT +#define MPASP_SMN_IH_SW_INT__ID__SHIFT 0x0 +#define MPASP_SMN_IH_SW_INT__VALID__SHIFT 0x8 +#define MPASP_SMN_IH_SW_INT__ID_MASK 0x000000FFL +#define MPASP_SMN_IH_SW_INT__VALID_MASK 0x00000100L +//MPASP_SMN_IH_SW_INT_CTRL +#define MPASP_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 +#define MPASP_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 +#define MPASP_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L +#define MPASP_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L + + +// addressBlock: Mp1MmioPublic_SmuMp1Pub_CruDec +//MP1_CRU1_MP1_FIRMWARE_FLAGS +#define MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0 +#define MP1_CRU1_MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1 +#define MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L +#define MP1_CRU1_MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL + + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h index 6f80bfa7e41a..5ebe4cb40f9d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h @@ -8900,6 +8900,8 @@ #define regGDC0_BIF_IH_DOORBELL_RANGE_BASE_IDX 3 #define regGDC0_BIF_VCN0_DOORBELL_RANGE 0x4f0af3 #define regGDC0_BIF_VCN0_DOORBELL_RANGE_BASE_IDX 3 +#define regGDC0_BIF_VPE1_DOORBELL_RANGE 0x4f0af4 +#define regGDC0_BIF_VPE1_DOORBELL_RANGE_BASE_IDX 3 #define regGDC0_BIF_RLC_DOORBELL_RANGE 0x4f0af5 #define regGDC0_BIF_RLC_DOORBELL_RANGE_BASE_IDX 3 #define regGDC0_BIF_SDMA2_DOORBELL_RANGE 0x4f0af6 diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h new file mode 100644 index 000000000000..45a961ef74ff --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h @@ -0,0 +1,279 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _osssys_7_0_0_OFFSET_HEADER +#define _osssys_7_0_0_OFFSET_HEADER + + + +// addressBlock: osssys_osssysdec +// base address: 0x4280 +#define regIH_VMID_0_LUT 0x0000 +#define regIH_VMID_0_LUT_BASE_IDX 0 +#define regIH_VMID_1_LUT 0x0001 +#define regIH_VMID_1_LUT_BASE_IDX 0 +#define regIH_VMID_2_LUT 0x0002 +#define regIH_VMID_2_LUT_BASE_IDX 0 +#define regIH_VMID_3_LUT 0x0003 +#define regIH_VMID_3_LUT_BASE_IDX 0 +#define regIH_VMID_4_LUT 0x0004 +#define regIH_VMID_4_LUT_BASE_IDX 0 +#define regIH_VMID_5_LUT 0x0005 +#define regIH_VMID_5_LUT_BASE_IDX 0 +#define regIH_VMID_6_LUT 0x0006 +#define regIH_VMID_6_LUT_BASE_IDX 0 +#define regIH_VMID_7_LUT 0x0007 +#define regIH_VMID_7_LUT_BASE_IDX 0 +#define regIH_VMID_8_LUT 0x0008 +#define regIH_VMID_8_LUT_BASE_IDX 0 +#define regIH_VMID_9_LUT 0x0009 +#define regIH_VMID_9_LUT_BASE_IDX 0 +#define regIH_VMID_10_LUT 0x000a +#define regIH_VMID_10_LUT_BASE_IDX 0 +#define regIH_VMID_11_LUT 0x000b +#define regIH_VMID_11_LUT_BASE_IDX 0 +#define regIH_VMID_12_LUT 0x000c +#define regIH_VMID_12_LUT_BASE_IDX 0 +#define regIH_VMID_13_LUT 0x000d +#define regIH_VMID_13_LUT_BASE_IDX 0 +#define regIH_VMID_14_LUT 0x000e +#define regIH_VMID_14_LUT_BASE_IDX 0 +#define regIH_VMID_15_LUT 0x000f +#define regIH_VMID_15_LUT_BASE_IDX 0 +#define regIH_VMID_0_LUT_MM 0x0010 +#define regIH_VMID_0_LUT_MM_BASE_IDX 0 +#define regIH_VMID_1_LUT_MM 0x0011 +#define regIH_VMID_1_LUT_MM_BASE_IDX 0 +#define regIH_VMID_2_LUT_MM 0x0012 +#define regIH_VMID_2_LUT_MM_BASE_IDX 0 +#define regIH_VMID_3_LUT_MM 0x0013 +#define regIH_VMID_3_LUT_MM_BASE_IDX 0 +#define regIH_VMID_4_LUT_MM 0x0014 +#define regIH_VMID_4_LUT_MM_BASE_IDX 0 +#define regIH_VMID_5_LUT_MM 0x0015 +#define regIH_VMID_5_LUT_MM_BASE_IDX 0 +#define regIH_VMID_6_LUT_MM 0x0016 +#define regIH_VMID_6_LUT_MM_BASE_IDX 0 +#define regIH_VMID_7_LUT_MM 0x0017 +#define regIH_VMID_7_LUT_MM_BASE_IDX 0 +#define regIH_VMID_8_LUT_MM 0x0018 +#define regIH_VMID_8_LUT_MM_BASE_IDX 0 +#define regIH_VMID_9_LUT_MM 0x0019 +#define regIH_VMID_9_LUT_MM_BASE_IDX 0 +#define regIH_VMID_10_LUT_MM 0x001a +#define regIH_VMID_10_LUT_MM_BASE_IDX 0 +#define regIH_VMID_11_LUT_MM 0x001b +#define regIH_VMID_11_LUT_MM_BASE_IDX 0 +#define regIH_VMID_12_LUT_MM 0x001c +#define regIH_VMID_12_LUT_MM_BASE_IDX 0 +#define regIH_VMID_13_LUT_MM 0x001d +#define regIH_VMID_13_LUT_MM_BASE_IDX 0 +#define regIH_VMID_14_LUT_MM 0x001e +#define regIH_VMID_14_LUT_MM_BASE_IDX 0 +#define regIH_VMID_15_LUT_MM 0x001f +#define regIH_VMID_15_LUT_MM_BASE_IDX 0 +#define regIH_COOKIE_0 0x0020 +#define regIH_COOKIE_0_BASE_IDX 0 +#define regIH_COOKIE_1 0x0021 +#define regIH_COOKIE_1_BASE_IDX 0 +#define regIH_COOKIE_2 0x0022 +#define regIH_COOKIE_2_BASE_IDX 0 +#define regIH_COOKIE_3 0x0023 +#define regIH_COOKIE_3_BASE_IDX 0 +#define regIH_COOKIE_4 0x0024 +#define regIH_COOKIE_4_BASE_IDX 0 +#define regIH_COOKIE_5 0x0025 +#define regIH_COOKIE_5_BASE_IDX 0 +#define regIH_COOKIE_6 0x0026 +#define regIH_COOKIE_6_BASE_IDX 0 +#define regIH_COOKIE_7 0x0027 +#define regIH_COOKIE_7_BASE_IDX 0 +#define regIH_REGISTER_LAST_PART0 0x003f +#define regIH_REGISTER_LAST_PART0_BASE_IDX 0 +#define regIH_RB_CNTL 0x0080 +#define regIH_RB_CNTL_BASE_IDX 0 +#define regIH_RB_RPTR 0x0081 +#define regIH_RB_RPTR_BASE_IDX 0 +#define regIH_RB_WPTR 0x0082 +#define regIH_RB_WPTR_BASE_IDX 0 +#define regIH_RB_BASE 0x0083 +#define regIH_RB_BASE_BASE_IDX 0 +#define regIH_RB_BASE_HI 0x0084 +#define regIH_RB_BASE_HI_BASE_IDX 0 +#define regIH_RB_WPTR_ADDR_HI 0x0085 +#define regIH_RB_WPTR_ADDR_HI_BASE_IDX 0 +#define 
regIH_RB_WPTR_ADDR_LO 0x0086 +#define regIH_RB_WPTR_ADDR_LO_BASE_IDX 0 +#define regIH_DOORBELL_RPTR 0x0087 +#define regIH_DOORBELL_RPTR_BASE_IDX 0 +#define regIH_DOORBELL_RETRY_CAM 0x0088 +#define regIH_DOORBELL_RETRY_CAM_BASE_IDX 0 +#define regIH_RB_CNTL_RING1 0x008c +#define regIH_RB_CNTL_RING1_BASE_IDX 0 +#define regIH_RB_RPTR_RING1 0x008d +#define regIH_RB_RPTR_RING1_BASE_IDX 0 +#define regIH_RB_WPTR_RING1 0x008e +#define regIH_RB_WPTR_RING1_BASE_IDX 0 +#define regIH_RB_BASE_RING1 0x008f +#define regIH_RB_BASE_RING1_BASE_IDX 0 +#define regIH_RB_BASE_HI_RING1 0x0090 +#define regIH_RB_BASE_HI_RING1_BASE_IDX 0 +#define regIH_DOORBELL_RPTR_RING1 0x0093 +#define regIH_DOORBELL_RPTR_RING1_BASE_IDX 0 +#define regIH_RETRY_CAM_ACK 0x00a4 +#define regIH_RETRY_CAM_ACK_BASE_IDX 0 +#define regIH_VERSION 0x00a5 +#define regIH_VERSION_BASE_IDX 0 +#define regIH_CNTL 0x00a8 +#define regIH_CNTL_BASE_IDX 0 +#define regIH_CLK_CTRL 0x00a9 +#define regIH_CLK_CTRL_BASE_IDX 0 +#define regIH_STORM_CLIENT_LIST_CNTL 0x00aa +#define regIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0 +#define regIH_LIMIT_INT_RATE_CNTL 0x00ab +#define regIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0 +#define regIH_RETRY_INT_CAM_CNTL 0x00ac +#define regIH_RETRY_INT_CAM_CNTL_BASE_IDX 0 +#define regIH_MEM_POWER_CTRL 0x00ad +#define regIH_MEM_POWER_CTRL_BASE_IDX 0 +#define regIH_MEM_POWER_CTRL2 0x00ae +#define regIH_MEM_POWER_CTRL2_BASE_IDX 0 +#define regIH_CNTL2 0x00c1 +#define regIH_CNTL2_BASE_IDX 0 +#define regIH_STATUS 0x00c2 +#define regIH_STATUS_BASE_IDX 0 +#define regIH_PERFMON_CNTL 0x00c3 +#define regIH_PERFMON_CNTL_BASE_IDX 0 +#define regIH_PERFCOUNTER0_RESULT 0x00c4 +#define regIH_PERFCOUNTER0_RESULT_BASE_IDX 0 +#define regIH_PERFCOUNTER1_RESULT 0x00c5 +#define regIH_PERFCOUNTER1_RESULT_BASE_IDX 0 +#define regIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7 +#define regIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0 +#define regIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8 +#define regIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0 +#define regIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9 +#define regIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0 +#define regIH_DSM_MATCH_FIELD_CONTROL 0x00ca +#define regIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0 +#define regIH_DSM_MATCH_DATA_CONTROL 0x00cb +#define regIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0 +#define regIH_DSM_MATCH_FCN_ID 0x00cc +#define regIH_DSM_MATCH_FCN_ID_BASE_IDX 0 +#define regIH_VF_RB_STATUS 0x00ce +#define regIH_VF_RB_STATUS_BASE_IDX 0 +#define regIH_VF_RB_STATUS2 0x00cf +#define regIH_VF_RB_STATUS2_BASE_IDX 0 +#define regIH_VF_RB1_STATUS 0x00d0 +#define regIH_VF_RB1_STATUS_BASE_IDX 0 +#define regIH_VF_RB1_STATUS2 0x00d1 +#define regIH_VF_RB1_STATUS2_BASE_IDX 0 +#define regIH_RB_STATUS 0x00d4 +#define regIH_RB_STATUS_BASE_IDX 0 +#define regIH_INT_FLOOD_CNTL 0x00d5 +#define regIH_INT_FLOOD_CNTL_BASE_IDX 0 +#define regIH_RB0_INT_FLOOD_STATUS 0x00d6 +#define regIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0 +#define regIH_RB1_INT_FLOOD_STATUS 0x00d7 +#define regIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0 +#define regIH_INT_FLOOD_STATUS 0x00d9 +#define regIH_INT_FLOOD_STATUS_BASE_IDX 0 +#define regIH_INT_FLAGS 0x00dc +#define regIH_INT_FLAGS_BASE_IDX 0 +#define regIH_SCRATCH 0x00e0 +#define regIH_SCRATCH_BASE_IDX 0 +#define regIH_CLIENT_CREDIT_ERROR 0x00e1 +#define regIH_CLIENT_CREDIT_ERROR_BASE_IDX 0 +#define regIH_GPU_IOV_VIOLATION_LOG 0x00e2 +#define regIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0 +#define regIH_GPU_IOV_VIOLATION_LOG2 0x00e3 +#define regIH_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0 +#define regIH_COOKIE_REC_VIOLATION_LOG 0x00e4 +#define regIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0 
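Each reg* offset in this file comes paired with a *_BASE_IDX: the offset is a dword offset relative to a per-IP base-address segment, and BASE_IDX selects which segment. A sketch of that resolution step, assuming a flattened segment table for illustration (amdgpu keeps the real one in adev->reg_offset[hwip][inst][seg] and wraps the lookup in its SOC15-style register macros):

#include <stdint.h>

/* seg_base[] stands in for the per-IP segment table discovered at probe time. */
static inline uint32_t soc_reg_dword(const uint32_t *seg_base,
				     uint32_t reg, uint32_t base_idx)
{
	/* result is the dword offset handed to RREG32()/WREG32() */
	return seg_base[base_idx] + reg;
}

/* e.g. soc_reg_dword(osssys_seg, regIH_RB_CNTL, regIH_RB_CNTL_BASE_IDX) */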
+#define regIH_CREDIT_STATUS 0x00e5 +#define regIH_CREDIT_STATUS_BASE_IDX 0 +#define regIH_MMHUB_ERROR 0x00e6 +#define regIH_MMHUB_ERROR_BASE_IDX 0 +#define regIH_VF_RB_STATUS3 0x00ea +#define regIH_VF_RB_STATUS3_BASE_IDX 0 +#define regIH_VF_RB_STATUS4 0x00eb +#define regIH_VF_RB_STATUS4_BASE_IDX 0 +#define regIH_VF_RB1_STATUS3 0x00ec +#define regIH_VF_RB1_STATUS3_BASE_IDX 0 +#define regIH_MSI_STORM_CTRL 0x00f1 +#define regIH_MSI_STORM_CTRL_BASE_IDX 0 +#define regIH_MSI_STORM_CLIENT_INDEX 0x00f2 +#define regIH_MSI_STORM_CLIENT_INDEX_BASE_IDX 0 +#define regIH_MSI_STORM_CLIENT_DATA 0x00f3 +#define regIH_MSI_STORM_CLIENT_DATA_BASE_IDX 0 +#define regIH_LAST_INT_INFO0 0x00f9 +#define regIH_LAST_INT_INFO0_BASE_IDX 0 +#define regIH_LAST_INT_INFO1 0x00fa +#define regIH_LAST_INT_INFO1_BASE_IDX 0 +#define regIH_LAST_INT_INFO2 0x00fb +#define regIH_LAST_INT_INFO2_BASE_IDX 0 +#define regIH_REGISTER_LAST_PART2 0x00ff +#define regIH_REGISTER_LAST_PART2_BASE_IDX 0 +#define regSEM_MAILBOX 0x010a +#define regSEM_MAILBOX_BASE_IDX 0 +#define regSEM_MAILBOX_CLEAR 0x010b +#define regSEM_MAILBOX_CLEAR_BASE_IDX 0 +#define regSEM_REGISTER_LAST_PART2 0x017f +#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0 +#define regIH_ACTIVE_FCN_ID 0x0180 +#define regIH_ACTIVE_FCN_ID_BASE_IDX 0 +#define regIH_VIRT_RESET_REQ 0x0181 +#define regIH_VIRT_RESET_REQ_BASE_IDX 0 +#define regIH_CLIENT_CFG 0x0182 +#define regIH_CLIENT_CFG_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_INDEX 0x0183 +#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_DATA 0x0184 +#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0 +#define regIH_CLIENT_CFG_INDEX 0x0185 +#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0 +#define regIH_CLIENT_CFG_DATA 0x0186 +#define regIH_CLIENT_CFG_DATA_BASE_IDX 0 +#define regIH_CLIENT_CFG_DATA2 0x0187 +#define regIH_CLIENT_CFG_DATA2_BASE_IDX 0 +#define regIH_CID_REMAP_INDEX 0x0188 +#define regIH_CID_REMAP_INDEX_BASE_IDX 0 +#define regIH_CID_REMAP_DATA 0x0189 +#define regIH_CID_REMAP_DATA_BASE_IDX 0 +#define regIH_CHICKEN 0x018a +#define regIH_CHICKEN_BASE_IDX 0 +#define regIH_INT_DROP_CNTL 0x018c +#define regIH_INT_DROP_CNTL_BASE_IDX 0 +#define regIH_INT_DROP_MATCH_VALUE0 0x018d +#define regIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0 +#define regIH_INT_DROP_MATCH_VALUE1 0x018e +#define regIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0 +#define regIH_INT_DROP_MATCH_MASK0 0x018f +#define regIH_INT_DROP_MATCH_MASK0_BASE_IDX 0 +#define regIH_INT_DROP_MATCH_MASK1 0x0190 +#define regIH_INT_DROP_MATCH_MASK1_BASE_IDX 0 +#define regIH_MMHUB_CNTL 0x01a7 +#define regIH_MMHUB_CNTL_BASE_IDX 0 +#define regIH_REGISTER_LAST_PART1 0x01a8 +#define regIH_REGISTER_LAST_PART1_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h new file mode 100644 index 000000000000..a29607bc0db5 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h @@ -0,0 +1,1029 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _osssys_7_0_0_SH_MASK_HEADER +#define _osssys_7_0_0_SH_MASK_HEADER + + +// addressBlock: osssys_osssysdec +//IH_VMID_0_LUT +#define IH_VMID_0_LUT__PASID__SHIFT 0x0 +#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_1_LUT +#define IH_VMID_1_LUT__PASID__SHIFT 0x0 +#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_2_LUT +#define IH_VMID_2_LUT__PASID__SHIFT 0x0 +#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_3_LUT +#define IH_VMID_3_LUT__PASID__SHIFT 0x0 +#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_4_LUT +#define IH_VMID_4_LUT__PASID__SHIFT 0x0 +#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_5_LUT +#define IH_VMID_5_LUT__PASID__SHIFT 0x0 +#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_6_LUT +#define IH_VMID_6_LUT__PASID__SHIFT 0x0 +#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_7_LUT +#define IH_VMID_7_LUT__PASID__SHIFT 0x0 +#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_8_LUT +#define IH_VMID_8_LUT__PASID__SHIFT 0x0 +#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_9_LUT +#define IH_VMID_9_LUT__PASID__SHIFT 0x0 +#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_10_LUT +#define IH_VMID_10_LUT__PASID__SHIFT 0x0 +#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_11_LUT +#define IH_VMID_11_LUT__PASID__SHIFT 0x0 +#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_12_LUT +#define IH_VMID_12_LUT__PASID__SHIFT 0x0 +#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_13_LUT +#define IH_VMID_13_LUT__PASID__SHIFT 0x0 +#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_14_LUT +#define IH_VMID_14_LUT__PASID__SHIFT 0x0 +#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_15_LUT +#define IH_VMID_15_LUT__PASID__SHIFT 0x0 +#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_0_LUT_MM +#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_1_LUT_MM +#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_2_LUT_MM +#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_3_LUT_MM +#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_4_LUT_MM +#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_5_LUT_MM +#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0 
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_6_LUT_MM +#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_7_LUT_MM +#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_8_LUT_MM +#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_9_LUT_MM +#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_10_LUT_MM +#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_11_LUT_MM +#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_12_LUT_MM +#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_13_LUT_MM +#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_14_LUT_MM +#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_15_LUT_MM +#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_COOKIE_0 +#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0 +#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8 +#define IH_COOKIE_0__RING_ID__SHIFT 0x10 +#define IH_COOKIE_0__VM_ID__SHIFT 0x18 +#define IH_COOKIE_0__RESERVED__SHIFT 0x1c +#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f +#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL +#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L +#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L +#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L +#define IH_COOKIE_0__RESERVED_MASK 0x70000000L +#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L +//IH_COOKIE_1 +#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0 +#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL +//IH_COOKIE_2 +#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0 +#define IH_COOKIE_2__RESERVED__SHIFT 0x10 +#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f +#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL +#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L +#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L +//IH_COOKIE_3 +#define IH_COOKIE_3__PAS_ID__SHIFT 0x0 +#define IH_COOKIE_3__RESERVED__SHIFT 0x10 +#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f +#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL +#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L +#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L +//IH_COOKIE_4 +#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0 +#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL +//IH_COOKIE_5 +#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0 +#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL +//IH_COOKIE_6 +#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0 +#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL +//IH_COOKIE_7 +#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0 +#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL +//IH_REGISTER_LAST_PART0 +#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0 +#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL +//IH_RB_CNTL +#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8 +#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9 +#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa +#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb +#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc +#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10 +#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11 
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12 +#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14 +#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15 +#define IH_RB_CNTL__MC_RO__SHIFT 0x16 +#define IH_RB_CNTL__MC_VMID__SHIFT 0x18 +#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c +#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f +#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L +#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L +#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L +#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L +#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L +#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L +#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L +#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L +#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L +#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L +#define IH_RB_CNTL__MC_RO_MASK 0x00400000L +#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L +#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L +#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L +//IH_RB_RPTR +#define IH_RB_RPTR__OFFSET__SHIFT 0x2 +#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL +//IH_RB_WPTR +#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0 +#define IH_RB_WPTR__OFFSET__SHIFT 0x2 +#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12 +#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13 +#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L +#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL +#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L +#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L +//IH_RB_BASE +#define IH_RB_BASE__ADDR__SHIFT 0x0 +#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//IH_RB_BASE_HI +#define IH_RB_BASE_HI__ADDR__SHIFT 0x0 +#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL +//IH_RB_WPTR_ADDR_HI +#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL +//IH_RB_WPTR_ADDR_LO +#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//IH_DOORBELL_RPTR +#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0 +#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c +#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL +#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L +//IH_DOORBELL_RETRY_CAM +#define IH_DOORBELL_RETRY_CAM__OFFSET__SHIFT 0x0 +#define IH_DOORBELL_RETRY_CAM__ENABLE__SHIFT 0x1c +#define IH_DOORBELL_RETRY_CAM__OFFSET_MASK 0x03FFFFFFL +#define IH_DOORBELL_RETRY_CAM__ENABLE_MASK 0x10000000L +//IH_RB_CNTL_RING1 +#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0 +#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1 +#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9 +#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa +#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb +#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc +#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10 +#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12 +#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14 +#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16 +#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18 +#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c +#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f +#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L +#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL +#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L +#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L +#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L +#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L 
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L +#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L +#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L +#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L +#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L +#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L +#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L +//IH_RB_RPTR_RING1 +#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2 +#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL +//IH_RB_WPTR_RING1 +#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0 +#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2 +#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12 +#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13 +#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L +#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL +#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L +#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L +//IH_RB_BASE_RING1 +#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0 +#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL +//IH_RB_BASE_HI_RING1 +#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0 +#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL +//IH_DOORBELL_RPTR_RING1 +#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0 +#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c +#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL +#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L +//IH_RETRY_CAM_ACK +#define IH_RETRY_CAM_ACK__INDEX__SHIFT 0x0 +#define IH_RETRY_CAM_ACK__INDEX_MASK 0x000003FFL +//IH_VERSION +#define IH_VERSION__MINVER__SHIFT 0x0 +#define IH_VERSION__MAJVER__SHIFT 0x8 +#define IH_VERSION__REV__SHIFT 0x10 +#define IH_VERSION__MINVER_MASK 0x0000007FL +#define IH_VERSION__MAJVER_MASK 0x00007F00L +#define IH_VERSION__REV_MASK 0x003F0000L +//IH_CNTL +#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0 +#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6 +#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8 +#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14 +#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL +#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L +#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L +#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L +//IH_CLK_CTRL +#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x17 +#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE__SHIFT 0x18 +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19 +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a +#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b +#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c +#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d +#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e +#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f +#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE_MASK 0x00800000L +#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE_MASK 0x01000000L +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L +#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L +#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L +#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L +#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L +#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L +//IH_STORM_CLIENT_LIST_CNTL +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1 +#define 
IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L 
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L +//IH_LIMIT_INT_RATE_CNTL +#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0 +#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1 +#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5 +#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11 +#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15 +#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L +#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL +#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L +#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L +#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L +//IH_RETRY_INT_CAM_CNTL +#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE__SHIFT 0x0 +#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE__SHIFT 0x8 +#define IH_RETRY_INT_CAM_CNTL__ENABLE__SHIFT 0x10 +#define IH_RETRY_INT_CAM_CNTL__MM_BACK_PRESSURE_ENABLE__SHIFT 0x11 +#define IH_RETRY_INT_CAM_CNTL__GC_BACK_PRESSURE_ENABLE__SHIFT 0x12 +#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE__SHIFT 0x14 +#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE_MASK 0x0000001FL +#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE_MASK 0x00003F00L +#define IH_RETRY_INT_CAM_CNTL__ENABLE_MASK 0x00010000L +#define IH_RETRY_INT_CAM_CNTL__MM_BACK_PRESSURE_ENABLE_MASK 0x00020000L +#define IH_RETRY_INT_CAM_CNTL__GC_BACK_PRESSURE_ENABLE_MASK 0x00040000L +#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE_MASK 0x00300000L +//IH_MEM_POWER_CTRL +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN__SHIFT 0x10 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN__SHIFT 0x11 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN__SHIFT 0x12 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN__SHIFT 0x13 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS__SHIFT 0x14 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 
0x00000001L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN_MASK 0x00010000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN_MASK 0x00020000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN_MASK 0x00040000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN_MASK 0x00080000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS_MASK 0x00700000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L +//IH_MEM_POWER_CTRL2 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN__SHIFT 0x0 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN__SHIFT 0x1 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN__SHIFT 0x2 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN__SHIFT 0x3 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS__SHIFT 0x4 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN_MASK 0x00000001L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN_MASK 0x00000002L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN_MASK 0x00000004L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN_MASK 0x00000008L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS_MASK 0x00000070L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L +//IH_CNTL2 +#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0 +#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8 +#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL +#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L +//IH_STATUS +#define IH_STATUS__IDLE__SHIFT 0x0 +#define IH_STATUS__INPUT_IDLE__SHIFT 0x1 +#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2 +#define IH_STATUS__RB_FULL__SHIFT 0x3 +#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4 +#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5 +#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6 +#define IH_STATUS__MC_WR_STALL__SHIFT 0x7 +#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8 +#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9 +#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa +#define IH_STATUS__SWITCH_READY__SHIFT 0xb +#define IH_STATUS__RB1_FULL__SHIFT 0xc +#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd +#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe +#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12 +#define IH_STATUS__RETRY_INT_CAM_IDLE__SHIFT 0x13 +#define IH_STATUS__ZSTATES_FENCE__SHIFT 0x14 +#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED__SHIFT 0x15 +#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED__SHIFT 0x16 +#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED__SHIFT 0x17 +#define IH_STATUS__IDLE_MASK 0x00000001L +#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L +#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L +#define 
IH_STATUS__RB_FULL_MASK 0x00000008L +#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L +#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L +#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L +#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L +#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L +#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L +#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L +#define IH_STATUS__SWITCH_READY_MASK 0x00000800L +#define IH_STATUS__RB1_FULL_MASK 0x00001000L +#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L +#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L +#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L +#define IH_STATUS__RETRY_INT_CAM_IDLE_MASK 0x00080000L +#define IH_STATUS__ZSTATES_FENCE_MASK 0x00100000L +#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED_MASK 0x00200000L +#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED_MASK 0x00400000L +#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED_MASK 0x00800000L +//IH_PERFMON_CNTL +#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0 +#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1 +#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10 +#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11 +#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12 +#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L +#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L +#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x00000FFCL +#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L +#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L +#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x0FFC0000L +//IH_PERFCOUNTER0_RESULT +#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//IH_PERFCOUNTER1_RESULT +#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//IH_DSM_MATCH_VALUE_BIT_31_0 +#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0 +#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL +//IH_DSM_MATCH_VALUE_BIT_63_32 +#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0 +#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL +//IH_DSM_MATCH_VALUE_BIT_95_64 +#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0 +#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL +//IH_DSM_MATCH_FIELD_CONTROL +#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0 +#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1 +#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2 +#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3 +#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4 +#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5 +#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6 +#define IH_DSM_MATCH_FIELD_CONTROL__DIEID_EN__SHIFT 0x7 +#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L +#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L +#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L +#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L +#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L +#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L +#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L +#define IH_DSM_MATCH_FIELD_CONTROL__DIEID_EN_MASK 0x00000080L +//IH_DSM_MATCH_DATA_CONTROL +#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0 +#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL +//IH_DSM_MATCH_FCN_ID +#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x0 +#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x7 
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000001FL +#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000080L +//IH_VF_RB_STATUS +#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0 +#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x00FFFFFFL +//IH_VF_RB_STATUS2 +#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0 +#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x00FFFFFFL +//IH_VF_RB1_STATUS +#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0 +#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x00FFFFFFL +//IH_VF_RB1_STATUS2 +#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0 +#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x00FFFFFFL +//IH_RB_STATUS +#define IH_RB_STATUS__RB_FULL__SHIFT 0x0 +#define IH_RB_STATUS__RB_FULL_DRAIN__SHIFT 0x1 +#define IH_RB_STATUS__RB_OVERFLOW__SHIFT 0x2 +#define IH_RB_STATUS__RB1_FULL__SHIFT 0x4 +#define IH_RB_STATUS__RB1_FULL_DRAIN__SHIFT 0x5 +#define IH_RB_STATUS__RB1_OVERFLOW__SHIFT 0x6 +#define IH_RB_STATUS__RB_FULL_MASK 0x00000001L +#define IH_RB_STATUS__RB_FULL_DRAIN_MASK 0x00000002L +#define IH_RB_STATUS__RB_OVERFLOW_MASK 0x00000004L +#define IH_RB_STATUS__RB1_FULL_MASK 0x00000010L +#define IH_RB_STATUS__RB1_FULL_DRAIN_MASK 0x00000020L +#define IH_RB_STATUS__RB1_OVERFLOW_MASK 0x00000040L +//IH_INT_FLOOD_CNTL +#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0 +#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3 +#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4 +#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L +#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L +#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L +//IH_RB0_INT_FLOOD_STATUS +#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0 +#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f +#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x00FFFFFFL +#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L +//IH_RB1_INT_FLOOD_STATUS +#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0 +#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f +#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x00FFFFFFL +#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L +//IH_INT_FLOOD_STATUS +#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0 +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8 +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10 +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18 +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1d +#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e +#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x1F000000L +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x20000000L +#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L +//IH_INT_FLAGS +#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0 +#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1 +#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2 +#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3 +#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4 +#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5 +#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6 +#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7 +#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8 +#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9 +#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa +#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb 
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc +#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd +#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe +#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf +#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10 +#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11 +#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12 +#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13 +#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14 +#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15 +#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16 +#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17 +#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18 +#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19 +#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a +#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b +#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c +#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d +#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e +#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f +#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L +#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L +#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L +#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L +#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L +#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L +#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L +#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L +#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L +#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L +#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L +#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L +#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L +#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L +#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L +#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L +#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L +#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L +#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L +#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L +#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L +#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L +#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L +#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L +#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L +#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L +#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L +#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L +#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L +#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L +#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L +#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L +//IH_SCRATCH +#define IH_SCRATCH__DATA__SHIFT 0x0 +#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL +//IH_CLIENT_CREDIT_ERROR +#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa +#define 
IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb +#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc +#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd +#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe +#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf +#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a +#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b +#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c +#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d +#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e +#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f +#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L +//IH_GPU_IOV_VIOLATION_LOG +#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define 
IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x16 +#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x17 +#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x18 +#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00400000L +#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00800000L +#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x1F000000L +//IH_GPU_IOV_VIOLATION_LOG2 +#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL +//IH_COOKIE_REC_VIOLATION_LOG +#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x8 +#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x10 +#define IH_COOKIE_REC_VIOLATION_LOG__DIE_ID__SHIFT 0x1a +#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x0000FF00L +#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0x03FF0000L +#define IH_COOKIE_REC_VIOLATION_LOG__DIE_ID_MASK 0x3C000000L +//IH_CREDIT_STATUS +#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1 +#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2 +#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3 +#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4 +#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5 +#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6 +#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7 +#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8 +#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9 +#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa +#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb +#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc +#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd +#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe +#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf +#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10 +#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11 +#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12 +#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13 +#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14 +#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15 +#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16 +#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17 +#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18 +#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19 +#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a +#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b +#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c +#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d +#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e +#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f +#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L +#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L +#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L +#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 
0x00000010L +#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L +#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L +#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L +#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L +#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L +#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L +#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L +#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L +#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L +#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L +#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L +#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L +#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L +#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L +#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L +#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L +#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L +#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L +#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L +#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L +#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L +#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L +#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L +#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L +#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L +#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L +#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L +//IH_MMHUB_ERROR +#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1 +#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2 +#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3 +#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5 +#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6 +#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7 +#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L +#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L +#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L +#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L +#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L +#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L +//IH_VF_RB_STATUS3 +#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0 +#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF_MASK 0x00FFFFFFL +//IH_VF_RB_STATUS4 +#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF__SHIFT 0x0 +#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF_MASK 0x00FFFFFFL +//IH_VF_RB1_STATUS3 +#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0 +#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF_MASK 0x00FFFFFFL +//IH_MSI_STORM_CTRL +#define IH_MSI_STORM_CTRL__DELAY__SHIFT 0x0 +#define IH_MSI_STORM_CTRL__DELAY_MASK 0x00000FFFL +//IH_MSI_STORM_CLIENT_INDEX +#define IH_MSI_STORM_CLIENT_INDEX__INDEX__SHIFT 0x0 +#define IH_MSI_STORM_CLIENT_INDEX__INDEX_MASK 0x00000007L +//IH_MSI_STORM_CLIENT_DATA +#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID__SHIFT 0x8 +#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10 +#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE__SHIFT 0x11 +#define 
IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID__SHIFT 0x1f +#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MASK 0x0000FF00L +#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L +#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE_MASK 0x00020000L +#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID_MASK 0x80000000L +//IH_LAST_INT_INFO0 +#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0 +#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8 +#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10 +#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18 +#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f +#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL +#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L +#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L +#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L +#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L +//IH_LAST_INT_INFO1 +#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0 +#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL +//IH_LAST_INT_INFO2 +#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0 +#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10 +#define IH_LAST_INT_INFO2__VF__SHIFT 0x17 +#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL +#define IH_LAST_INT_INFO2__VF_ID_MASK 0x001F0000L +#define IH_LAST_INT_INFO2__VF_MASK 0x00800000L +//IH_REGISTER_LAST_PART2 +#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0 +#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL +//SEM_MAILBOX +#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0 +#define SEM_MAILBOX__RESERVED__SHIFT 0x10 +#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL +#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L +//SEM_MAILBOX_CLEAR +#define SEM_MAILBOX_CLEAR__CLEAR__SHIFT 0x0 +#define SEM_MAILBOX_CLEAR__RESERVED__SHIFT 0x10 +#define SEM_MAILBOX_CLEAR__CLEAR_MASK 0x0000FFFFL +#define SEM_MAILBOX_CLEAR__RESERVED_MASK 0xFFFF0000L +//SEM_REGISTER_LAST_PART2 +#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0 +#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL +//IH_ACTIVE_FCN_ID +#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0 +#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x5 +#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f +#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000001FL +#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFE0L +#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L +//IH_VIRT_RESET_REQ +#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define IH_VIRT_RESET_REQ__VF_MASK 0x00FFFFFFL +#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L +//IH_CLIENT_CFG +#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0 +#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000001FL +//IH_RING1_CLIENT_CFG_INDEX +#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L +//IH_RING1_CLIENT_CFG_DATA +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10 +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L +//IH_CLIENT_CFG_INDEX +#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 +#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL +//IH_CLIENT_CFG_DATA +#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12 +#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16 +#define 
IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18 +#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE__SHIFT 0x19 +#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L +#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L +#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L +#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE_MASK 0x02000000L +//IH_CLIENT_CFG_DATA2 +#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR__SHIFT 0x0 +#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR_MASK 0xFFFFFFFFL +//IH_CID_REMAP_INDEX +#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0 +#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L +//IH_CID_REMAP_DATA +#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8 +#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x18 +#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0003FF00L +#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0xFF000000L +//IH_CHICKEN +#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0 +#define IH_CHICKEN__DBGU_TRIGGER_ENABLE__SHIFT 0x1 +#define IH_CHICKEN__CROSS_TRIGGER_ENABLE__SHIFT 0x2 +#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3 +#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4 +#define IH_CHICKEN__REG_FIREWALL_ENABLE__SHIFT 0x5 +#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L +#define IH_CHICKEN__DBGU_TRIGGER_ENABLE_MASK 0x00000002L +#define IH_CHICKEN__CROSS_TRIGGER_ENABLE_MASK 0x00000004L +#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L +#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L +#define IH_CHICKEN__REG_FIREWALL_ENABLE_MASK 0x00000020L +//IH_INT_DROP_CNTL +#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0 +#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1 +#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2 +#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3 +#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4 +#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5 +#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6 +#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8 +#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10 +#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L +#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L +#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L +#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L +#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L +#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L +#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L +#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L +#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L +//IH_INT_DROP_MATCH_VALUE0 +#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0 +#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8 +#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10 +#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17 +#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18 +#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL +#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L +#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x001F0000L +#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L +#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L +//IH_INT_DROP_MATCH_VALUE1 +#define 
IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0 +#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL +//IH_INT_DROP_MATCH_MASK0 +#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0 +#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8 +#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10 +#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17 +#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18 +#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL +#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L +#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x001F0000L +#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L +#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L +//IH_INT_DROP_MATCH_MASK1 +#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0 +#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL +//IH_MMHUB_CNTL +#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0 +#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8 +#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc +#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL +#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000F00L +#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x0000F000L +//IH_REGISTER_LAST_PART1 +#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0 +#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h new file mode 100644 index 000000000000..14574112c469 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h @@ -0,0 +1,1672 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _vcn_5_0_0_OFFSET_HEADER +#define _vcn_5_0_0_OFFSET_HEADER + + + +// addressBlock: uvd_uvddec +// base address: 0x1fc00 +#define regUVD_TOP_CTRL 0x0100 +#define regUVD_TOP_CTRL_BASE_IDX 1 +#define regUVD_CGC_GATE 0x0101 +#define regUVD_CGC_GATE_BASE_IDX 1 +#define regUVD_CGC_CTRL 0x0102 +#define regUVD_CGC_CTRL_BASE_IDX 1 +#define regAVM_SUVD_CGC_GATE 0x0104 +#define regAVM_SUVD_CGC_GATE_BASE_IDX 1 +#define regEFC_SUVD_CGC_GATE 0x0104 +#define regEFC_SUVD_CGC_GATE_BASE_IDX 1 +#define regENT_SUVD_CGC_GATE 0x0104 +#define regENT_SUVD_CGC_GATE_BASE_IDX 1 +#define regIME_SUVD_CGC_GATE 0x0104 +#define regIME_SUVD_CGC_GATE_BASE_IDX 1 +#define regPPU_SUVD_CGC_GATE 0x0104 +#define regPPU_SUVD_CGC_GATE_BASE_IDX 1 +#define regSAOE_SUVD_CGC_GATE 0x0104 +#define regSAOE_SUVD_CGC_GATE_BASE_IDX 1 +#define regSCM_SUVD_CGC_GATE 0x0104 +#define regSCM_SUVD_CGC_GATE_BASE_IDX 1 +#define regSDB_SUVD_CGC_GATE 0x0104 +#define regSDB_SUVD_CGC_GATE_BASE_IDX 1 +#define regSIT0_NXT_SUVD_CGC_GATE 0x0104 +#define regSIT0_NXT_SUVD_CGC_GATE_BASE_IDX 1 +#define regSIT1_NXT_SUVD_CGC_GATE 0x0104 +#define regSIT1_NXT_SUVD_CGC_GATE_BASE_IDX 1 +#define regSIT2_NXT_SUVD_CGC_GATE 0x0104 +#define regSIT2_NXT_SUVD_CGC_GATE_BASE_IDX 1 +#define regSIT_SUVD_CGC_GATE 0x0104 +#define regSIT_SUVD_CGC_GATE_BASE_IDX 1 +#define regSMPA_SUVD_CGC_GATE 0x0104 +#define regSMPA_SUVD_CGC_GATE_BASE_IDX 1 +#define regSMP_SUVD_CGC_GATE 0x0104 +#define regSMP_SUVD_CGC_GATE_BASE_IDX 1 +#define regSRE_SUVD_CGC_GATE 0x0104 +#define regSRE_SUVD_CGC_GATE_BASE_IDX 1 +#define regUVD_SUVD_CGC_GATE 0x0104 +#define regUVD_SUVD_CGC_GATE_BASE_IDX 1 +#define regAVM_SUVD_CGC_GATE2 0x0105 +#define regAVM_SUVD_CGC_GATE2_BASE_IDX 1 +#define regDBR_SUVD_CGC_GATE2 0x0105 +#define regDBR_SUVD_CGC_GATE2_BASE_IDX 1 +#define regENT_SUVD_CGC_GATE2 0x0105 +#define regENT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regIME_SUVD_CGC_GATE2 0x0105 +#define regIME_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSAOE_SUVD_CGC_GATE2 0x0105 +#define regSAOE_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSDB_SUVD_CGC_GATE2 0x0105 +#define regSDB_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSIT0_NXT_SUVD_CGC_GATE2 0x0105 +#define regSIT0_NXT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSIT1_NXT_SUVD_CGC_GATE2 0x0105 +#define regSIT1_NXT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSIT2_NXT_SUVD_CGC_GATE2 0x0105 +#define regSIT2_NXT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSIT_SUVD_CGC_GATE2 0x0105 +#define regSIT_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSMPA_SUVD_CGC_GATE2 0x0105 +#define regSMPA_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSMP_SUVD_CGC_GATE2 0x0105 +#define regSMP_SUVD_CGC_GATE2_BASE_IDX 1 +#define regSRE_SUVD_CGC_GATE2 0x0105 +#define regSRE_SUVD_CGC_GATE2_BASE_IDX 1 +#define regUVD_SUVD_CGC_GATE2 0x0105 +#define regUVD_SUVD_CGC_GATE2_BASE_IDX 1 +#define regAVM_SUVD_CGC_CTRL 0x0106 +#define regAVM_SUVD_CGC_CTRL_BASE_IDX 1 +#define regDBR_SUVD_CGC_CTRL 0x0106 +#define regDBR_SUVD_CGC_CTRL_BASE_IDX 1 +#define regEFC_SUVD_CGC_CTRL 0x0106 +#define regEFC_SUVD_CGC_CTRL_BASE_IDX 1 +#define regENT_SUVD_CGC_CTRL 0x0106 +#define regENT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regIME_SUVD_CGC_CTRL 0x0106 +#define regIME_SUVD_CGC_CTRL_BASE_IDX 1 +#define regPPU_SUVD_CGC_CTRL 0x0106 +#define regPPU_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSAOE_SUVD_CGC_CTRL 0x0106 +#define regSAOE_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSCM_SUVD_CGC_CTRL 0x0106 +#define regSCM_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSDB_SUVD_CGC_CTRL 0x0106 +#define regSDB_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSIT0_NXT_SUVD_CGC_CTRL 0x0106 +#define 
regSIT0_NXT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSIT1_NXT_SUVD_CGC_CTRL 0x0106 +#define regSIT1_NXT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSIT2_NXT_SUVD_CGC_CTRL 0x0106 +#define regSIT2_NXT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSIT_SUVD_CGC_CTRL 0x0106 +#define regSIT_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSMPA_SUVD_CGC_CTRL 0x0106 +#define regSMPA_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSMP_SUVD_CGC_CTRL 0x0106 +#define regSMP_SUVD_CGC_CTRL_BASE_IDX 1 +#define regSRE_SUVD_CGC_CTRL 0x0106 +#define regSRE_SUVD_CGC_CTRL_BASE_IDX 1 +#define regUVD_SUVD_CGC_CTRL 0x0106 +#define regUVD_SUVD_CGC_CTRL_BASE_IDX 1 +#define regUVD_CGC_CTRL3 0x010a +#define regUVD_CGC_CTRL3_BASE_IDX 1 +#define regUVD_GPCOM_VCPU_DATA0 0x0110 +#define regUVD_GPCOM_VCPU_DATA0_BASE_IDX 1 +#define regUVD_GPCOM_VCPU_DATA1 0x0111 +#define regUVD_GPCOM_VCPU_DATA1_BASE_IDX 1 +#define regUVD_GPCOM_SYS_CMD 0x0112 +#define regUVD_GPCOM_SYS_CMD_BASE_IDX 1 +#define regUVD_GPCOM_SYS_DATA0 0x0113 +#define regUVD_GPCOM_SYS_DATA0_BASE_IDX 1 +#define regUVD_GPCOM_SYS_DATA1 0x0114 +#define regUVD_GPCOM_SYS_DATA1_BASE_IDX 1 +#define regUVD_VCPU_INT_EN 0x0115 +#define regUVD_VCPU_INT_EN_BASE_IDX 1 +#define regUVD_VCPU_INT_STATUS 0x0116 +#define regUVD_VCPU_INT_STATUS_BASE_IDX 1 +#define regUVD_VCPU_INT_ACK 0x0117 +#define regUVD_VCPU_INT_ACK_BASE_IDX 1 +#define regUVD_VCPU_INT_ROUTE 0x0118 +#define regUVD_VCPU_INT_ROUTE_BASE_IDX 1 +#define regUVD_DRV_FW_MSG 0x0119 +#define regUVD_DRV_FW_MSG_BASE_IDX 1 +#define regUVD_FW_DRV_MSG_ACK 0x011a +#define regUVD_FW_DRV_MSG_ACK_BASE_IDX 1 +#define regUVD_SUVD_INT_EN 0x011b +#define regUVD_SUVD_INT_EN_BASE_IDX 1 +#define regUVD_SUVD_INT_STATUS 0x011c +#define regUVD_SUVD_INT_STATUS_BASE_IDX 1 +#define regUVD_SUVD_INT_ACK 0x011d +#define regUVD_SUVD_INT_ACK_BASE_IDX 1 +#define regUVD_ENC_VCPU_INT_EN 0x011e +#define regUVD_ENC_VCPU_INT_EN_BASE_IDX 1 +#define regUVD_ENC_VCPU_INT_STATUS 0x011f +#define regUVD_ENC_VCPU_INT_STATUS_BASE_IDX 1 +#define regUVD_ENC_VCPU_INT_ACK 0x0120 +#define regUVD_ENC_VCPU_INT_ACK_BASE_IDX 1 +#define regUVD_MASTINT_EN 0x0121 +#define regUVD_MASTINT_EN_BASE_IDX 1 +#define regUVD_SYS_INT_EN 0x0122 +#define regUVD_SYS_INT_EN_BASE_IDX 1 +#define regUVD_SYS_INT_STATUS 0x0123 +#define regUVD_SYS_INT_STATUS_BASE_IDX 1 +#define regUVD_SYS_INT_ACK 0x0124 +#define regUVD_SYS_INT_ACK_BASE_IDX 1 +#define regUVD_JOB_DONE 0x0125 +#define regUVD_JOB_DONE_BASE_IDX 1 +#define regUVD_CBUF_ID 0x0126 +#define regUVD_CBUF_ID_BASE_IDX 1 +#define regUVD_CONTEXT_ID 0x0127 +#define regUVD_CONTEXT_ID_BASE_IDX 1 +#define regUVD_CONTEXT_ID2 0x0128 +#define regUVD_CONTEXT_ID2_BASE_IDX 1 +#define regUVD_NO_OP 0x0129 +#define regUVD_NO_OP_BASE_IDX 1 +#define regUVD_RB_BASE_LO 0x012a +#define regUVD_RB_BASE_LO_BASE_IDX 1 +#define regUVD_RB_BASE_HI 0x012b +#define regUVD_RB_BASE_HI_BASE_IDX 1 +#define regUVD_RB_SIZE 0x012c +#define regUVD_RB_SIZE_BASE_IDX 1 +#define regUVD_RB_BASE_LO2 0x012f +#define regUVD_RB_BASE_LO2_BASE_IDX 1 +#define regUVD_RB_BASE_HI2 0x0130 +#define regUVD_RB_BASE_HI2_BASE_IDX 1 +#define regUVD_RB_SIZE2 0x0131 +#define regUVD_RB_SIZE2_BASE_IDX 1 +#define regUVD_RB_BASE_LO3 0x0134 +#define regUVD_RB_BASE_LO3_BASE_IDX 1 +#define regUVD_RB_BASE_HI3 0x0135 +#define regUVD_RB_BASE_HI3_BASE_IDX 1 +#define regUVD_RB_SIZE3 0x0136 +#define regUVD_RB_SIZE3_BASE_IDX 1 +#define regUVD_RB_BASE_LO4 0x0139 +#define regUVD_RB_BASE_LO4_BASE_IDX 1 +#define regUVD_RB_BASE_HI4 0x013a +#define regUVD_RB_BASE_HI4_BASE_IDX 1 +#define regUVD_RB_SIZE4 0x013b +#define regUVD_RB_SIZE4_BASE_IDX 1 +#define 
regUVD_OUT_RB_BASE_LO 0x013e +#define regUVD_OUT_RB_BASE_LO_BASE_IDX 1 +#define regUVD_OUT_RB_BASE_HI 0x013f +#define regUVD_OUT_RB_BASE_HI_BASE_IDX 1 +#define regUVD_OUT_RB_SIZE 0x0140 +#define regUVD_OUT_RB_SIZE_BASE_IDX 1 +#define regUVD_IOV_ACTIVE_FCN_ID 0x0143 +#define regUVD_IOV_ACTIVE_FCN_ID_BASE_IDX 1 +#define regUVD_IOV_MAILBOX 0x0144 +#define regUVD_IOV_MAILBOX_BASE_IDX 1 +#define regUVD_IOV_MAILBOX_RESP 0x0145 +#define regUVD_IOV_MAILBOX_RESP_BASE_IDX 1 +#define regUVD_RB_ARB_CTRL 0x0146 +#define regUVD_RB_ARB_CTRL_BASE_IDX 1 +#define regUVD_CTX_INDEX 0x0147 +#define regUVD_CTX_INDEX_BASE_IDX 1 +#define regUVD_CTX_DATA 0x0148 +#define regUVD_CTX_DATA_BASE_IDX 1 +#define regUVD_CXW_WR 0x0149 +#define regUVD_CXW_WR_BASE_IDX 1 +#define regUVD_CXW_WR_INT_ID 0x014a +#define regUVD_CXW_WR_INT_ID_BASE_IDX 1 +#define regUVD_CXW_WR_INT_CTX_ID 0x014b +#define regUVD_CXW_WR_INT_CTX_ID_BASE_IDX 1 +#define regUVD_CXW_INT_ID 0x014c +#define regUVD_CXW_INT_ID_BASE_IDX 1 +#define regUVD_MPEG2_ERROR 0x014d +#define regUVD_MPEG2_ERROR_BASE_IDX 1 +#define regUVD_YBASE 0x0150 +#define regUVD_YBASE_BASE_IDX 1 +#define regUVD_UVBASE 0x0151 +#define regUVD_UVBASE_BASE_IDX 1 +#define regUVD_PITCH 0x0152 +#define regUVD_PITCH_BASE_IDX 1 +#define regUVD_WIDTH 0x0153 +#define regUVD_WIDTH_BASE_IDX 1 +#define regUVD_HEIGHT 0x0154 +#define regUVD_HEIGHT_BASE_IDX 1 +#define regUVD_PICCOUNT 0x0155 +#define regUVD_PICCOUNT_BASE_IDX 1 +#define regUVD_MPRD_INITIAL_XY 0x0156 +#define regUVD_MPRD_INITIAL_XY_BASE_IDX 1 +#define regUVD_MPEG2_CTRL 0x0157 +#define regUVD_MPEG2_CTRL_BASE_IDX 1 +#define regUVD_MB_CTL_BUF_BASE 0x0158 +#define regUVD_MB_CTL_BUF_BASE_BASE_IDX 1 +#define regUVD_PIC_CTL_BUF_BASE 0x0159 +#define regUVD_PIC_CTL_BUF_BASE_BASE_IDX 1 +#define regUVD_DXVA_BUF_SIZE 0x015a +#define regUVD_DXVA_BUF_SIZE_BASE_IDX 1 +#define regUVD_SCRATCH_NP 0x015b +#define regUVD_SCRATCH_NP_BASE_IDX 1 +#define regUVD_CLK_SWT_HANDSHAKE 0x015c +#define regUVD_CLK_SWT_HANDSHAKE_BASE_IDX 1 +#define regUVD_GP_SCRATCH0 0x015e +#define regUVD_GP_SCRATCH0_BASE_IDX 1 +#define regUVD_GP_SCRATCH1 0x015f +#define regUVD_GP_SCRATCH1_BASE_IDX 1 +#define regUVD_GP_SCRATCH2 0x0160 +#define regUVD_GP_SCRATCH2_BASE_IDX 1 +#define regUVD_GP_SCRATCH3 0x0161 +#define regUVD_GP_SCRATCH3_BASE_IDX 1 +#define regUVD_GP_SCRATCH4 0x0162 +#define regUVD_GP_SCRATCH4_BASE_IDX 1 +#define regUVD_GP_SCRATCH5 0x0163 +#define regUVD_GP_SCRATCH5_BASE_IDX 1 +#define regUVD_GP_SCRATCH6 0x0164 +#define regUVD_GP_SCRATCH6_BASE_IDX 1 +#define regUVD_GP_SCRATCH7 0x0165 +#define regUVD_GP_SCRATCH7_BASE_IDX 1 +#define regUVD_GP_SCRATCH8 0x0166 +#define regUVD_GP_SCRATCH8_BASE_IDX 1 +#define regUVD_GP_SCRATCH9 0x0167 +#define regUVD_GP_SCRATCH9_BASE_IDX 1 +#define regUVD_GP_SCRATCH10 0x0168 +#define regUVD_GP_SCRATCH10_BASE_IDX 1 +#define regUVD_GP_SCRATCH11 0x0169 +#define regUVD_GP_SCRATCH11_BASE_IDX 1 +#define regUVD_GP_SCRATCH12 0x016a +#define regUVD_GP_SCRATCH12_BASE_IDX 1 +#define regUVD_GP_SCRATCH13 0x016b +#define regUVD_GP_SCRATCH13_BASE_IDX 1 +#define regUVD_GP_SCRATCH14 0x016c +#define regUVD_GP_SCRATCH14_BASE_IDX 1 +#define regUVD_GP_SCRATCH15 0x016d +#define regUVD_GP_SCRATCH15_BASE_IDX 1 +#define regUVD_GP_SCRATCH16 0x016e +#define regUVD_GP_SCRATCH16_BASE_IDX 1 +#define regUVD_GP_SCRATCH17 0x016f +#define regUVD_GP_SCRATCH17_BASE_IDX 1 +#define regUVD_GP_SCRATCH18 0x0170 +#define regUVD_GP_SCRATCH18_BASE_IDX 1 +#define regUVD_GP_SCRATCH19 0x0171 +#define regUVD_GP_SCRATCH19_BASE_IDX 1 +#define regUVD_GP_SCRATCH20 0x0172 +#define 
regUVD_GP_SCRATCH20_BASE_IDX 1 +#define regUVD_GP_SCRATCH21 0x0173 +#define regUVD_GP_SCRATCH21_BASE_IDX 1 +#define regUVD_GP_SCRATCH22 0x0174 +#define regUVD_GP_SCRATCH22_BASE_IDX 1 +#define regUVD_GP_SCRATCH23 0x0175 +#define regUVD_GP_SCRATCH23_BASE_IDX 1 +#define regUVD_AUDIO_RB_BASE_LO 0x0176 +#define regUVD_AUDIO_RB_BASE_LO_BASE_IDX 1 +#define regUVD_AUDIO_RB_BASE_HI 0x0177 +#define regUVD_AUDIO_RB_BASE_HI_BASE_IDX 1 +#define regUVD_AUDIO_RB_SIZE 0x0178 +#define regUVD_AUDIO_RB_SIZE_BASE_IDX 1 +#define regUVD_VCPU_INT_STATUS2 0x017b +#define regUVD_VCPU_INT_STATUS2_BASE_IDX 1 +#define regUVD_VCPU_INT_ACK2 0x017c +#define regUVD_VCPU_INT_ACK2_BASE_IDX 1 +#define regUVD_VCPU_INT_EN2 0x017d +#define regUVD_VCPU_INT_EN2_BASE_IDX 1 +#define regUVD_SUVD_CGC_STATUS2 0x017e +#define regUVD_SUVD_CGC_STATUS2_BASE_IDX 1 +#define regUVD_SUVD_INT_STATUS2 0x0180 +#define regUVD_SUVD_INT_STATUS2_BASE_IDX 1 +#define regUVD_SUVD_INT_EN2 0x0181 +#define regUVD_SUVD_INT_EN2_BASE_IDX 1 +#define regUVD_SUVD_INT_ACK2 0x0182 +#define regUVD_SUVD_INT_ACK2_BASE_IDX 1 +#define regUVD_STATUS 0x0183 +#define regUVD_STATUS_BASE_IDX 1 +#define regUVD_ENC_PIPE_BUSY 0x0184 +#define regUVD_ENC_PIPE_BUSY_BASE_IDX 1 +#define regUVD_FW_POWER_STATUS 0x0185 +#define regUVD_FW_POWER_STATUS_BASE_IDX 1 +#define regUVD_CNTL 0x0186 +#define regUVD_CNTL_BASE_IDX 1 +#define regUVD_SOFT_RESET 0x0187 +#define regUVD_SOFT_RESET_BASE_IDX 1 +#define regUVD_SOFT_RESET2 0x0188 +#define regUVD_SOFT_RESET2_BASE_IDX 1 +#define regUVD_MMSCH_SOFT_RESET 0x0189 +#define regUVD_MMSCH_SOFT_RESET_BASE_IDX 1 +#define regUVD_WIG_CTRL 0x018a +#define regUVD_WIG_CTRL_BASE_IDX 1 +#define regUVD_CGC_STATUS 0x018c +#define regUVD_CGC_STATUS_BASE_IDX 1 +#define regUVD_CGC_UDEC_STATUS 0x018e +#define regUVD_CGC_UDEC_STATUS_BASE_IDX 1 +#define regUVD_SUVD_CGC_STATUS 0x0190 +#define regUVD_SUVD_CGC_STATUS_BASE_IDX 1 +#define regUVD_GPCOM_VCPU_CMD 0x0192 +#define regUVD_GPCOM_VCPU_CMD_BASE_IDX 1 + + +// addressBlock: uvd_vcn_cdefe_cdefe_broadcast_dec0 +// base address: 0x1fc00 +#define regCDEFE_SUVD_CGC_GATE 0x0104 +#define regCDEFE_SUVD_CGC_GATE_BASE_IDX 1 +#define regCDEFE_SUVD_CGC_GATE2 0x0105 +#define regCDEFE_SUVD_CGC_GATE2_BASE_IDX 1 +#define regCDEFE_SUVD_CGC_CTRL 0x0106 +#define regCDEFE_SUVD_CGC_CTRL_BASE_IDX 1 + + +// addressBlock: uvd_ecpudec +// base address: 0x1ff00 +#define regUVD_VCPU_CACHE_OFFSET0 0x01c0 +#define regUVD_VCPU_CACHE_OFFSET0_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE0 0x01c1 +#define regUVD_VCPU_CACHE_SIZE0_BASE_IDX 1 +#define regUVD_VCPU_CACHE_OFFSET1 0x01c2 +#define regUVD_VCPU_CACHE_OFFSET1_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE1 0x01c3 +#define regUVD_VCPU_CACHE_SIZE1_BASE_IDX 1 +#define regUVD_VCPU_CACHE_OFFSET2 0x01c4 +#define regUVD_VCPU_CACHE_OFFSET2_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE2 0x01c5 +#define regUVD_VCPU_CACHE_SIZE2_BASE_IDX 1 +#define regUVD_VCPU_CACHE_OFFSET3 0x01c6 +#define regUVD_VCPU_CACHE_OFFSET3_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE3 0x01c7 +#define regUVD_VCPU_CACHE_SIZE3_BASE_IDX 1 +#define regUVD_VCPU_CACHE_OFFSET4 0x01c8 +#define regUVD_VCPU_CACHE_OFFSET4_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE4 0x01c9 +#define regUVD_VCPU_CACHE_SIZE4_BASE_IDX 1 +#define regUVD_VCPU_CACHE_OFFSET5 0x01ca +#define regUVD_VCPU_CACHE_OFFSET5_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE5 0x01cb +#define regUVD_VCPU_CACHE_SIZE5_BASE_IDX 1 +#define regUVD_VCPU_CACHE_OFFSET6 0x01cc +#define regUVD_VCPU_CACHE_OFFSET6_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE6 0x01cd +#define 
regUVD_VCPU_CACHE_SIZE6_BASE_IDX 1 +#define regUVD_VCPU_CACHE_OFFSET7 0x01ce +#define regUVD_VCPU_CACHE_OFFSET7_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE7 0x01cf +#define regUVD_VCPU_CACHE_SIZE7_BASE_IDX 1 +#define regUVD_VCPU_CACHE_OFFSET8 0x01d0 +#define regUVD_VCPU_CACHE_OFFSET8_BASE_IDX 1 +#define regUVD_VCPU_CACHE_SIZE8 0x01d1 +#define regUVD_VCPU_CACHE_SIZE8_BASE_IDX 1 +#define regUVD_VCPU_NONCACHE_OFFSET0 0x01d2 +#define regUVD_VCPU_NONCACHE_OFFSET0_BASE_IDX 1 +#define regUVD_VCPU_NONCACHE_SIZE0 0x01d3 +#define regUVD_VCPU_NONCACHE_SIZE0_BASE_IDX 1 +#define regUVD_VCPU_NONCACHE_OFFSET1 0x01d4 +#define regUVD_VCPU_NONCACHE_OFFSET1_BASE_IDX 1 +#define regUVD_VCPU_NONCACHE_SIZE1 0x01d5 +#define regUVD_VCPU_NONCACHE_SIZE1_BASE_IDX 1 +#define regUVD_VCPU_CNTL 0x01d6 +#define regUVD_VCPU_CNTL_BASE_IDX 1 +#define regUVD_VCPU_PRID 0x01d7 +#define regUVD_VCPU_PRID_BASE_IDX 1 +#define regUVD_VCPU_TRCE 0x01d8 +#define regUVD_VCPU_TRCE_BASE_IDX 1 +#define regUVD_VCPU_TRCE_RD 0x01d9 +#define regUVD_VCPU_TRCE_RD_BASE_IDX 1 +#define regUVD_VCPU_IND_INDEX 0x01db +#define regUVD_VCPU_IND_INDEX_BASE_IDX 1 +#define regUVD_VCPU_IND_DATA 0x01dc +#define regUVD_VCPU_IND_DATA_BASE_IDX 1 + + +// addressBlock: uvd_lmi_adpdec +// base address: 0x20290 +#define regUVD_LMI_RE_64BIT_BAR_LOW 0x02af +#define regUVD_LMI_RE_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_RE_64BIT_BAR_HIGH 0x02b0 +#define regUVD_LMI_RE_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_IT_64BIT_BAR_LOW 0x02b1 +#define regUVD_LMI_IT_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_IT_64BIT_BAR_HIGH 0x02b2 +#define regUVD_LMI_IT_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MP_64BIT_BAR_LOW 0x02b3 +#define regUVD_LMI_MP_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MP_64BIT_BAR_HIGH 0x02b4 +#define regUVD_LMI_MP_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_CM_64BIT_BAR_LOW 0x02b5 +#define regUVD_LMI_CM_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_CM_64BIT_BAR_HIGH 0x02b6 +#define regUVD_LMI_CM_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_DB_64BIT_BAR_LOW 0x02b7 +#define regUVD_LMI_DB_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_DB_64BIT_BAR_HIGH 0x02b8 +#define regUVD_LMI_DB_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_DBW_64BIT_BAR_LOW 0x02b9 +#define regUVD_LMI_DBW_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_DBW_64BIT_BAR_HIGH 0x02ba +#define regUVD_LMI_DBW_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_IDCT_64BIT_BAR_LOW 0x02bb +#define regUVD_LMI_IDCT_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_IDCT_64BIT_BAR_HIGH 0x02bc +#define regUVD_LMI_IDCT_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW 0x02bd +#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH 0x02be +#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW 0x02bf +#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH 0x02c0 +#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW 0x02c1 +#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH 0x02c2 +#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x02c5 +#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x02c6 +#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x02c7 +#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH 0x02c8 +#define 
regUVD_LMI_RBC_IB_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_LBSI_64BIT_BAR_LOW 0x02c9 +#define regUVD_LMI_LBSI_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_LBSI_64BIT_BAR_HIGH 0x02ca +#define regUVD_LMI_LBSI_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW 0x02cb +#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH 0x02cc +#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW 0x02cd +#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH 0x02ce +#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x02cf +#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x02d0 +#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_CENC_64BIT_BAR_LOW 0x02d1 +#define regUVD_LMI_CENC_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_CENC_64BIT_BAR_HIGH 0x02d2 +#define regUVD_LMI_CENC_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_SRE_64BIT_BAR_LOW 0x02d3 +#define regUVD_LMI_SRE_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_SRE_64BIT_BAR_HIGH 0x02d4 +#define regUVD_LMI_SRE_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW 0x02d5 +#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH 0x02d6 +#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW 0x02d7 +#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH 0x02d8 +#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW 0x02d9 +#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH 0x02da +#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW 0x02dd +#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH 0x02de +#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW 0x02df +#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH 0x02e0 +#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW 0x02e1 +#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH 0x02e2 +#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW 0x02e3 +#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH 0x02e4 +#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW 0x02e5 +#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH 0x02e6 +#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW 0x02e7 +#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH 0x02e8 +#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW 0x02e9 +#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH 0x02ea +#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW 0x02eb +#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW_BASE_IDX 1 +#define 
regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH 0x02ec +#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW 0x02ed +#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH 0x02ee +#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW 0x02ef +#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH 0x02f0 +#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW 0x02f1 +#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH 0x02f2 +#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW 0x02fb +#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH 0x02fc +#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW 0x02fd +#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH 0x02fe +#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW 0x02ff +#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH 0x0300 +#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW 0x0301 +#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH 0x0302 +#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW 0x0303 +#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH 0x0304 +#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW 0x0305 +#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH 0x0306 +#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW 0x0307 +#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH 0x0308 +#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW 0x0309 +#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH 0x030a +#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW 0x030b +#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH 0x030c +#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW 0x030d +#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH 0x030e +#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_SPH_64BIT_BAR_HIGH 0x030f +#define regUVD_LMI_SPH_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW 0x0318 +#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH 0x0319 +#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW 0x031a +#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH 0x031b +#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1 +#define 
regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW 0x031c +#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH 0x031d +#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW 0x031e +#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH 0x031f +#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_ADP_ATOMIC_CONFIG 0x0321 +#define regUVD_ADP_ATOMIC_CONFIG_BASE_IDX 1 +#define regUVD_LMI_ARB_CTRL2 0x0322 +#define regUVD_LMI_ARB_CTRL2_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI 0x0327 +#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI_BASE_IDX 1 +#define regUVD_LMI_VCPU_NC_VMIDS_MULTI 0x0328 +#define regUVD_LMI_VCPU_NC_VMIDS_MULTI_BASE_IDX 1 +#define regUVD_LMI_LAT_CTRL 0x0329 +#define regUVD_LMI_LAT_CTRL_BASE_IDX 1 +#define regUVD_LMI_LAT_CNTR 0x032a +#define regUVD_LMI_LAT_CNTR_BASE_IDX 1 +#define regUVD_LMI_AVG_LAT_CNTR 0x032b +#define regUVD_LMI_AVG_LAT_CNTR_BASE_IDX 1 +#define regUVD_LMI_SPH 0x032c +#define regUVD_LMI_SPH_BASE_IDX 1 +#define regUVD_LMI_VCPU_CACHE_VMID 0x032d +#define regUVD_LMI_VCPU_CACHE_VMID_BASE_IDX 1 +#define regUVD_LMI_CTRL2 0x032e +#define regUVD_LMI_CTRL2_BASE_IDX 1 +#define regUVD_LMI_URGENT_CTRL 0x032f +#define regUVD_LMI_URGENT_CTRL_BASE_IDX 1 +#define regUVD_LMI_CTRL 0x0330 +#define regUVD_LMI_CTRL_BASE_IDX 1 +#define regUVD_LMI_STATUS 0x0331 +#define regUVD_LMI_STATUS_BASE_IDX 1 +#define regUVD_LMI_PERFMON_CTRL 0x0334 +#define regUVD_LMI_PERFMON_CTRL_BASE_IDX 1 +#define regUVD_LMI_PERFMON_COUNT_LO 0x0335 +#define regUVD_LMI_PERFMON_COUNT_LO_BASE_IDX 1 +#define regUVD_LMI_PERFMON_COUNT_HI 0x0336 +#define regUVD_LMI_PERFMON_COUNT_HI_BASE_IDX 1 +#define regUVD_LMI_ADP_SWAP_CNTL 0x0337 +#define regUVD_LMI_ADP_SWAP_CNTL_BASE_IDX 1 +#define regUVD_LMI_RBC_RB_VMID 0x0338 +#define regUVD_LMI_RBC_RB_VMID_BASE_IDX 1 +#define regUVD_LMI_RBC_IB_VMID 0x0339 +#define regUVD_LMI_RBC_IB_VMID_BASE_IDX 1 +#define regUVD_LMI_MC_CREDITS 0x033a +#define regUVD_LMI_MC_CREDITS_BASE_IDX 1 +#define regUVD_LMI_ADP_IND_INDEX 0x033e +#define regUVD_LMI_ADP_IND_INDEX_BASE_IDX 1 +#define regUVD_LMI_ADP_IND_DATA 0x033f +#define regUVD_LMI_ADP_IND_DATA_BASE_IDX 1 +#define regUVD_LMI_ADP_PF_EN 0x0340 +#define regUVD_LMI_ADP_PF_EN_BASE_IDX 1 +#define regUVD_LMI_PREF_CTRL 0x0342 +#define regUVD_LMI_PREF_CTRL_BASE_IDX 1 + + +// addressBlock: uvd_uvd_jpeg0_jpegnpdec +// base address: 0x20f00 +#define regUVD_JPEG_CNTL 0x05c0 +#define regUVD_JPEG_CNTL_BASE_IDX 1 +#define regUVD_JPEG_RB_BASE 0x05c1 +#define regUVD_JPEG_RB_BASE_BASE_IDX 1 +#define regUVD_JPEG_RB_WPTR 0x05c2 +#define regUVD_JPEG_RB_WPTR_BASE_IDX 1 +#define regUVD_JPEG_RB_RPTR 0x05c3 +#define regUVD_JPEG_RB_RPTR_BASE_IDX 1 +#define regUVD_JPEG_RB_SIZE 0x05c4 +#define regUVD_JPEG_RB_SIZE_BASE_IDX 1 +#define regUVD_JPEG_DEC_CNT 0x05c5 +#define regUVD_JPEG_DEC_CNT_BASE_IDX 1 +#define regUVD_JPEG_SPS_INFO 0x05c6 +#define regUVD_JPEG_SPS_INFO_BASE_IDX 1 +#define regUVD_JPEG_SPS1_INFO 0x05c7 +#define regUVD_JPEG_SPS1_INFO_BASE_IDX 1 +#define regUVD_JPEG_RE_TIMER 0x05c8 +#define regUVD_JPEG_RE_TIMER_BASE_IDX 1 +#define regUVD_JPEG_DEC_SCRATCH0 0x05c9 +#define regUVD_JPEG_DEC_SCRATCH0_BASE_IDX 1 +#define regUVD_JPEG_INT_EN 0x05ca +#define regUVD_JPEG_INT_EN_BASE_IDX 1 +#define regUVD_JPEG_INT_STAT 0x05cb +#define regUVD_JPEG_INT_STAT_BASE_IDX 1 +#define regUVD_JPEG_TIER_CNTL0 0x05cc +#define regUVD_JPEG_TIER_CNTL0_BASE_IDX 1 +#define 
regUVD_JPEG_TIER_CNTL1 0x05cd +#define regUVD_JPEG_TIER_CNTL1_BASE_IDX 1 +#define regUVD_JPEG_TIER_CNTL2 0x05ce +#define regUVD_JPEG_TIER_CNTL2_BASE_IDX 1 +#define regUVD_JPEG_TIER_STATUS 0x05cf +#define regUVD_JPEG_TIER_STATUS_BASE_IDX 1 + + +// addressBlock: uvd_uvd_jpeg_sclk0_jpegnpsclkdec +// base address: 0x21000 +#define regUVD_JPEG_OUTBUF_CNTL 0x0600 +#define regUVD_JPEG_OUTBUF_CNTL_BASE_IDX 1 +#define regUVD_JPEG_OUTBUF_WPTR 0x0601 +#define regUVD_JPEG_OUTBUF_WPTR_BASE_IDX 1 +#define regUVD_JPEG_OUTBUF_RPTR 0x0602 +#define regUVD_JPEG_OUTBUF_RPTR_BASE_IDX 1 +#define regUVD_JPEG_PITCH 0x0603 +#define regUVD_JPEG_PITCH_BASE_IDX 1 +#define regUVD_JPEG_UV_PITCH 0x0604 +#define regUVD_JPEG_UV_PITCH_BASE_IDX 1 +#define regJPEG_DEC_Y_GFX8_TILING_SURFACE 0x0605 +#define regJPEG_DEC_Y_GFX8_TILING_SURFACE_BASE_IDX 1 +#define regJPEG_DEC_UV_GFX8_TILING_SURFACE 0x0606 +#define regJPEG_DEC_UV_GFX8_TILING_SURFACE_BASE_IDX 1 +#define regJPEG_DEC_GFX8_ADDR_CONFIG 0x0607 +#define regJPEG_DEC_GFX8_ADDR_CONFIG_BASE_IDX 1 +#define regJPEG_DEC_Y_GFX10_TILING_SURFACE 0x0608 +#define regJPEG_DEC_Y_GFX10_TILING_SURFACE_BASE_IDX 1 +#define regJPEG_DEC_UV_GFX10_TILING_SURFACE 0x0609 +#define regJPEG_DEC_UV_GFX10_TILING_SURFACE_BASE_IDX 1 +#define regJPEG_DEC_GFX10_ADDR_CONFIG 0x060a +#define regJPEG_DEC_GFX10_ADDR_CONFIG_BASE_IDX 1 +#define regJPEG_DEC_ADDR_MODE 0x060b +#define regJPEG_DEC_ADDR_MODE_BASE_IDX 1 +#define regUVD_JPEG_OUTPUT_XY 0x060c +#define regUVD_JPEG_OUTPUT_XY_BASE_IDX 1 +#define regUVD_JPEG_GPCOM_CMD 0x060d +#define regUVD_JPEG_GPCOM_CMD_BASE_IDX 1 +#define regUVD_JPEG_GPCOM_DATA0 0x060e +#define regUVD_JPEG_GPCOM_DATA0_BASE_IDX 1 +#define regUVD_JPEG_GPCOM_DATA1 0x060f +#define regUVD_JPEG_GPCOM_DATA1_BASE_IDX 1 +#define regUVD_JPEG_SCRATCH1 0x0610 +#define regUVD_JPEG_SCRATCH1_BASE_IDX 1 +#define regUVD_JPEG_DEC_SOFT_RST 0x0611 +#define regUVD_JPEG_DEC_SOFT_RST_BASE_IDX 1 + + +// addressBlock: uvd_uvd_jrbc0_uvd_jrbc_dec +// base address: 0x21100 +#define regUVD_JRBC_RB_WPTR 0x0640 +#define regUVD_JRBC_RB_WPTR_BASE_IDX 1 +#define regUVD_JRBC_RB_CNTL 0x0641 +#define regUVD_JRBC_RB_CNTL_BASE_IDX 1 +#define regUVD_JRBC_IB_SIZE 0x0642 +#define regUVD_JRBC_IB_SIZE_BASE_IDX 1 +#define regUVD_JRBC_URGENT_CNTL 0x0643 +#define regUVD_JRBC_URGENT_CNTL_BASE_IDX 1 +#define regUVD_JRBC_RB_REF_DATA 0x0644 +#define regUVD_JRBC_RB_REF_DATA_BASE_IDX 1 +#define regUVD_JRBC_RB_COND_RD_TIMER 0x0645 +#define regUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1 +#define regUVD_JRBC_SOFT_RESET 0x0648 +#define regUVD_JRBC_SOFT_RESET_BASE_IDX 1 +#define regUVD_JRBC_STATUS 0x0649 +#define regUVD_JRBC_STATUS_BASE_IDX 1 +#define regUVD_JRBC_RB_RPTR 0x064a +#define regUVD_JRBC_RB_RPTR_BASE_IDX 1 +#define regUVD_JRBC_RB_BUF_STATUS 0x064b +#define regUVD_JRBC_RB_BUF_STATUS_BASE_IDX 1 +#define regUVD_JRBC_IB_BUF_STATUS 0x064c +#define regUVD_JRBC_IB_BUF_STATUS_BASE_IDX 1 +#define regUVD_JRBC_IB_SIZE_UPDATE 0x064d +#define regUVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 1 +#define regUVD_JRBC_IB_COND_RD_TIMER 0x064e +#define regUVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 1 +#define regUVD_JRBC_IB_REF_DATA 0x064f +#define regUVD_JRBC_IB_REF_DATA_BASE_IDX 1 +#define regUVD_JPEG_PREEMPT_CMD 0x0650 +#define regUVD_JPEG_PREEMPT_CMD_BASE_IDX 1 +#define regUVD_JPEG_PREEMPT_FENCE_DATA0 0x0651 +#define regUVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 1 +#define regUVD_JPEG_PREEMPT_FENCE_DATA1 0x0652 +#define regUVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 1 +#define regUVD_JRBC_RB_SIZE 0x0653 +#define regUVD_JRBC_RB_SIZE_BASE_IDX 1 +#define regUVD_JRBC_SCRATCH0 0x0654 +#define 
regUVD_JRBC_SCRATCH0_BASE_IDX 1 + + +// addressBlock: uvd_uvd_jmi0_uvd_jmi_dec +// base address: 0x21180 +#define regUVD_JPEG_DEC_PF_CTRL 0x0660 +#define regUVD_JPEG_DEC_PF_CTRL_BASE_IDX 1 +#define regUVD_LMI_JRBC_CTRL 0x0661 +#define regUVD_LMI_JRBC_CTRL_BASE_IDX 1 +#define regUVD_LMI_JPEG_CTRL 0x0662 +#define regUVD_LMI_JPEG_CTRL_BASE_IDX 1 +#define regJPEG_LMI_DROP 0x0663 +#define regJPEG_LMI_DROP_BASE_IDX 1 +#define regUVD_LMI_JRBC_IB_VMID 0x0664 +#define regUVD_LMI_JRBC_IB_VMID_BASE_IDX 1 +#define regUVD_LMI_JRBC_RB_VMID 0x0665 +#define regUVD_LMI_JRBC_RB_VMID_BASE_IDX 1 +#define regUVD_LMI_JPEG_VMID 0x0666 +#define regUVD_LMI_JPEG_VMID_BASE_IDX 1 +#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0667 +#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0668 +#define regUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0669 +#define regUVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x066a +#define regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x066b +#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x066c +#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_JPEG_PREEMPT_VMID 0x066d +#define regUVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 1 +#define regUVD_JMI_DEC_SWAP_CNTL 0x066e +#define regUVD_JMI_DEC_SWAP_CNTL_BASE_IDX 1 +#define regUVD_JMI_ATOMIC_CNTL 0x066f +#define regUVD_JMI_ATOMIC_CNTL_BASE_IDX 1 +#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0670 +#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0671 +#define regUVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0672 +#define regUVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0673 +#define regUVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0674 +#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0675 +#define regUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0676 +#define regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0677 +#define regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0678 +#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0679 +#define regUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_JMI_ATOMIC_CNTL2 0x067d +#define regUVD_JMI_ATOMIC_CNTL2_BASE_IDX 1 + + +// addressBlock: uvd_uvd_jmi_common_dec +// base address: 0x21300 +#define regUVD_JADP_MCIF_URGENT_CTRL 0x06c1 +#define regUVD_JADP_MCIF_URGENT_CTRL_BASE_IDX 1 +#define regUVD_JMI_URGENT_CTRL 0x06c2 +#define regUVD_JMI_URGENT_CTRL_BASE_IDX 1 +#define regUVD_JMI_CTRL 0x06c3 +#define regUVD_JMI_CTRL_BASE_IDX 1 +#define regJPEG_MEMCHECK_CLAMPING_CNTL 0x06c4 +#define regJPEG_MEMCHECK_CLAMPING_CNTL_BASE_IDX 1 +#define regJPEG_MEMCHECK_SAFE_ADDR 0x06c5 +#define regJPEG_MEMCHECK_SAFE_ADDR_BASE_IDX 1 +#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT 0x06c6 +#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT_BASE_IDX 1 +#define regUVD_JMI_LAT_CTRL 0x06c7 +#define regUVD_JMI_LAT_CTRL_BASE_IDX 1 +#define regUVD_JMI_LAT_CNTR 0x06c8 +#define 
regUVD_JMI_LAT_CNTR_BASE_IDX 1 +#define regUVD_JMI_AVG_LAT_CNTR 0x06c9 +#define regUVD_JMI_AVG_LAT_CNTR_BASE_IDX 1 +#define regUVD_JMI_PERFMON_CTRL 0x06ca +#define regUVD_JMI_PERFMON_CTRL_BASE_IDX 1 +#define regUVD_JMI_PERFMON_COUNT_LO 0x06cb +#define regUVD_JMI_PERFMON_COUNT_LO_BASE_IDX 1 +#define regUVD_JMI_PERFMON_COUNT_HI 0x06cc +#define regUVD_JMI_PERFMON_COUNT_HI_BASE_IDX 1 +#define regUVD_JMI_CLEAN_STATUS 0x06cd +#define regUVD_JMI_CLEAN_STATUS_BASE_IDX 1 +#define regUVD_JMI_CNTL 0x06ce +#define regUVD_JMI_CNTL_BASE_IDX 1 + + +// addressBlock: uvd_uvd_jpeg_common_dec +// base address: 0x21400 +#define regJPEG_SOFT_RESET_STATUS 0x0700 +#define regJPEG_SOFT_RESET_STATUS_BASE_IDX 1 +#define regJPEG_SYS_INT_EN 0x0701 +#define regJPEG_SYS_INT_EN_BASE_IDX 1 +#define regJPEG_SYS_INT_EN1 0x0702 +#define regJPEG_SYS_INT_EN1_BASE_IDX 1 +#define regJPEG_SYS_INT_STATUS 0x0703 +#define regJPEG_SYS_INT_STATUS_BASE_IDX 1 +#define regJPEG_SYS_INT_STATUS1 0x0704 +#define regJPEG_SYS_INT_STATUS1_BASE_IDX 1 +#define regJPEG_SYS_INT_ACK 0x0705 +#define regJPEG_SYS_INT_ACK_BASE_IDX 1 +#define regJPEG_SYS_INT_ACK1 0x0706 +#define regJPEG_SYS_INT_ACK1_BASE_IDX 1 +#define regJPEG_MEMCHECK_SYS_INT_EN 0x0707 +#define regJPEG_MEMCHECK_SYS_INT_EN_BASE_IDX 1 +#define regJPEG_MEMCHECK_SYS_INT_EN1 0x0708 +#define regJPEG_MEMCHECK_SYS_INT_EN1_BASE_IDX 1 +#define regJPEG_MEMCHECK_SYS_INT_STAT 0x0709 +#define regJPEG_MEMCHECK_SYS_INT_STAT_BASE_IDX 1 +#define regJPEG_MEMCHECK_SYS_INT_STAT1 0x070a +#define regJPEG_MEMCHECK_SYS_INT_STAT1_BASE_IDX 1 +#define regJPEG_MEMCHECK_SYS_INT_STAT2 0x070b +#define regJPEG_MEMCHECK_SYS_INT_STAT2_BASE_IDX 1 +#define regJPEG_MEMCHECK_SYS_INT_ACK 0x070c +#define regJPEG_MEMCHECK_SYS_INT_ACK_BASE_IDX 1 +#define regJPEG_MEMCHECK_SYS_INT_ACK1 0x070d +#define regJPEG_MEMCHECK_SYS_INT_ACK1_BASE_IDX 1 +#define regJPEG_MEMCHECK_SYS_INT_ACK2 0x070e +#define regJPEG_MEMCHECK_SYS_INT_ACK2_BASE_IDX 1 +#define regJPEG_MASTINT_EN 0x070f +#define regJPEG_MASTINT_EN_BASE_IDX 1 +#define regJPEG_IH_CTRL 0x0710 +#define regJPEG_IH_CTRL_BASE_IDX 1 +#define regJRBBM_ARB_CTRL 0x0712 +#define regJRBBM_ARB_CTRL_BASE_IDX 1 + + +// addressBlock: uvd_uvd_jpeg_common_sclk_dec +// base address: 0x21480 +#define regJPEG_CGC_GATE 0x0720 +#define regJPEG_CGC_GATE_BASE_IDX 1 +#define regJPEG_CGC_CTRL 0x0721 +#define regJPEG_CGC_CTRL_BASE_IDX 1 +#define regJPEG_CGC_STATUS 0x0722 +#define regJPEG_CGC_STATUS_BASE_IDX 1 +#define regJPEG_COMN_CGC_MEM_CTRL 0x0723 +#define regJPEG_COMN_CGC_MEM_CTRL_BASE_IDX 1 +#define regJPEG_DEC_CGC_MEM_CTRL 0x0724 +#define regJPEG_DEC_CGC_MEM_CTRL_BASE_IDX 1 +#define regJPEG_ENC_CGC_MEM_CTRL 0x0726 +#define regJPEG_ENC_CGC_MEM_CTRL_BASE_IDX 1 +#define regJPEG_PERF_BANK_CONF 0x0727 +#define regJPEG_PERF_BANK_CONF_BASE_IDX 1 +#define regJPEG_PERF_BANK_EVENT_SEL 0x0728 +#define regJPEG_PERF_BANK_EVENT_SEL_BASE_IDX 1 +#define regJPEG_PERF_BANK_COUNT0 0x0729 +#define regJPEG_PERF_BANK_COUNT0_BASE_IDX 1 +#define regJPEG_PERF_BANK_COUNT1 0x072a +#define regJPEG_PERF_BANK_COUNT1_BASE_IDX 1 +#define regJPEG_PERF_BANK_COUNT2 0x072b +#define regJPEG_PERF_BANK_COUNT2_BASE_IDX 1 +#define regJPEG_PERF_BANK_COUNT3 0x072c +#define regJPEG_PERF_BANK_COUNT3_BASE_IDX 1 + + +// addressBlock: uvd_uvd_pg_dec +// base address: 0x1f800 +#define regUVD_IPX_DLDO_CONFIG 0x0000 +#define regUVD_IPX_DLDO_CONFIG_BASE_IDX 1 +#define regUVD_IPX_DLDO_STATUS 0x0001 +#define regUVD_IPX_DLDO_STATUS_BASE_IDX 1 +#define regUVD_POWER_STATUS 0x0002 +#define regUVD_POWER_STATUS_BASE_IDX 1 +#define regUVD_JPEG_POWER_STATUS 
0x0003 +#define regUVD_JPEG_POWER_STATUS_BASE_IDX 1 +#define regUVD_MC_DJPEG_RD_SPACE 0x0007 +#define regUVD_MC_DJPEG_RD_SPACE_BASE_IDX 1 +#define regUVD_MC_DJPEG_WR_SPACE 0x0008 +#define regUVD_MC_DJPEG_WR_SPACE_BASE_IDX 1 +#define regUVD_PG_IND_INDEX 0x000c +#define regUVD_PG_IND_INDEX_BASE_IDX 1 +#define regUVD_PG_IND_DATA 0x000e +#define regUVD_PG_IND_DATA_BASE_IDX 1 +#define regCC_UVD_HARVESTING 0x000f +#define regCC_UVD_HARVESTING_BASE_IDX 1 +#define regUVD_DPG_LMA_CTL 0x0011 +#define regUVD_DPG_LMA_CTL_BASE_IDX 1 +#define regUVD_DPG_LMA_DATA 0x0012 +#define regUVD_DPG_LMA_DATA_BASE_IDX 1 +#define regUVD_DPG_LMA_MASK 0x0013 +#define regUVD_DPG_LMA_MASK_BASE_IDX 1 +#define regUVD_DPG_PAUSE 0x0014 +#define regUVD_DPG_PAUSE_BASE_IDX 1 +#define regUVD_SCRATCH1 0x0015 +#define regUVD_SCRATCH1_BASE_IDX 1 +#define regUVD_SCRATCH2 0x0016 +#define regUVD_SCRATCH2_BASE_IDX 1 +#define regUVD_SCRATCH3 0x0017 +#define regUVD_SCRATCH3_BASE_IDX 1 +#define regUVD_SCRATCH4 0x0018 +#define regUVD_SCRATCH4_BASE_IDX 1 +#define regUVD_SCRATCH5 0x0019 +#define regUVD_SCRATCH5_BASE_IDX 1 +#define regUVD_SCRATCH6 0x001a +#define regUVD_SCRATCH6_BASE_IDX 1 +#define regUVD_SCRATCH7 0x001b +#define regUVD_SCRATCH7_BASE_IDX 1 +#define regUVD_SCRATCH8 0x001c +#define regUVD_SCRATCH8_BASE_IDX 1 +#define regUVD_SCRATCH9 0x001d +#define regUVD_SCRATCH9_BASE_IDX 1 +#define regUVD_SCRATCH10 0x001e +#define regUVD_SCRATCH10_BASE_IDX 1 +#define regUVD_SCRATCH11 0x001f +#define regUVD_SCRATCH11_BASE_IDX 1 +#define regUVD_SCRATCH12 0x0020 +#define regUVD_SCRATCH12_BASE_IDX 1 +#define regUVD_SCRATCH13 0x0021 +#define regUVD_SCRATCH13_BASE_IDX 1 +#define regUVD_SCRATCH14 0x0022 +#define regUVD_SCRATCH14_BASE_IDX 1 +#define regUVD_FREE_COUNTER_REG 0x0023 +#define regUVD_FREE_COUNTER_REG_BASE_IDX 1 +#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0024 +#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0025 +#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_DPG_VCPU_CACHE_OFFSET0 0x0026 +#define regUVD_DPG_VCPU_CACHE_OFFSET0_BASE_IDX 1 +#define regUVD_DPG_LMI_VCPU_CACHE_VMID 0x0027 +#define regUVD_DPG_LMI_VCPU_CACHE_VMID_BASE_IDX 1 +#define regUVD_REG_FILTER_EN 0x0028 +#define regUVD_REG_FILTER_EN_BASE_IDX 1 +#define regUVD_SECURITY_REG_VIO_REPORT 0x0029 +#define regUVD_SECURITY_REG_VIO_REPORT_BASE_IDX 1 +#define regUVD_FW_VERSION 0x002a +#define regUVD_FW_VERSION_BASE_IDX 1 +#define regUVD_PF_STATUS 0x002c +#define regUVD_PF_STATUS_BASE_IDX 1 +#define regUVD_DPG_CLK_EN_VCPU_REPORT 0x002e +#define regUVD_DPG_CLK_EN_VCPU_REPORT_BASE_IDX 1 +#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO 0x002f +#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO_BASE_IDX 1 +#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI 0x0030 +#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI_BASE_IDX 1 +#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO 0x0031 +#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO_BASE_IDX 1 +#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI 0x0032 +#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI_BASE_IDX 1 +#define regCC_UVD_VCPU_ERR 0x0033 +#define regCC_UVD_VCPU_ERR_BASE_IDX 1 +#define regCC_UVD_VCPU_ERR_INST_ADDR_LO 0x0034 +#define regCC_UVD_VCPU_ERR_INST_ADDR_LO_BASE_IDX 1 +#define regCC_UVD_VCPU_ERR_INST_ADDR_HI 0x0035 +#define regCC_UVD_VCPU_ERR_INST_ADDR_HI_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC_SPACE 0x003d +#define regUVD_LMI_MMSCH_NC_SPACE_BASE_IDX 1 +#define regUVD_LMI_ATOMIC_SPACE 0x003e +#define regUVD_LMI_ATOMIC_SPACE_BASE_IDX 1 +#define regUVD_GFX8_ADDR_CONFIG 0x0041 +#define 
regUVD_GFX8_ADDR_CONFIG_BASE_IDX 1 +#define regUVD_GFX10_ADDR_CONFIG 0x0042 +#define regUVD_GFX10_ADDR_CONFIG_BASE_IDX 1 +#define regUVD_GPCNT2_CNTL 0x0043 +#define regUVD_GPCNT2_CNTL_BASE_IDX 1 +#define regUVD_GPCNT2_TARGET_LOWER 0x0044 +#define regUVD_GPCNT2_TARGET_LOWER_BASE_IDX 1 +#define regUVD_GPCNT2_STATUS_LOWER 0x0045 +#define regUVD_GPCNT2_STATUS_LOWER_BASE_IDX 1 +#define regUVD_GPCNT2_TARGET_UPPER 0x0046 +#define regUVD_GPCNT2_TARGET_UPPER_BASE_IDX 1 +#define regUVD_GPCNT2_STATUS_UPPER 0x0047 +#define regUVD_GPCNT2_STATUS_UPPER_BASE_IDX 1 +#define regUVD_GPCNT3_CNTL 0x0048 +#define regUVD_GPCNT3_CNTL_BASE_IDX 1 +#define regUVD_GPCNT3_TARGET_LOWER 0x0049 +#define regUVD_GPCNT3_TARGET_LOWER_BASE_IDX 1 +#define regUVD_GPCNT3_STATUS_LOWER 0x004a +#define regUVD_GPCNT3_STATUS_LOWER_BASE_IDX 1 +#define regUVD_GPCNT3_TARGET_UPPER 0x004b +#define regUVD_GPCNT3_TARGET_UPPER_BASE_IDX 1 +#define regUVD_GPCNT3_STATUS_UPPER 0x004c +#define regUVD_GPCNT3_STATUS_UPPER_BASE_IDX 1 +#define regUVD_VCLK_DS_CNTL 0x004d +#define regUVD_VCLK_DS_CNTL_BASE_IDX 1 +#define regUVD_DCLK_DS_CNTL 0x004e +#define regUVD_DCLK_DS_CNTL_BASE_IDX 1 +#define regUVD_TSC_LOWER 0x004f +#define regUVD_TSC_LOWER_BASE_IDX 1 +#define regUVD_TSC_UPPER 0x0050 +#define regUVD_TSC_UPPER_BASE_IDX 1 +#define regVCN_FEATURES 0x0051 +#define regVCN_FEATURES_BASE_IDX 1 +#define regUVD_GPUIOV_STATUS 0x0055 +#define regUVD_GPUIOV_STATUS_BASE_IDX 1 +#define regUVD_SCRATCH15 0x005c +#define regUVD_SCRATCH15_BASE_IDX 1 +#define regUVD_VERSION 0x005d +#define regUVD_VERSION_BASE_IDX 1 +#define regVCN_UMSCH_CNTL 0x005e +#define regVCN_UMSCH_CNTL_BASE_IDX 1 +#define regVCN_JPEG_DB_CTRL 0x0068 +#define regVCN_JPEG_DB_CTRL_BASE_IDX 1 +#define regVCN_RB1_DB_CTRL 0x0072 +#define regVCN_RB1_DB_CTRL_BASE_IDX 1 +#define regVCN_RB2_DB_CTRL 0x0073 +#define regVCN_RB2_DB_CTRL_BASE_IDX 1 +#define regVCN_RB3_DB_CTRL 0x0074 +#define regVCN_RB3_DB_CTRL_BASE_IDX 1 +#define regVCN_RB4_DB_CTRL 0x0075 +#define regVCN_RB4_DB_CTRL_BASE_IDX 1 +#define regVCN_UMSCH_RB_DB_CTRL 0x0076 +#define regVCN_UMSCH_RB_DB_CTRL_BASE_IDX 1 +#define regVCN_RB_DB_CTRL 0x0077 +#define regVCN_RB_DB_CTRL_BASE_IDX 1 +#define regVCN_AGDB_CTRL0 0x0079 +#define regVCN_AGDB_CTRL0_BASE_IDX 1 +#define regVCN_AGDB_CTRL1 0x007a +#define regVCN_AGDB_CTRL1_BASE_IDX 1 +#define regVCN_AGDB_CTRL2 0x007b +#define regVCN_AGDB_CTRL2_BASE_IDX 1 +#define regVCN_AGDB_CTRL3 0x007c +#define regVCN_AGDB_CTRL3_BASE_IDX 1 +#define regVCN_AGDB_CTRL4 0x007d +#define regVCN_AGDB_CTRL4_BASE_IDX 1 +#define regVCN_AGDB_CTRL5 0x007e +#define regVCN_AGDB_CTRL5_BASE_IDX 1 +#define regVCN_AGDB_MASK0 0x007f +#define regVCN_AGDB_MASK0_BASE_IDX 1 +#define regVCN_AGDB_MASK1 0x0080 +#define regVCN_AGDB_MASK1_BASE_IDX 1 +#define regVCN_AGDB_MASK2 0x0081 +#define regVCN_AGDB_MASK2_BASE_IDX 1 +#define regVCN_AGDB_MASK3 0x0082 +#define regVCN_AGDB_MASK3_BASE_IDX 1 +#define regVCN_AGDB_MASK4 0x0083 +#define regVCN_AGDB_MASK4_BASE_IDX 1 +#define regVCN_AGDB_MASK5 0x0084 +#define regVCN_AGDB_MASK5_BASE_IDX 1 +#define regVCN_RB_ENABLE 0x0085 +#define regVCN_RB_ENABLE_BASE_IDX 1 +#define regVCN_RB_WPTR_CTRL 0x0086 +#define regVCN_RB_WPTR_CTRL_BASE_IDX 1 +#define regUVD_RB_RPTR 0x00ac +#define regUVD_RB_RPTR_BASE_IDX 1 +#define regUVD_RB_WPTR 0x00ad +#define regUVD_RB_WPTR_BASE_IDX 1 +#define regUVD_RB_RPTR2 0x00ae +#define regUVD_RB_RPTR2_BASE_IDX 1 +#define regUVD_RB_WPTR2 0x00af +#define regUVD_RB_WPTR2_BASE_IDX 1 +#define regUVD_RB_RPTR3 0x00b0 +#define regUVD_RB_RPTR3_BASE_IDX 1 +#define regUVD_RB_WPTR3 0x00b1 +#define 
regUVD_RB_WPTR3_BASE_IDX 1 +#define regUVD_RB_RPTR4 0x00b2 +#define regUVD_RB_RPTR4_BASE_IDX 1 +#define regUVD_RB_WPTR4 0x00b3 +#define regUVD_RB_WPTR4_BASE_IDX 1 +#define regUVD_OUT_RB_RPTR 0x00b4 +#define regUVD_OUT_RB_RPTR_BASE_IDX 1 +#define regUVD_OUT_RB_WPTR 0x00b5 +#define regUVD_OUT_RB_WPTR_BASE_IDX 1 +#define regUVD_AUDIO_RB_RPTR 0x00b6 +#define regUVD_AUDIO_RB_RPTR_BASE_IDX 1 +#define regUVD_AUDIO_RB_WPTR 0x00b7 +#define regUVD_AUDIO_RB_WPTR_BASE_IDX 1 +#define regUVD_RBC_RB_RPTR 0x00b8 +#define regUVD_RBC_RB_RPTR_BASE_IDX 1 +#define regUVD_RBC_RB_WPTR 0x00b9 +#define regUVD_RBC_RB_WPTR_BASE_IDX 1 +#define regUVD_DPG_LMA_CTL2 0x00bb +#define regUVD_DPG_LMA_CTL2_BASE_IDX 1 + + +// addressBlock: uvd_vcn_umsch_dec +// base address: 0x21500 +#define regVCN_UMSCH_MES_CNTL 0x0740 +#define regVCN_UMSCH_MES_CNTL_BASE_IDX 1 +#define regUMSCH_CTL 0x0741 +#define regUMSCH_CTL_BASE_IDX 1 +#define regUMSCH_CTL2 0x0742 +#define regUMSCH_CTL2_BASE_IDX 1 +#define regVCN_UMSCH_AGDB_WPTR0 0x0743 +#define regVCN_UMSCH_AGDB_WPTR0_BASE_IDX 1 +#define regVCN_UMSCH_AGDB_WPTR1 0x0744 +#define regVCN_UMSCH_AGDB_WPTR1_BASE_IDX 1 +#define regVCN_UMSCH_AGDB_WPTR2 0x0745 +#define regVCN_UMSCH_AGDB_WPTR2_BASE_IDX 1 +#define regVCN_UMSCH_AGDB_WPTR3 0x0746 +#define regVCN_UMSCH_AGDB_WPTR3_BASE_IDX 1 +#define regVCN_UMSCH_AGDB_WPTR4 0x0747 +#define regVCN_UMSCH_AGDB_WPTR4_BASE_IDX 1 +#define regVCN_UMSCH_AGDB_WPTR5 0x0748 +#define regVCN_UMSCH_AGDB_WPTR5_BASE_IDX 1 +#define regVCN_UMSCH_MAILBOX0 0x0749 +#define regVCN_UMSCH_MAILBOX0_BASE_IDX 1 +#define regVCN_UMSCH_MAILBOX_RESP0 0x074a +#define regVCN_UMSCH_MAILBOX_RESP0_BASE_IDX 1 +#define regVCN_UMSCH_MAILBOX1 0x074b +#define regVCN_UMSCH_MAILBOX1_BASE_IDX 1 +#define regVCN_UMSCH_MAILBOX_RESP1 0x074c +#define regVCN_UMSCH_MAILBOX_RESP1_BASE_IDX 1 +#define regVCN_UMSCH_MAILBOX2 0x074d +#define regVCN_UMSCH_MAILBOX2_BASE_IDX 1 +#define regVCN_UMSCH_MAILBOX_RESP2 0x074e +#define regVCN_UMSCH_MAILBOX_RESP2_BASE_IDX 1 +#define regVCN_UMSCH_MAILBOX3 0x074f +#define regVCN_UMSCH_MAILBOX3_BASE_IDX 1 +#define regVCN_UMSCH_MAILBOX_RESP3 0x0750 +#define regVCN_UMSCH_MAILBOX_RESP3_BASE_IDX 1 +#define regVCN_UMSCH_SPARE_REGISTER0 0x0751 +#define regVCN_UMSCH_SPARE_REGISTER0_BASE_IDX 1 +#define regVCN_UMSCH_SPARE_REGISTER1 0x0752 +#define regVCN_UMSCH_SPARE_REGISTER1_BASE_IDX 1 +#define regVCN_UMSCH_SPARE_REGISTER2 0x0753 +#define regVCN_UMSCH_SPARE_REGISTER2_BASE_IDX 1 +#define regVCN_UMSCH_SPARE_REGISTER3 0x0754 +#define regVCN_UMSCH_SPARE_REGISTER3_BASE_IDX 1 +#define regVCN_UMSCH_SPARE_REGISTER4 0x0755 +#define regVCN_UMSCH_SPARE_REGISTER4_BASE_IDX 1 +#define regVCN_UMSCH_SPARE_REGISTER5 0x0756 +#define regVCN_UMSCH_SPARE_REGISTER5_BASE_IDX 1 +#define regVCN_UMSCH_SPARE_REGISTER6 0x0757 +#define regVCN_UMSCH_SPARE_REGISTER6_BASE_IDX 1 +#define regVCN_UMSCH_SPARE_REGISTER7 0x0758 +#define regVCN_UMSCH_SPARE_REGISTER7_BASE_IDX 1 +#define regVCN_UMSCH_MES_UTCL1_CNTL 0x0759 +#define regVCN_UMSCH_MES_UTCL1_CNTL_BASE_IDX 1 +#define regVCN_UMSCH_MES_BUSY 0x075a +#define regVCN_UMSCH_MES_BUSY_BASE_IDX 1 +#define regVCN_UMSCH_RB_BASE_LO 0x075b +#define regVCN_UMSCH_RB_BASE_LO_BASE_IDX 1 +#define regVCN_UMSCH_RB_BASE_HI 0x075c +#define regVCN_UMSCH_RB_BASE_HI_BASE_IDX 1 +#define regVCN_UMSCH_RB_SIZE 0x075d +#define regVCN_UMSCH_RB_SIZE_BASE_IDX 1 +#define regVCN_UMSCH_RB_RPTR 0x075e +#define regVCN_UMSCH_RB_RPTR_BASE_IDX 1 +#define regVCN_UMSCH_RB_WPTR 0x075f +#define regVCN_UMSCH_RB_WPTR_BASE_IDX 1 +#define regVCN_UMSCH_MASTINT_EN 0x0760 +#define regVCN_UMSCH_MASTINT_EN_BASE_IDX 
1 +#define regVCN_UMSCH_IH_CTRL 0x0761 +#define regVCN_UMSCH_IH_CTRL_BASE_IDX 1 +#define regVCN_UMSCH_SYS_INT_EN 0x0762 +#define regVCN_UMSCH_SYS_INT_EN_BASE_IDX 1 +#define regVCN_UMSCH_SYS_INT_STATUS 0x0763 +#define regVCN_UMSCH_SYS_INT_STATUS_BASE_IDX 1 +#define regVCN_UMSCH_SYS_INT_ACK 0x0764 +#define regVCN_UMSCH_SYS_INT_ACK_BASE_IDX 1 +#define regVCN_UMSCH_SYS_INT_SRC 0x0765 +#define regVCN_UMSCH_SYS_INT_SRC_BASE_IDX 1 +#define regVCN_UMSCH_IH_CTX_CTRL 0x0766 +#define regVCN_UMSCH_IH_CTX_CTRL_BASE_IDX 1 +#define regUVD_UMSCH_FORCE 0x076b +#define regUVD_UMSCH_FORCE_BASE_IDX 1 +#define regUMSCH_MES_RESET_CTRL 0x0770 +#define regUMSCH_MES_RESET_CTRL_BASE_IDX 1 + + +// addressBlock: uvd_vcn_cprs64dec +// base address: 0x21600 +#define regVCN_MES_PRGRM_CNTR_START 0x0780 +#define regVCN_MES_PRGRM_CNTR_START_BASE_IDX 1 +#define regVCN_MES_INTR_ROUTINE_START 0x0781 +#define regVCN_MES_INTR_ROUTINE_START_BASE_IDX 1 +#define regVCN_MES_MTVEC_LO 0x0781 +#define regVCN_MES_MTVEC_LO_BASE_IDX 1 +#define regVCN_MES_INTR_ROUTINE_START_HI 0x0782 +#define regVCN_MES_INTR_ROUTINE_START_HI_BASE_IDX 1 +#define regVCN_MES_MTVEC_HI 0x0782 +#define regVCN_MES_MTVEC_HI_BASE_IDX 1 +#define regVCN_MES_CNTL 0x0787 +#define regVCN_MES_CNTL_BASE_IDX 1 +#define regVCN_MES_PIPE_PRIORITY_CNTS 0x0788 +#define regVCN_MES_PIPE_PRIORITY_CNTS_BASE_IDX 1 +#define regVCN_MES_PIPE0_PRIORITY 0x0789 +#define regVCN_MES_PIPE0_PRIORITY_BASE_IDX 1 +#define regVCN_MES_PIPE1_PRIORITY 0x078a +#define regVCN_MES_PIPE1_PRIORITY_BASE_IDX 1 +#define regVCN_MES_PIPE2_PRIORITY 0x078b +#define regVCN_MES_PIPE2_PRIORITY_BASE_IDX 1 +#define regVCN_MES_PIPE3_PRIORITY 0x078c +#define regVCN_MES_PIPE3_PRIORITY_BASE_IDX 1 +#define regVCN_MES_HEADER_DUMP 0x078d +#define regVCN_MES_HEADER_DUMP_BASE_IDX 1 +#define regVCN_MES_MIE_LO 0x078e +#define regVCN_MES_MIE_LO_BASE_IDX 1 +#define regVCN_MES_MIE_HI 0x078f +#define regVCN_MES_MIE_HI_BASE_IDX 1 +#define regVCN_MES_INTERRUPT 0x0790 +#define regVCN_MES_INTERRUPT_BASE_IDX 1 +#define regVCN_MES_SCRATCH_INDEX 0x0791 +#define regVCN_MES_SCRATCH_INDEX_BASE_IDX 1 +#define regVCN_MES_SCRATCH_DATA 0x0792 +#define regVCN_MES_SCRATCH_DATA_BASE_IDX 1 +#define regVCN_MES_INSTR_PNTR 0x0793 +#define regVCN_MES_INSTR_PNTR_BASE_IDX 1 +#define regVCN_MES_MSCRATCH_HI 0x0794 +#define regVCN_MES_MSCRATCH_HI_BASE_IDX 1 +#define regVCN_MES_MSCRATCH_LO 0x0795 +#define regVCN_MES_MSCRATCH_LO_BASE_IDX 1 +#define regVCN_MES_MSTATUS_LO 0x0796 +#define regVCN_MES_MSTATUS_LO_BASE_IDX 1 +#define regVCN_MES_MSTATUS_HI 0x0797 +#define regVCN_MES_MSTATUS_HI_BASE_IDX 1 +#define regVCN_MES_MEPC_LO 0x0798 +#define regVCN_MES_MEPC_LO_BASE_IDX 1 +#define regVCN_MES_MEPC_HI 0x0799 +#define regVCN_MES_MEPC_HI_BASE_IDX 1 +#define regVCN_MES_MCAUSE_LO 0x079a +#define regVCN_MES_MCAUSE_LO_BASE_IDX 1 +#define regVCN_MES_MCAUSE_HI 0x079b +#define regVCN_MES_MCAUSE_HI_BASE_IDX 1 +#define regVCN_MES_MBADADDR_LO 0x079c +#define regVCN_MES_MBADADDR_LO_BASE_IDX 1 +#define regVCN_MES_MBADADDR_HI 0x079d +#define regVCN_MES_MBADADDR_HI_BASE_IDX 1 +#define regVCN_MES_MIP_LO 0x079e +#define regVCN_MES_MIP_LO_BASE_IDX 1 +#define regVCN_MES_MIP_HI 0x079f +#define regVCN_MES_MIP_HI_BASE_IDX 1 +#define regVCN_MES_IC_OP_CNTL 0x07a0 +#define regVCN_MES_IC_OP_CNTL_BASE_IDX 1 +#define regVCN_MES_MCYCLE_LO 0x07a6 +#define regVCN_MES_MCYCLE_LO_BASE_IDX 1 +#define regVCN_MES_MCYCLE_HI 0x07a7 +#define regVCN_MES_MCYCLE_HI_BASE_IDX 1 +#define regVCN_MES_MTIME_LO 0x07a8 +#define regVCN_MES_MTIME_LO_BASE_IDX 1 +#define regVCN_MES_MTIME_HI 0x07a9 +#define 
regVCN_MES_MTIME_HI_BASE_IDX 1 +#define regVCN_MES_MINSTRET_LO 0x07aa +#define regVCN_MES_MINSTRET_LO_BASE_IDX 1 +#define regVCN_MES_MINSTRET_HI 0x07ab +#define regVCN_MES_MINSTRET_HI_BASE_IDX 1 +#define regVCN_MES_MISA_LO 0x07ac +#define regVCN_MES_MISA_LO_BASE_IDX 1 +#define regVCN_MES_MISA_HI 0x07ad +#define regVCN_MES_MISA_HI_BASE_IDX 1 +#define regVCN_MES_MVENDORID_LO 0x07ae +#define regVCN_MES_MVENDORID_LO_BASE_IDX 1 +#define regVCN_MES_MVENDORID_HI 0x07af +#define regVCN_MES_MVENDORID_HI_BASE_IDX 1 +#define regVCN_MES_MARCHID_LO 0x07b0 +#define regVCN_MES_MARCHID_LO_BASE_IDX 1 +#define regVCN_MES_MARCHID_HI 0x07b1 +#define regVCN_MES_MARCHID_HI_BASE_IDX 1 +#define regVCN_MES_MIMPID_LO 0x07b2 +#define regVCN_MES_MIMPID_LO_BASE_IDX 1 +#define regVCN_MES_MIMPID_HI 0x07b3 +#define regVCN_MES_MIMPID_HI_BASE_IDX 1 +#define regVCN_MES_MHARTID_LO 0x07b4 +#define regVCN_MES_MHARTID_LO_BASE_IDX 1 +#define regVCN_MES_MHARTID_HI 0x07b5 +#define regVCN_MES_MHARTID_HI_BASE_IDX 1 +#define regVCN_MES_DC_BASE_CNTL 0x07b6 +#define regVCN_MES_DC_BASE_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_OP_CNTL 0x07b7 +#define regVCN_MES_DC_OP_CNTL_BASE_IDX 1 +#define regVCN_MES_MTIMECMP_LO 0x07b8 +#define regVCN_MES_MTIMECMP_LO_BASE_IDX 1 +#define regVCN_MES_MTIMECMP_HI 0x07b9 +#define regVCN_MES_MTIMECMP_HI_BASE_IDX 1 +#define regVCN_MES_GP0_LO 0x07c3 +#define regVCN_MES_GP0_LO_BASE_IDX 1 +#define regVCN_MES_GP0_HI 0x07c4 +#define regVCN_MES_GP0_HI_BASE_IDX 1 +#define regVCN_MES_GP1_LO 0x07c5 +#define regVCN_MES_GP1_LO_BASE_IDX 1 +#define regVCN_MES_GP1_HI 0x07c6 +#define regVCN_MES_GP1_HI_BASE_IDX 1 +#define regVCN_MES_GP2_LO 0x07c7 +#define regVCN_MES_GP2_LO_BASE_IDX 1 +#define regVCN_MES_GP2_HI 0x07c8 +#define regVCN_MES_GP2_HI_BASE_IDX 1 +#define regVCN_MES_GP3_LO 0x07c9 +#define regVCN_MES_GP3_LO_BASE_IDX 1 +#define regVCN_MES_GP3_HI 0x07ca +#define regVCN_MES_GP3_HI_BASE_IDX 1 +#define regVCN_MES_GP4_LO 0x07cb +#define regVCN_MES_GP4_LO_BASE_IDX 1 +#define regVCN_MES_GP4_HI 0x07cc +#define regVCN_MES_GP4_HI_BASE_IDX 1 +#define regVCN_MES_GP5_LO 0x07cd +#define regVCN_MES_GP5_LO_BASE_IDX 1 +#define regVCN_MES_GP5_HI 0x07ce +#define regVCN_MES_GP5_HI_BASE_IDX 1 +#define regVCN_MES_GP6_LO 0x07cf +#define regVCN_MES_GP6_LO_BASE_IDX 1 +#define regVCN_MES_GP6_HI 0x07d0 +#define regVCN_MES_GP6_HI_BASE_IDX 1 +#define regVCN_MES_GP7_LO 0x07d1 +#define regVCN_MES_GP7_LO_BASE_IDX 1 +#define regVCN_MES_GP7_HI 0x07d2 +#define regVCN_MES_GP7_HI_BASE_IDX 1 +#define regVCN_MES_GP8_LO 0x07d3 +#define regVCN_MES_GP8_LO_BASE_IDX 1 +#define regVCN_MES_GP8_HI 0x07d4 +#define regVCN_MES_GP8_HI_BASE_IDX 1 +#define regVCN_MES_GP9_LO 0x07d5 +#define regVCN_MES_GP9_LO_BASE_IDX 1 +#define regVCN_MES_GP9_HI 0x07d6 +#define regVCN_MES_GP9_HI_BASE_IDX 1 +#define regVCN_MES_DM_INDEX_ADDR 0x0800 +#define regVCN_MES_DM_INDEX_ADDR_BASE_IDX 1 +#define regVCN_MES_DM_INDEX_DATA 0x0801 +#define regVCN_MES_DM_INDEX_DATA_BASE_IDX 1 +#define regVCN_MES_LOCAL_BASE0_LO 0x0803 +#define regVCN_MES_LOCAL_BASE0_LO_BASE_IDX 1 +#define regVCN_MES_LOCAL_BASE0_HI 0x0804 +#define regVCN_MES_LOCAL_BASE0_HI_BASE_IDX 1 +#define regVCN_MES_LOCAL_MASK0_LO 0x0805 +#define regVCN_MES_LOCAL_MASK0_LO_BASE_IDX 1 +#define regVCN_MES_LOCAL_MASK0_HI 0x0806 +#define regVCN_MES_LOCAL_MASK0_HI_BASE_IDX 1 +#define regVCN_MES_LOCAL_APERTURE 0x0807 +#define regVCN_MES_LOCAL_APERTURE_BASE_IDX 1 +#define regVCN_MES_LOCAL_INSTR_BASE_LO 0x0808 +#define regVCN_MES_LOCAL_INSTR_BASE_LO_BASE_IDX 1 +#define regVCN_MES_LOCAL_INSTR_BASE_HI 0x0809 +#define 
regVCN_MES_LOCAL_INSTR_BASE_HI_BASE_IDX 1 +#define regVCN_MES_LOCAL_INSTR_MASK_LO 0x080a +#define regVCN_MES_LOCAL_INSTR_MASK_LO_BASE_IDX 1 +#define regVCN_MES_LOCAL_INSTR_MASK_HI 0x080b +#define regVCN_MES_LOCAL_INSTR_MASK_HI_BASE_IDX 1 +#define regVCN_MES_LOCAL_INSTR_APERTURE 0x080c +#define regVCN_MES_LOCAL_INSTR_APERTURE_BASE_IDX 1 +#define regVCN_MES_LOCAL_SCRATCH_APERTURE 0x080d +#define regVCN_MES_LOCAL_SCRATCH_APERTURE_BASE_IDX 1 +#define regVCN_MES_LOCAL_SCRATCH_BASE_LO 0x080e +#define regVCN_MES_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1 +#define regVCN_MES_LOCAL_SCRATCH_BASE_HI 0x080f +#define regVCN_MES_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1 +#define regVCN_MES_PERFCOUNT_CNTL 0x0819 +#define regVCN_MES_PERFCOUNT_CNTL_BASE_IDX 1 +#define regVCN_MES_PENDING_INTERRUPT 0x081a +#define regVCN_MES_PENDING_INTERRUPT_BASE_IDX 1 +#define regVCN_MES_PRGRM_CNTR_START_HI 0x081d +#define regVCN_MES_PRGRM_CNTR_START_HI_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_16 0x081f +#define regVCN_MES_INTERRUPT_DATA_16_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_17 0x0820 +#define regVCN_MES_INTERRUPT_DATA_17_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_18 0x0821 +#define regVCN_MES_INTERRUPT_DATA_18_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_19 0x0822 +#define regVCN_MES_INTERRUPT_DATA_19_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_20 0x0823 +#define regVCN_MES_INTERRUPT_DATA_20_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_21 0x0824 +#define regVCN_MES_INTERRUPT_DATA_21_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_22 0x0825 +#define regVCN_MES_INTERRUPT_DATA_22_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_23 0x0826 +#define regVCN_MES_INTERRUPT_DATA_23_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_24 0x0827 +#define regVCN_MES_INTERRUPT_DATA_24_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_25 0x0828 +#define regVCN_MES_INTERRUPT_DATA_25_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_26 0x0829 +#define regVCN_MES_INTERRUPT_DATA_26_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_27 0x082a +#define regVCN_MES_INTERRUPT_DATA_27_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_28 0x082b +#define regVCN_MES_INTERRUPT_DATA_28_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_29 0x082c +#define regVCN_MES_INTERRUPT_DATA_29_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_30 0x082d +#define regVCN_MES_INTERRUPT_DATA_30_BASE_IDX 1 +#define regVCN_MES_INTERRUPT_DATA_31 0x082e +#define regVCN_MES_INTERRUPT_DATA_31_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE0_BASE 0x082f +#define regVCN_MES_DC_APERTURE0_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE0_MASK 0x0830 +#define regVCN_MES_DC_APERTURE0_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE0_CNTL 0x0831 +#define regVCN_MES_DC_APERTURE0_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE1_BASE 0x0832 +#define regVCN_MES_DC_APERTURE1_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE1_MASK 0x0833 +#define regVCN_MES_DC_APERTURE1_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE1_CNTL 0x0834 +#define regVCN_MES_DC_APERTURE1_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE2_BASE 0x0835 +#define regVCN_MES_DC_APERTURE2_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE2_MASK 0x0836 +#define regVCN_MES_DC_APERTURE2_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE2_CNTL 0x0837 +#define regVCN_MES_DC_APERTURE2_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE3_BASE 0x0838 +#define regVCN_MES_DC_APERTURE3_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE3_MASK 0x0839 +#define regVCN_MES_DC_APERTURE3_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE3_CNTL 0x083a +#define 
regVCN_MES_DC_APERTURE3_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE4_BASE 0x083b +#define regVCN_MES_DC_APERTURE4_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE4_MASK 0x083c +#define regVCN_MES_DC_APERTURE4_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE4_CNTL 0x083d +#define regVCN_MES_DC_APERTURE4_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE5_BASE 0x083e +#define regVCN_MES_DC_APERTURE5_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE5_MASK 0x083f +#define regVCN_MES_DC_APERTURE5_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE5_CNTL 0x0840 +#define regVCN_MES_DC_APERTURE5_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE6_BASE 0x0841 +#define regVCN_MES_DC_APERTURE6_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE6_MASK 0x0842 +#define regVCN_MES_DC_APERTURE6_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE6_CNTL 0x0843 +#define regVCN_MES_DC_APERTURE6_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE7_BASE 0x0844 +#define regVCN_MES_DC_APERTURE7_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE7_MASK 0x0845 +#define regVCN_MES_DC_APERTURE7_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE7_CNTL 0x0846 +#define regVCN_MES_DC_APERTURE7_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE8_BASE 0x0847 +#define regVCN_MES_DC_APERTURE8_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE8_MASK 0x0848 +#define regVCN_MES_DC_APERTURE8_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE8_CNTL 0x0849 +#define regVCN_MES_DC_APERTURE8_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE9_BASE 0x084a +#define regVCN_MES_DC_APERTURE9_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE9_MASK 0x084b +#define regVCN_MES_DC_APERTURE9_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE9_CNTL 0x084c +#define regVCN_MES_DC_APERTURE9_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE10_BASE 0x084d +#define regVCN_MES_DC_APERTURE10_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE10_MASK 0x084e +#define regVCN_MES_DC_APERTURE10_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE10_CNTL 0x084f +#define regVCN_MES_DC_APERTURE10_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE11_BASE 0x0850 +#define regVCN_MES_DC_APERTURE11_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE11_MASK 0x0851 +#define regVCN_MES_DC_APERTURE11_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE11_CNTL 0x0852 +#define regVCN_MES_DC_APERTURE11_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE12_BASE 0x0853 +#define regVCN_MES_DC_APERTURE12_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE12_MASK 0x0854 +#define regVCN_MES_DC_APERTURE12_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE12_CNTL 0x0855 +#define regVCN_MES_DC_APERTURE12_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE13_BASE 0x0856 +#define regVCN_MES_DC_APERTURE13_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE13_MASK 0x0857 +#define regVCN_MES_DC_APERTURE13_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE13_CNTL 0x0858 +#define regVCN_MES_DC_APERTURE13_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE14_BASE 0x0859 +#define regVCN_MES_DC_APERTURE14_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE14_MASK 0x085a +#define regVCN_MES_DC_APERTURE14_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE14_CNTL 0x085b +#define regVCN_MES_DC_APERTURE14_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE15_BASE 0x085c +#define regVCN_MES_DC_APERTURE15_BASE_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE15_MASK 0x085d +#define regVCN_MES_DC_APERTURE15_MASK_BASE_IDX 1 +#define regVCN_MES_DC_APERTURE15_CNTL 0x085e +#define regVCN_MES_DC_APERTURE15_CNTL_BASE_IDX 1 + + +// addressBlock: uvd_vcn_hypdec +// base address: 0x21a00 +#define regVCN_MES_IC_BASE_LO 
0x08d0 +#define regVCN_MES_IC_BASE_LO_BASE_IDX 1 +#define regVCN_MES_MIBASE_LO 0x08d0 +#define regVCN_MES_MIBASE_LO_BASE_IDX 1 +#define regVCN_MES_IC_BASE_HI 0x08d1 +#define regVCN_MES_IC_BASE_HI_BASE_IDX 1 +#define regVCN_MES_MIBASE_HI 0x08d1 +#define regVCN_MES_MIBASE_HI_BASE_IDX 1 +#define regVCN_MES_IC_BASE_CNTL 0x08d2 +#define regVCN_MES_IC_BASE_CNTL_BASE_IDX 1 +#define regVCN_MES_DC_BASE_LO 0x08d4 +#define regVCN_MES_DC_BASE_LO_BASE_IDX 1 +#define regVCN_MES_MDBASE_LO 0x08d4 +#define regVCN_MES_MDBASE_LO_BASE_IDX 1 +#define regVCN_MES_DC_BASE_HI 0x08d5 +#define regVCN_MES_DC_BASE_HI_BASE_IDX 1 +#define regVCN_MES_MDBASE_HI 0x08d5 +#define regVCN_MES_MDBASE_HI_BASE_IDX 1 +#define regVCN_MES_MIBOUND_LO 0x08db +#define regVCN_MES_MIBOUND_LO_BASE_IDX 1 +#define regVCN_MES_MIBOUND_HI 0x08dc +#define regVCN_MES_MIBOUND_HI_BASE_IDX 1 +#define regVCN_MES_MDBOUND_LO 0x08dd +#define regVCN_MES_MDBOUND_LO_BASE_IDX 1 +#define regVCN_MES_MDBOUND_HI 0x08de +#define regVCN_MES_MDBOUND_HI_BASE_IDX 1 + + +// addressBlock: uvd_slmi_adpdec +// base address: 0x21c00 +#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW 0x0900 +#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH 0x0901 +#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW 0x0902 +#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH 0x0903 +#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW 0x0904 +#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH 0x0905 +#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW 0x0906 +#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH 0x0907 +#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW 0x0908 +#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH 0x0909 +#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW 0x090a +#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH 0x090b +#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW 0x090c +#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH 0x090d +#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW 0x090e +#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH 0x090f +#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH_BASE_IDX 1 +#define regUVD_LMI_MMSCH_NC_VMID 0x0910 +#define regUVD_LMI_MMSCH_NC_VMID_BASE_IDX 1 +#define regUVD_LMI_MMSCH_CTRL 0x0911 +#define regUVD_LMI_MMSCH_CTRL_BASE_IDX 1 +#define regUVD_MMSCH_LMI_STATUS 0x0912 +#define regUVD_MMSCH_LMI_STATUS_BASE_IDX 1 +#define regUMSCH_IOV_ACTIVE_FCN_ID 0x0920 +#define regUMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 1 +#define regUVD_UMSCH_LMI_STATUS 0x0923 +#define regUVD_UMSCH_LMI_STATUS_BASE_IDX 1 + + +// addressBlock: uvdctxind +// base address: 0x0 +#define ixUVD_CGC_MEM_CTRL 0x0000 +#define ixUVD_CGC_CTRL2 0x0001 +#define ixUVD_CGC_MEM_DS_CTRL 0x0002 +#define ixUVD_CGC_MEM_SD_CTRL 0x0003 +#define ixUVD_SW_SCRATCH_00 0x0004 +#define ixUVD_SW_SCRATCH_01 0x0005 +#define ixUVD_SW_SCRATCH_02 0x0006 +#define ixUVD_SW_SCRATCH_03 0x0007 +#define 
ixUVD_SW_SCRATCH_04 0x0008
+#define ixUVD_SW_SCRATCH_05 0x0009
+#define ixUVD_SW_SCRATCH_06 0x000a
+#define ixUVD_SW_SCRATCH_07 0x000b
+#define ixUVD_SW_SCRATCH_08 0x000c
+#define ixUVD_SW_SCRATCH_09 0x000d
+#define ixUVD_SW_SCRATCH_10 0x000e
+#define ixUVD_SW_SCRATCH_11 0x000f
+#define ixUVD_SW_SCRATCH_12 0x0010
+#define ixUVD_SW_SCRATCH_13 0x0011
+#define ixUVD_SW_SCRATCH_14 0x0012
+#define ixUVD_SW_SCRATCH_15 0x0013
+#define ixUVD_IH_SEM_CTRL 0x001e
+
+
+// addressBlock: lmi_adp_indirect
+// base address: 0x0
+#define ixUVD_LMI_CRC0 0x0000
+#define ixUVD_LMI_CRC1 0x0001
+#define ixUVD_LMI_CRC2 0x0002
+#define ixUVD_LMI_CRC3 0x0003
+#define ixUVD_LMI_CRC10 0x000a
+#define ixUVD_LMI_CRC11 0x000b
+#define ixUVD_LMI_CRC12 0x000c
+#define ixUVD_LMI_CRC13 0x000d
+#define ixUVD_LMI_CRC14 0x000e
+#define ixUVD_LMI_CRC15 0x000f
+#define ixUVD_LMI_SWAP_CNTL2 0x0029
+#define ixUVD_MEMCHECK_SYS_INT_EN 0x0134
+#define ixUVD_MEMCHECK_SYS_INT_STAT 0x0135
+#define ixUVD_MEMCHECK_SYS_INT_ACK 0x0136
+#define ixUVD_MEMCHECK_VCPU_INT_EN 0x0137
+#define ixUVD_MEMCHECK_VCPU_INT_STAT 0x0138
+#define ixUVD_MEMCHECK_VCPU_INT_ACK 0x0139
+#define ixUVD_MEMCHECK2_SYS_INT_STAT 0x0140
+#define ixUVD_MEMCHECK2_SYS_INT_ACK 0x0141
+#define ixUVD_MEMCHECK2_VCPU_INT_STAT 0x0142
+#define ixUVD_MEMCHECK2_VCPU_INT_ACK 0x0143
+
+
+#endif
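The two generated files in this hunk pair follow the standard SOC15 header layout: the *_offset.h file above defines an (offset, BASE_IDX) pair per register, and the *_sh_mask.h file below defines a (__SHIFT, _MASK) pair per bitfield. The following is a minimal, self-contained sketch of how a driver typically consumes such pairs; it is modeled on amdgpu's SOC15_REG_OFFSET() and REG_GET_FIELD()/REG_SET_FIELD() helpers, with a hypothetical segment-base table standing in for the per-device adev->reg_offset[] array, and with the register values copied from the hunks in this patch.

/* Sketch only, not driver code: consuming the generated (offset, BASE_IDX)
 * and (__SHIFT, _MASK) pairs.  The field accessors mirror the shape of
 * amdgpu.h's REG_GET_FIELD()/REG_SET_FIELD(); vcn_seg_base[] is a made-up
 * stand-in for adev->reg_offset[VCN_HWIP][inst][], not a real aperture. */
#include <stdint.h>
#include <stdio.h>

/* Copied from the vcn_5_0_0 headers in this patch. */
#define regJPEG_CGC_GATE          0x0720
#define regJPEG_CGC_GATE_BASE_IDX 1
#define UVD_CGC_GATE__VCPU__SHIFT 0x12
#define UVD_CGC_GATE__VCPU_MASK   0x00040000L

/* Token-pasting field accessors built from the __SHIFT/_MASK naming. */
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field)  reg##__##field##_MASK
#define REG_GET_FIELD(val, reg, field) \
        (((val) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
#define REG_SET_FIELD(orig, reg, field, fv) \
        (((orig) & ~REG_FIELD_MASK(reg, field)) | \
         (REG_FIELD_MASK(reg, field) & ((fv) << REG_FIELD_SHIFT(reg, field))))

/* Hypothetical per-instance segment bases (dword offsets); the driver
 * reads the real values from its ASIC-specific reg_offset tables. */
static const uint32_t vcn_seg_base[] = { 0x07800, 0x08400 };

/* BASE_IDX selects the segment; the register offset is added to its base. */
#define REG_OFFSET_SKETCH(reg) (vcn_seg_base[reg##_BASE_IDX] + reg)

int main(void)
{
        uint32_t cgc = 0;

        /* Set the VCPU clock-gate field, then read it back. */
        cgc = REG_SET_FIELD(cgc, UVD_CGC_GATE, VCPU, 1);
        printf("regJPEG_CGC_GATE dword offset = 0x%05x\n",
               (unsigned int)REG_OFFSET_SKETCH(regJPEG_CGC_GATE));
        printf("UVD_CGC_GATE = 0x%08x, VCPU = %lu\n",
               (unsigned int)cgc,
               (unsigned long)REG_GET_FIELD(cgc, UVD_CGC_GATE, VCPU));
        return 0;
}

The BASE_IDX indirection is what lets one generated header serve multiple ASICs: each register keeps a fixed offset within its address block, and only the per-block base addresses move between chips.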
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
new file mode 100644
index 000000000000..5c119a6b87fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
@@ -0,0 +1,7627 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _vcn_5_0_0_SH_MASK_HEADER
+#define _vcn_5_0_0_SH_MASK_HEADER
+
+
+// addressBlock: uvd_uvddec
+//UVD_TOP_CTRL
+#define UVD_TOP_CTRL__STANDARD__SHIFT 0x0
+#define UVD_TOP_CTRL__STD_VERSION__SHIFT 0x4
+#define UVD_TOP_CTRL__STANDARD_MASK 0x0000000FL
+#define UVD_TOP_CTRL__STD_VERSION_MASK 0x00000010L
+//UVD_CGC_GATE
+#define UVD_CGC_GATE__SYS__SHIFT 0x0
+#define UVD_CGC_GATE__UDEC__SHIFT 0x1
+#define UVD_CGC_GATE__MPEG2__SHIFT 0x2
+#define UVD_CGC_GATE__REGS__SHIFT 0x3
+#define UVD_CGC_GATE__RBC__SHIFT 0x4
+#define UVD_CGC_GATE__LMI_MC__SHIFT 0x5
+#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x6
+#define UVD_CGC_GATE__IDCT__SHIFT 0x7
+#define UVD_CGC_GATE__MPRD__SHIFT 0x8
+#define UVD_CGC_GATE__MPC__SHIFT 0x9
+#define UVD_CGC_GATE__LBSI__SHIFT 0xa
+#define UVD_CGC_GATE__LRBBM__SHIFT 0xb
+#define UVD_CGC_GATE__UDEC_RE__SHIFT 0xc
+#define UVD_CGC_GATE__UDEC_CM__SHIFT 0xd
+#define UVD_CGC_GATE__UDEC_IT__SHIFT 0xe
+#define UVD_CGC_GATE__UDEC_DB__SHIFT 0xf
+#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x10
+#define UVD_CGC_GATE__WCB__SHIFT 0x11
+#define UVD_CGC_GATE__VCPU__SHIFT 0x12
+#define UVD_CGC_GATE__MMSCH__SHIFT 0x14
+#define UVD_CGC_GATE__LCM0__SHIFT 0x15
+#define UVD_CGC_GATE__LCM1__SHIFT 0x16
+#define UVD_CGC_GATE__MIF__SHIFT 0x17
+#define UVD_CGC_GATE__VREG__SHIFT 0x18
+#define UVD_CGC_GATE__PE__SHIFT 0x19
+#define UVD_CGC_GATE__PPU__SHIFT 0x1a
+#define UVD_CGC_GATE__SYS_MASK 0x00000001L
+#define UVD_CGC_GATE__UDEC_MASK 0x00000002L
+#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L
+#define UVD_CGC_GATE__REGS_MASK 0x00000008L
+#define UVD_CGC_GATE__RBC_MASK 0x00000010L
+#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L
+#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L
+#define UVD_CGC_GATE__IDCT_MASK 0x00000080L
+#define UVD_CGC_GATE__MPRD_MASK 0x00000100L
+#define UVD_CGC_GATE__MPC_MASK 0x00000200L
+#define UVD_CGC_GATE__LBSI_MASK 0x00000400L
+#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L
+#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L
+#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L
+#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L
+#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L
+#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L
+#define UVD_CGC_GATE__WCB_MASK 0x00020000L
+#define UVD_CGC_GATE__VCPU_MASK 0x00040000L
+#define UVD_CGC_GATE__MMSCH_MASK 0x00100000L
+#define UVD_CGC_GATE__LCM0_MASK 0x00200000L
+#define UVD_CGC_GATE__LCM1_MASK 0x00400000L
+#define UVD_CGC_GATE__MIF_MASK 0x00800000L
+#define UVD_CGC_GATE__VREG_MASK 0x01000000L
+#define UVD_CGC_GATE__PE_MASK 0x02000000L
+#define UVD_CGC_GATE__PPU_MASK 0x04000000L
+//UVD_CGC_CTRL
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x2
+#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x6
+#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x11
+#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x12
+#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x13
+#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x14
+#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x15
+#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x16
+#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x17
+#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x18
+#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x19
+#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x1a
+#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x1b
+#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x1c
+#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x1d +#define UVD_CGC_CTRL__MMSCH_MODE__SHIFT 0x1f +#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L +#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003CL +#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007C0L +#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L +#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L +#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L +#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L +#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L +#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L +#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L +#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L +#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L +#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L +#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L +#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L +#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L +#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L +#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L +#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L +#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L +#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L +#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L +#define UVD_CGC_CTRL__MMSCH_MODE_MASK 0x80000000L +//AVM_SUVD_CGC_GATE +#define AVM_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define AVM_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define AVM_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define AVM_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define AVM_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define AVM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define AVM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define AVM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define AVM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define AVM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define AVM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define AVM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define AVM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define AVM_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define AVM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define AVM_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define AVM_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define AVM_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define AVM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define AVM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define AVM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define AVM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define AVM_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define AVM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define AVM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define AVM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define AVM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define AVM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define AVM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define AVM_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define AVM_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define AVM_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define AVM_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define AVM_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define AVM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define AVM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define AVM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define AVM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define AVM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define AVM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define AVM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define AVM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define AVM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define 
AVM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define AVM_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define AVM_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define AVM_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define AVM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define AVM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define AVM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define AVM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define AVM_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define AVM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define AVM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define AVM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define AVM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define AVM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define AVM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//EFC_SUVD_CGC_GATE +#define EFC_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define EFC_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define EFC_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define EFC_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define EFC_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define EFC_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define EFC_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define EFC_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define EFC_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define EFC_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define EFC_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define EFC_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define EFC_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define EFC_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define EFC_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define EFC_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define EFC_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define EFC_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define EFC_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define EFC_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define EFC_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define EFC_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define EFC_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define EFC_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define EFC_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define EFC_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define EFC_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define EFC_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define EFC_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define EFC_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define EFC_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define EFC_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define EFC_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define EFC_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define EFC_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define EFC_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define EFC_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define EFC_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define EFC_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define EFC_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define EFC_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define EFC_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define EFC_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define EFC_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define EFC_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define EFC_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define EFC_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define EFC_SUVD_CGC_GATE__SRE_VP9_MASK 
0x00100000L +#define EFC_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define EFC_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define EFC_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define EFC_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define EFC_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define EFC_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define EFC_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define EFC_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define EFC_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define EFC_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//ENT_SUVD_CGC_GATE +#define ENT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define ENT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define ENT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define ENT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define ENT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define ENT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define ENT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define ENT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define ENT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define ENT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define ENT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define ENT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define ENT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define ENT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define ENT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define ENT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define ENT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define ENT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define ENT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define ENT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define ENT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define ENT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define ENT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define ENT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define ENT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define ENT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define ENT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define ENT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define ENT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define ENT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define ENT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define ENT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define ENT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define ENT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define ENT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define ENT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define ENT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define ENT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define ENT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define ENT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define ENT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define ENT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define ENT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define ENT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define ENT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define ENT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define ENT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define ENT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define ENT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define ENT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define ENT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define ENT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define ENT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define 
ENT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define ENT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define ENT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define ENT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define ENT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//IME_SUVD_CGC_GATE +#define IME_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define IME_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define IME_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define IME_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define IME_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define IME_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define IME_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define IME_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define IME_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define IME_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define IME_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define IME_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define IME_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define IME_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define IME_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define IME_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define IME_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define IME_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define IME_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define IME_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define IME_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define IME_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define IME_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define IME_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define IME_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define IME_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define IME_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define IME_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define IME_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define IME_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define IME_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define IME_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define IME_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define IME_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define IME_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define IME_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define IME_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define IME_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define IME_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define IME_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define IME_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define IME_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define IME_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define IME_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define IME_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define IME_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define IME_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define IME_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define IME_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define IME_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define IME_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define IME_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define IME_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define IME_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define IME_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define IME_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define IME_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define IME_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define IME_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define IME_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//PPU_SUVD_CGC_GATE +#define PPU_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define PPU_SUVD_CGC_GATE__SIT__SHIFT 
0x1 +#define PPU_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define PPU_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define PPU_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define PPU_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define PPU_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define PPU_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define PPU_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define PPU_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define PPU_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define PPU_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define PPU_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define PPU_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define PPU_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define PPU_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define PPU_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define PPU_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define PPU_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define PPU_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define PPU_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define PPU_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define PPU_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define PPU_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define PPU_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define PPU_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define PPU_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define PPU_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define PPU_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define PPU_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define PPU_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define PPU_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define PPU_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define PPU_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define PPU_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define PPU_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define PPU_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define PPU_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define PPU_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define PPU_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define PPU_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define PPU_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define PPU_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define PPU_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define PPU_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define PPU_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define PPU_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define PPU_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define PPU_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define PPU_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define PPU_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define PPU_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define PPU_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define PPU_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define PPU_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define PPU_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define PPU_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define PPU_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SAOE_SUVD_CGC_GATE +#define SAOE_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SAOE_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SAOE_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SAOE_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SAOE_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SAOE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SAOE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SAOE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SAOE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define 
SAOE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SAOE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SAOE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SAOE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SAOE_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SAOE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SAOE_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SAOE_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SAOE_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SAOE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SAOE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SAOE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SAOE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SAOE_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SAOE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SAOE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SAOE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SAOE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SAOE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SAOE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SAOE_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SAOE_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SAOE_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SAOE_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SAOE_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SAOE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SAOE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SAOE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SAOE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SAOE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SAOE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SAOE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SAOE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SAOE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SAOE_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SAOE_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SAOE_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SAOE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SAOE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SAOE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SAOE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SAOE_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SAOE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SAOE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SAOE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SAOE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SAOE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SAOE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SCM_SUVD_CGC_GATE +#define SCM_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SCM_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SCM_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SCM_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SCM_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SCM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SCM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SCM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SCM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SCM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SCM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SCM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SCM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SCM_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SCM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define 
SCM_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SCM_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SCM_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SCM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SCM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SCM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SCM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SCM_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SCM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SCM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SCM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SCM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SCM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SCM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SCM_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SCM_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SCM_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SCM_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SCM_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SCM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SCM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SCM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SCM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SCM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SCM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SCM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SCM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SCM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SCM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SCM_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SCM_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SCM_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SCM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SCM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SCM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SCM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SCM_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SCM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SCM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SCM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SCM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SCM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SCM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SDB_SUVD_CGC_GATE +#define SDB_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SDB_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SDB_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SDB_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SDB_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SDB_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SDB_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SDB_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SDB_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SDB_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SDB_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SDB_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SDB_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SDB_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SDB_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SDB_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SDB_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SDB_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SDB_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SDB_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define 
SDB_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SDB_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SDB_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SDB_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SDB_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SDB_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SDB_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SDB_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SDB_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SDB_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SDB_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SDB_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SDB_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SDB_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SDB_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SDB_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SDB_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SDB_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SDB_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SDB_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SDB_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SDB_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SDB_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SDB_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SDB_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SDB_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SDB_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SDB_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SDB_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SDB_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SDB_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SDB_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SDB_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SDB_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SDB_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SDB_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SDB_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SDB_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SDB_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SIT0_NXT_SUVD_CGC_GATE +#define SIT0_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SIT0_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SIT0_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SIT0_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SIT0_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SIT0_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SIT0_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SIT0_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SIT0_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SIT0_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define 
SIT0_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SIT0_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SIT0_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SIT0_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SIT0_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SIT0_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SIT0_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SIT0_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SIT0_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SIT0_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SIT0_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SIT0_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SIT1_NXT_SUVD_CGC_GATE +#define SIT1_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SIT1_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SIT1_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SIT1_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SIT1_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SIT1_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SIT1_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SIT1_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SIT1_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SIT1_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define 
SIT1_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SIT1_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SIT1_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SIT1_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SIT1_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SIT1_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SIT1_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SIT1_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SIT1_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SIT1_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SIT1_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SIT1_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SIT2_NXT_SUVD_CGC_GATE +#define SIT2_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SIT2_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SIT2_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SIT2_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SIT2_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SIT2_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SIT2_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SIT2_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SIT2_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SIT2_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define 
SIT2_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SIT2_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SIT2_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SIT2_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SIT2_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SIT2_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SIT2_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SIT2_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SIT2_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SIT2_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SIT2_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SIT2_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SIT_SUVD_CGC_GATE +#define SIT_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SIT_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SIT_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SIT_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SIT_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SIT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SIT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SIT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SIT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SIT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SIT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SIT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SIT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SIT_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SIT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SIT_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SIT_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SIT_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SIT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SIT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SIT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SIT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SIT_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SIT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SIT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SIT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define 
SIT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SIT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SIT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SIT_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SIT_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SIT_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SIT_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SIT_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SIT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SIT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SIT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SIT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SIT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SIT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SIT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SIT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SIT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SIT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SIT_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SIT_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SIT_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SIT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SIT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SIT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SIT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SIT_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SIT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SIT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SIT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SIT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SIT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SIT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SMPA_SUVD_CGC_GATE +#define SMPA_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SMPA_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SMPA_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SMPA_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SMPA_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SMPA_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SMPA_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SMPA_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SMPA_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SMPA_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SMPA_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SMPA_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SMPA_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SMPA_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SMPA_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SMPA_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SMPA_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SMPA_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SMPA_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SMPA_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SMPA_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SMPA_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SMPA_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SMPA_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SMPA_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SMPA_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SMPA_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SMPA_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SMPA_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SMPA_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SMPA_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SMPA_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SMPA_SUVD_CGC_GATE__SCM_MASK 
0x00000008L +#define SMPA_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SMPA_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SMPA_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SMPA_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SMPA_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SMPA_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SMPA_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SMPA_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SMPA_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SMPA_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SMPA_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SMPA_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SMPA_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SMPA_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SMPA_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SMPA_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SMPA_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SMPA_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SMPA_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SMPA_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SMPA_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SMPA_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SMPA_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SMPA_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SMP_SUVD_CGC_GATE +#define SMP_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SMP_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SMP_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SMP_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SMP_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SMP_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SMP_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SMP_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SMP_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SMP_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SMP_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SMP_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SMP_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SMP_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SMP_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SMP_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SMP_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SMP_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SMP_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SMP_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SMP_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SMP_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SMP_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SMP_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SMP_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SMP_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SMP_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SMP_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SMP_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SMP_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SMP_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SMP_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SMP_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SMP_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SMP_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SMP_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SMP_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SMP_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SMP_SUVD_CGC_GATE__SCM_H264_MASK 
0x00000200L +#define SMP_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SMP_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SMP_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SMP_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SMP_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SMP_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define SMP_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SMP_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SMP_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SMP_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SMP_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SMP_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SMP_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SMP_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SMP_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SMP_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SMP_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SMP_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SMP_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//SRE_SUVD_CGC_GATE +#define SRE_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define SRE_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define SRE_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define SRE_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define SRE_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define SRE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define SRE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define SRE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define SRE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define SRE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define SRE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define SRE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define SRE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define SRE_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define SRE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define SRE_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define SRE_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define SRE_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define SRE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define SRE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define SRE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define SRE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define SRE_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define SRE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define SRE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define SRE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define SRE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define SRE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define SRE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define SRE_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define SRE_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define SRE_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define SRE_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define SRE_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define SRE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define SRE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define SRE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define SRE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define SRE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define SRE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define SRE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define SRE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define SRE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define SRE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define SRE_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define 
SRE_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define SRE_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define SRE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define SRE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define SRE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define SRE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define SRE_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define SRE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define SRE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define SRE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define SRE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define SRE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define SRE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//UVD_SUVD_CGC_GATE +#define UVD_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define UVD_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define UVD_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define UVD_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define UVD_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define UVD_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define UVD_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define UVD_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define UVD_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define UVD_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define UVD_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define UVD_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define UVD_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define UVD_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define UVD_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define UVD_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define UVD_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define UVD_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define UVD_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define UVD_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define UVD_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define UVD_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define UVD_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define UVD_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define UVD_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define UVD_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define UVD_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define UVD_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define UVD_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define UVD_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define UVD_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define UVD_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define UVD_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define UVD_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define UVD_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define UVD_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define UVD_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define UVD_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define UVD_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define UVD_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define UVD_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define UVD_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define UVD_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define UVD_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 
0x00400000L +#define UVD_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define UVD_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define UVD_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define UVD_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define UVD_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define UVD_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define UVD_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//AVM_SUVD_CGC_GATE2 +#define AVM_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define AVM_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define AVM_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define AVM_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define AVM_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define AVM_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define AVM_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define AVM_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define AVM_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define AVM_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define AVM_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define AVM_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define AVM_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define AVM_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define AVM_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define AVM_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//DBR_SUVD_CGC_GATE2 +#define DBR_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define DBR_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define DBR_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define DBR_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define DBR_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define DBR_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define DBR_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define DBR_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define DBR_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define DBR_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define DBR_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define DBR_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define DBR_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define DBR_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define DBR_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define DBR_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//ENT_SUVD_CGC_GATE2 +#define ENT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define ENT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define ENT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define ENT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define ENT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define ENT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define ENT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define ENT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define 
ENT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define ENT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define ENT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define ENT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define ENT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define ENT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define ENT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define ENT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define ENT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//IME_SUVD_CGC_GATE2 +#define IME_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define IME_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define IME_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define IME_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define IME_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define IME_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define IME_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define IME_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define IME_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define IME_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define IME_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define IME_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define IME_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define IME_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define IME_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define IME_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SAOE_SUVD_CGC_GATE2 +#define SAOE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SAOE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SAOE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SAOE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SAOE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SAOE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SAOE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SAOE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SAOE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SAOE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SAOE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SAOE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SAOE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SAOE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SAOE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SAOE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SDB_SUVD_CGC_GATE2 +#define SDB_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SDB_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SDB_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SDB_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SDB_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define 
SDB_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SDB_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SDB_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SDB_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SDB_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SDB_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SDB_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SDB_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SDB_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SDB_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SDB_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SIT0_NXT_SUVD_CGC_GATE2 +#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SIT0_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SIT0_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SIT1_NXT_SUVD_CGC_GATE2 +#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SIT1_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SIT1_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 
0x00000200L +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SIT2_NXT_SUVD_CGC_GATE2 +#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SIT2_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SIT2_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SIT_SUVD_CGC_GATE2 +#define SIT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SIT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SIT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SIT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SIT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SIT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SIT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SIT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SIT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SIT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SIT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SIT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SIT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SIT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SIT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SIT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SMPA_SUVD_CGC_GATE2 +#define SMPA_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SMPA_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SMPA_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SMPA_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SMPA_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SMPA_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SMPA_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SMPA_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SMPA_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SMPA_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SMPA_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define 
SMPA_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SMPA_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SMPA_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SMPA_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SMPA_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SMP_SUVD_CGC_GATE2 +#define SMP_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SMP_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SMP_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SMP_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SMP_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SMP_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SMP_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SMP_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SMP_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SMP_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SMP_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SMP_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SMP_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SMP_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SMP_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SMP_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//SRE_SUVD_CGC_GATE2 +#define SRE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define SRE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define SRE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define SRE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define SRE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define SRE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define SRE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define SRE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define SRE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define SRE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define SRE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define SRE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define SRE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define SRE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define SRE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define SRE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//UVD_SUVD_CGC_GATE2 +#define UVD_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define UVD_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define UVD_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define UVD_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define UVD_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define UVD_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define UVD_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define 
UVD_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define UVD_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define UVD_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define UVD_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define UVD_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define UVD_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define UVD_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define UVD_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//AVM_SUVD_CGC_CTRL +#define AVM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define AVM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define AVM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define AVM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define AVM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define AVM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define AVM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define AVM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define AVM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define AVM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define AVM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define AVM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define AVM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define AVM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define AVM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define AVM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define AVM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define AVM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define AVM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define AVM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define AVM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define AVM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define AVM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define AVM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define AVM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define AVM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define AVM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define AVM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define AVM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define AVM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define AVM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define AVM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define AVM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define AVM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define AVM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define AVM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define AVM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define AVM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define AVM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define AVM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//DBR_SUVD_CGC_CTRL +#define DBR_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define 
DBR_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define DBR_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define DBR_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define DBR_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define DBR_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define DBR_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define DBR_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define DBR_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define DBR_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define DBR_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define DBR_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define DBR_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define DBR_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define DBR_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define DBR_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define DBR_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define DBR_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define DBR_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define DBR_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define DBR_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define DBR_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define DBR_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define DBR_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define DBR_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define DBR_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define DBR_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define DBR_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define DBR_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define DBR_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define DBR_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define DBR_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define DBR_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define DBR_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define DBR_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define DBR_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define DBR_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define DBR_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define DBR_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define DBR_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//EFC_SUVD_CGC_CTRL +#define EFC_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define EFC_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define EFC_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define EFC_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define EFC_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define EFC_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define EFC_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define EFC_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define EFC_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define EFC_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define EFC_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define EFC_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define EFC_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define EFC_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define 
EFC_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define EFC_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define EFC_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define EFC_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define EFC_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define EFC_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define EFC_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define EFC_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define EFC_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define EFC_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define EFC_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define EFC_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define EFC_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define EFC_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define EFC_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define EFC_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define EFC_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define EFC_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define EFC_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define EFC_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define EFC_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define EFC_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define EFC_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define EFC_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define EFC_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define EFC_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define EFC_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//ENT_SUVD_CGC_CTRL +#define ENT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define ENT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define ENT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define ENT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define ENT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define ENT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define ENT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define ENT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define ENT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define ENT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define ENT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define ENT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define ENT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define ENT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define ENT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define ENT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define ENT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define ENT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define ENT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define ENT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define ENT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define ENT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define ENT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define ENT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define 
ENT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define ENT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define ENT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define ENT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define ENT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define ENT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define ENT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define ENT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define ENT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define ENT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define ENT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define ENT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define ENT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define ENT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define ENT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define ENT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//IME_SUVD_CGC_CTRL +#define IME_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define IME_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define IME_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define IME_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define IME_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define IME_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define IME_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define IME_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define IME_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define IME_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define IME_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define IME_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define IME_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define IME_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define IME_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define IME_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define IME_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define IME_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define IME_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define IME_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define IME_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define IME_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define IME_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define IME_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define IME_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define IME_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define IME_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define IME_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define IME_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define IME_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define IME_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define IME_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define IME_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define IME_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define IME_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define IME_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define IME_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L 
+#define IME_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define IME_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define IME_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define IME_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define IME_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//PPU_SUVD_CGC_CTRL +#define PPU_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define PPU_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define PPU_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define PPU_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define PPU_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define PPU_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define PPU_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define PPU_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define PPU_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define PPU_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define PPU_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define PPU_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define PPU_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define PPU_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define PPU_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define PPU_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define PPU_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define PPU_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define PPU_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define PPU_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define PPU_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define PPU_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define PPU_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define PPU_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define PPU_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define PPU_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define PPU_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define PPU_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define PPU_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define PPU_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define PPU_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define PPU_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define PPU_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define PPU_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define PPU_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define PPU_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define PPU_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define PPU_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define PPU_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define PPU_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SAOE_SUVD_CGC_CTRL +#define SAOE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SAOE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SAOE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SAOE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SAOE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define 
SAOE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SAOE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SAOE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SAOE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SAOE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SAOE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SAOE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SAOE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SAOE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SAOE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SAOE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SAOE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SAOE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SAOE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SAOE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SAOE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SAOE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SAOE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SAOE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SAOE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SAOE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SAOE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SAOE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SAOE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SAOE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SCM_SUVD_CGC_CTRL +#define SCM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SCM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SCM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SCM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SCM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SCM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SCM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SCM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SCM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SCM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SCM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SCM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SCM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SCM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SCM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SCM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define 
SCM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SCM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SCM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SCM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SCM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SCM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SCM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SCM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SCM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SCM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SCM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SCM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SCM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SCM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SCM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SCM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SCM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SCM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SCM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SCM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SCM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SCM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SCM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SCM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SDB_SUVD_CGC_CTRL +#define SDB_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SDB_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SDB_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SDB_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SDB_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SDB_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SDB_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SDB_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SDB_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SDB_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SDB_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SDB_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SDB_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SDB_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SDB_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SDB_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SDB_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SDB_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SDB_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SDB_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SDB_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SDB_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SDB_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SDB_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SDB_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SDB_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define 
SDB_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SDB_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SDB_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SDB_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SDB_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SDB_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SDB_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SDB_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SDB_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SDB_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SDB_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SDB_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SDB_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SDB_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SIT0_NXT_SUVD_CGC_CTRL +#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L 
+#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SIT1_NXT_SUVD_CGC_CTRL +#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L 
+#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SIT2_NXT_SUVD_CGC_CTRL +#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SIT_SUVD_CGC_CTRL +#define SIT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SIT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SIT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SIT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SIT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SIT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SIT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define 
SIT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SIT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SIT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SIT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SIT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SIT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SIT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SIT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SIT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SIT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SIT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SIT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SIT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SIT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SIT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SIT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SIT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SIT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SIT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SIT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SIT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SIT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SIT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SIT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SIT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SIT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SIT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SIT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SIT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SIT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SIT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SIT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SIT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SMPA_SUVD_CGC_CTRL +#define SMPA_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SMPA_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SMPA_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SMPA_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SMPA_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SMPA_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SMPA_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SMPA_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SMPA_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SMPA_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SMPA_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SMPA_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SMPA_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define 
SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SMPA_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SMPA_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SMPA_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SMPA_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SMPA_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SMPA_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SMPA_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SMPA_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SMPA_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SMPA_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SMPA_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SMPA_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SMPA_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SMPA_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SMPA_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SMPA_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SMPA_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SMP_SUVD_CGC_CTRL +#define SMP_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SMP_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SMP_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SMP_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SMP_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SMP_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SMP_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SMP_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SMP_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SMP_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SMP_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SMP_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SMP_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SMP_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SMP_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SMP_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SMP_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SMP_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SMP_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SMP_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SMP_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SMP_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SMP_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SMP_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SMP_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SMP_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SMP_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SMP_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SMP_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L 
+#define SMP_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SMP_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SMP_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SMP_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SMP_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SMP_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SMP_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SMP_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define SMP_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SMP_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SMP_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//SRE_SUVD_CGC_CTRL +#define SRE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define SRE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define SRE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define SRE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define SRE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define SRE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define SRE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define SRE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define SRE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define SRE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define SRE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define SRE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define SRE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define SRE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define SRE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define SRE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define SRE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define SRE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define SRE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define SRE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define SRE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define SRE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define SRE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define SRE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define SRE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define SRE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define SRE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define SRE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define SRE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define SRE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define SRE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define SRE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define SRE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define SRE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define SRE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define SRE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define SRE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define 
SRE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define SRE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define SRE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//UVD_SUVD_CGC_CTRL +#define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define UVD_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define UVD_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define UVD_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define UVD_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define UVD_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define UVD_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define UVD_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define UVD_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define UVD_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define UVD_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define UVD_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define UVD_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define UVD_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define UVD_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define UVD_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define UVD_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c +#define UVD_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d +#define UVD_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define UVD_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define UVD_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define UVD_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define UVD_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define UVD_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L +#define UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L +#define UVD_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L +//UVD_CGC_CTRL3 +#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY__SHIFT 0x0 +#define UVD_CGC_CTRL3__LCM0_MODE__SHIFT 0xb +#define UVD_CGC_CTRL3__LCM1_MODE__SHIFT 0xc +#define UVD_CGC_CTRL3__MIF_MODE__SHIFT 0xd +#define UVD_CGC_CTRL3__VREG_MODE__SHIFT 0xe +#define UVD_CGC_CTRL3__PE_MODE__SHIFT 0xf +#define UVD_CGC_CTRL3__PPU_MODE__SHIFT 0x10 +#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY_MASK 0x000000FFL +#define UVD_CGC_CTRL3__LCM0_MODE_MASK 0x00000800L +#define UVD_CGC_CTRL3__LCM1_MODE_MASK 0x00001000L +#define UVD_CGC_CTRL3__MIF_MODE_MASK 0x00002000L +#define UVD_CGC_CTRL3__VREG_MODE_MASK 0x00004000L +#define 
UVD_CGC_CTRL3__PE_MODE_MASK 0x00008000L +#define UVD_CGC_CTRL3__PPU_MODE_MASK 0x00010000L +//UVD_GPCOM_VCPU_DATA0 +#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x0 +#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xFFFFFFFFL +//UVD_GPCOM_VCPU_DATA1 +#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0 +#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL +//UVD_GPCOM_SYS_CMD +#define UVD_GPCOM_SYS_CMD__CMD_SEND__SHIFT 0x0 +#define UVD_GPCOM_SYS_CMD__CMD__SHIFT 0x1 +#define UVD_GPCOM_SYS_CMD__CMD_SOURCE__SHIFT 0x1f +#define UVD_GPCOM_SYS_CMD__CMD_SEND_MASK 0x00000001L +#define UVD_GPCOM_SYS_CMD__CMD_MASK 0x7FFFFFFEL +#define UVD_GPCOM_SYS_CMD__CMD_SOURCE_MASK 0x80000000L +//UVD_GPCOM_SYS_DATA0 +#define UVD_GPCOM_SYS_DATA0__DATA0__SHIFT 0x0 +#define UVD_GPCOM_SYS_DATA0__DATA0_MASK 0xFFFFFFFFL +//UVD_GPCOM_SYS_DATA1 +#define UVD_GPCOM_SYS_DATA1__DATA1__SHIFT 0x0 +#define UVD_GPCOM_SYS_DATA1__DATA1_MASK 0xFFFFFFFFL +//UVD_VCPU_INT_EN +#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0 +#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1 +#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2 +#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN__SHIFT 0x3 +#define UVD_VCPU_INT_EN__SW_RB1_INT_EN__SHIFT 0x4 +#define UVD_VCPU_INT_EN__SW_RB2_INT_EN__SHIFT 0x5 +#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6 +#define UVD_VCPU_INT_EN__SW_RB3_INT_EN__SHIFT 0x7 +#define UVD_VCPU_INT_EN__SW_RB4_INT_EN__SHIFT 0x9 +#define UVD_VCPU_INT_EN__SW_RB5_INT_EN__SHIFT 0xa +#define UVD_VCPU_INT_EN__LBSI_EN__SHIFT 0xb +#define UVD_VCPU_INT_EN__UDEC_EN__SHIFT 0xc +#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd +#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe +#define UVD_VCPU_INT_EN__SUVD_EN__SHIFT 0xf +#define UVD_VCPU_INT_EN__RPTR_WR_EN__SHIFT 0x10 +#define UVD_VCPU_INT_EN__JOB_START_EN__SHIFT 0x11 +#define UVD_VCPU_INT_EN__NJ_PF_EN__SHIFT 0x12 +#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17 +#define UVD_VCPU_INT_EN__IDCT_EN__SHIFT 0x18 +#define UVD_VCPU_INT_EN__MPRD_EN__SHIFT 0x19 +#define UVD_VCPU_INT_EN__AVM_INT_EN__SHIFT 0x1a +#define UVD_VCPU_INT_EN__CLK_SWT_EN__SHIFT 0x1b +#define UVD_VCPU_INT_EN__MIF_HWINT_EN__SHIFT 0x1c +#define UVD_VCPU_INT_EN__MPRD_ERR_EN__SHIFT 0x1d +#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN__SHIFT 0x1e +#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN__SHIFT 0x1f +#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L +#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L +#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L +#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN_MASK 0x00000008L +#define UVD_VCPU_INT_EN__SW_RB1_INT_EN_MASK 0x00000010L +#define UVD_VCPU_INT_EN__SW_RB2_INT_EN_MASK 0x00000020L +#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L +#define UVD_VCPU_INT_EN__SW_RB3_INT_EN_MASK 0x00000080L +#define UVD_VCPU_INT_EN__SW_RB4_INT_EN_MASK 0x00000200L +#define UVD_VCPU_INT_EN__SW_RB5_INT_EN_MASK 0x00000400L +#define UVD_VCPU_INT_EN__LBSI_EN_MASK 0x00000800L +#define UVD_VCPU_INT_EN__UDEC_EN_MASK 0x00001000L +#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L +#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L +#define UVD_VCPU_INT_EN__SUVD_EN_MASK 0x00008000L +#define UVD_VCPU_INT_EN__RPTR_WR_EN_MASK 0x00010000L +#define UVD_VCPU_INT_EN__JOB_START_EN_MASK 0x00020000L +#define UVD_VCPU_INT_EN__NJ_PF_EN_MASK 0x00040000L +#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L +#define UVD_VCPU_INT_EN__IDCT_EN_MASK 0x01000000L +#define 
UVD_VCPU_INT_EN__MPRD_EN_MASK 0x02000000L +#define UVD_VCPU_INT_EN__AVM_INT_EN_MASK 0x04000000L +#define UVD_VCPU_INT_EN__CLK_SWT_EN_MASK 0x08000000L +#define UVD_VCPU_INT_EN__MIF_HWINT_EN_MASK 0x10000000L +#define UVD_VCPU_INT_EN__MPRD_ERR_EN_MASK 0x20000000L +#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN_MASK 0x40000000L +#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN_MASK 0x80000000L +//UVD_VCPU_INT_STATUS +#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0 +#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1 +#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2 +#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT__SHIFT 0x3 +#define UVD_VCPU_INT_STATUS__SW_RB1_INT__SHIFT 0x4 +#define UVD_VCPU_INT_STATUS__SW_RB2_INT__SHIFT 0x5 +#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6 +#define UVD_VCPU_INT_STATUS__SW_RB3_INT__SHIFT 0x7 +#define UVD_VCPU_INT_STATUS__SW_RB4_INT__SHIFT 0x9 +#define UVD_VCPU_INT_STATUS__SW_RB5_INT__SHIFT 0xa +#define UVD_VCPU_INT_STATUS__LBSI_INT__SHIFT 0xb +#define UVD_VCPU_INT_STATUS__UDEC_INT__SHIFT 0xc +#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd +#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe +#define UVD_VCPU_INT_STATUS__SUVD_INT__SHIFT 0xf +#define UVD_VCPU_INT_STATUS__RPTR_WR_INT__SHIFT 0x10 +#define UVD_VCPU_INT_STATUS__JOB_START_INT__SHIFT 0x11 +#define UVD_VCPU_INT_STATUS__NJ_PF_INT__SHIFT 0x12 +#define UVD_VCPU_INT_STATUS__GPCOM_INT__SHIFT 0x14 +#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17 +#define UVD_VCPU_INT_STATUS__IDCT_INT__SHIFT 0x18 +#define UVD_VCPU_INT_STATUS__MPRD_INT__SHIFT 0x19 +#define UVD_VCPU_INT_STATUS__AVM_INT__SHIFT 0x1a +#define UVD_VCPU_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b +#define UVD_VCPU_INT_STATUS__MIF_HWINT__SHIFT 0x1c +#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d +#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT__SHIFT 0x1e +#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT__SHIFT 0x1f +#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L +#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L +#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L +#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT_MASK 0x00000008L +#define UVD_VCPU_INT_STATUS__SW_RB1_INT_MASK 0x00000010L +#define UVD_VCPU_INT_STATUS__SW_RB2_INT_MASK 0x00000020L +#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L +#define UVD_VCPU_INT_STATUS__SW_RB3_INT_MASK 0x00000080L +#define UVD_VCPU_INT_STATUS__SW_RB4_INT_MASK 0x00000200L +#define UVD_VCPU_INT_STATUS__SW_RB5_INT_MASK 0x00000400L +#define UVD_VCPU_INT_STATUS__LBSI_INT_MASK 0x00000800L +#define UVD_VCPU_INT_STATUS__UDEC_INT_MASK 0x00001000L +#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L +#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L +#define UVD_VCPU_INT_STATUS__SUVD_INT_MASK 0x00008000L +#define UVD_VCPU_INT_STATUS__RPTR_WR_INT_MASK 0x00010000L +#define UVD_VCPU_INT_STATUS__JOB_START_INT_MASK 0x00020000L +#define UVD_VCPU_INT_STATUS__NJ_PF_INT_MASK 0x00040000L +#define UVD_VCPU_INT_STATUS__GPCOM_INT_MASK 0x00100000L +#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L +#define UVD_VCPU_INT_STATUS__IDCT_INT_MASK 0x01000000L +#define UVD_VCPU_INT_STATUS__MPRD_INT_MASK 0x02000000L +#define UVD_VCPU_INT_STATUS__AVM_INT_MASK 0x04000000L +#define UVD_VCPU_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L +#define UVD_VCPU_INT_STATUS__MIF_HWINT_MASK 0x10000000L +#define 
UVD_VCPU_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L +#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT_MASK 0x40000000L +#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT_MASK 0x80000000L +//UVD_VCPU_INT_ACK +#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0 +#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1 +#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2 +#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK__SHIFT 0x3 +#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK__SHIFT 0x4 +#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK__SHIFT 0x5 +#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6 +#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK__SHIFT 0x7 +#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK__SHIFT 0x9 +#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK__SHIFT 0xa +#define UVD_VCPU_INT_ACK__LBSI_ACK__SHIFT 0xb +#define UVD_VCPU_INT_ACK__UDEC_ACK__SHIFT 0xc +#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd +#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe +#define UVD_VCPU_INT_ACK__SUVD_ACK__SHIFT 0xf +#define UVD_VCPU_INT_ACK__RPTR_WR_ACK__SHIFT 0x10 +#define UVD_VCPU_INT_ACK__JOB_START_ACK__SHIFT 0x11 +#define UVD_VCPU_INT_ACK__NJ_PF_ACK__SHIFT 0x12 +#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17 +#define UVD_VCPU_INT_ACK__IDCT_ACK__SHIFT 0x18 +#define UVD_VCPU_INT_ACK__MPRD_ACK__SHIFT 0x19 +#define UVD_VCPU_INT_ACK__AVM_INT_ACK__SHIFT 0x1a +#define UVD_VCPU_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b +#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c +#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d +#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK__SHIFT 0x1e +#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK__SHIFT 0x1f +#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L +#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L +#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L +#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK_MASK 0x00000008L +#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK_MASK 0x00000010L +#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK_MASK 0x00000020L +#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L +#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK_MASK 0x00000080L +#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK_MASK 0x00000200L +#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK_MASK 0x00000400L +#define UVD_VCPU_INT_ACK__LBSI_ACK_MASK 0x00000800L +#define UVD_VCPU_INT_ACK__UDEC_ACK_MASK 0x00001000L +#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L +#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L +#define UVD_VCPU_INT_ACK__SUVD_ACK_MASK 0x00008000L +#define UVD_VCPU_INT_ACK__RPTR_WR_ACK_MASK 0x00010000L +#define UVD_VCPU_INT_ACK__JOB_START_ACK_MASK 0x00020000L +#define UVD_VCPU_INT_ACK__NJ_PF_ACK_MASK 0x00040000L +#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L +#define UVD_VCPU_INT_ACK__IDCT_ACK_MASK 0x01000000L +#define UVD_VCPU_INT_ACK__MPRD_ACK_MASK 0x02000000L +#define UVD_VCPU_INT_ACK__AVM_INT_ACK_MASK 0x04000000L +#define UVD_VCPU_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L +#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L +#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L +#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK_MASK 0x40000000L +#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK_MASK 0x80000000L +//UVD_VCPU_INT_ROUTE +#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG__SHIFT 0x0 +#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK__SHIFT 0x1 +#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM__SHIFT 0x2 +#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG_MASK 0x00000001L +#define 
UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK_MASK 0x00000002L +#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM_MASK 0x00000004L +//UVD_DRV_FW_MSG +#define UVD_DRV_FW_MSG__MSG__SHIFT 0x0 +#define UVD_DRV_FW_MSG__MSG_MASK 0xFFFFFFFFL +//UVD_FW_DRV_MSG_ACK +#define UVD_FW_DRV_MSG_ACK__ACK__SHIFT 0x0 +#define UVD_FW_DRV_MSG_ACK__ACK_MASK 0x00000001L +//UVD_SUVD_INT_EN +#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN__SHIFT 0x0 +#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN__SHIFT 0x5 +#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN__SHIFT 0x6 +#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN__SHIFT 0xb +#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN__SHIFT 0xc +#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN__SHIFT 0x11 +#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN__SHIFT 0x12 +#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN__SHIFT 0x17 +#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN__SHIFT 0x18 +#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN__SHIFT 0x1d +#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN__SHIFT 0x1e +#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN_MASK 0x0000001FL +#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN_MASK 0x00000020L +#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN_MASK 0x000007C0L +#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN_MASK 0x00000800L +#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN_MASK 0x0001F000L +#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN_MASK 0x00020000L +#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN_MASK 0x007C0000L +#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN_MASK 0x00800000L +#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN_MASK 0x1F000000L +#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN_MASK 0x20000000L +#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN_MASK 0x40000000L +//UVD_SUVD_INT_STATUS +#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT__SHIFT 0x0 +#define UVD_SUVD_INT_STATUS__SRE_ERR_INT__SHIFT 0x5 +#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT__SHIFT 0x6 +#define UVD_SUVD_INT_STATUS__SIT_ERR_INT__SHIFT 0xb +#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT__SHIFT 0xc +#define UVD_SUVD_INT_STATUS__SMP_ERR_INT__SHIFT 0x11 +#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT__SHIFT 0x12 +#define UVD_SUVD_INT_STATUS__SCM_ERR_INT__SHIFT 0x17 +#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT__SHIFT 0x18 +#define UVD_SUVD_INT_STATUS__SDB_ERR_INT__SHIFT 0x1d +#define UVD_SUVD_INT_STATUS__FBC_ERR_INT__SHIFT 0x1e +#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT_MASK 0x0000001FL +#define UVD_SUVD_INT_STATUS__SRE_ERR_INT_MASK 0x00000020L +#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT_MASK 0x000007C0L +#define UVD_SUVD_INT_STATUS__SIT_ERR_INT_MASK 0x00000800L +#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT_MASK 0x0001F000L +#define UVD_SUVD_INT_STATUS__SMP_ERR_INT_MASK 0x00020000L +#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT_MASK 0x007C0000L +#define UVD_SUVD_INT_STATUS__SCM_ERR_INT_MASK 0x00800000L +#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT_MASK 0x1F000000L +#define UVD_SUVD_INT_STATUS__SDB_ERR_INT_MASK 0x20000000L +#define UVD_SUVD_INT_STATUS__FBC_ERR_INT_MASK 0x40000000L +//UVD_SUVD_INT_ACK +#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK__SHIFT 0x0 +#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK__SHIFT 0x5 +#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK__SHIFT 0x6 +#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK__SHIFT 0xb +#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK__SHIFT 0xc +#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK__SHIFT 0x11 +#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK__SHIFT 0x12 +#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK__SHIFT 0x17 +#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK__SHIFT 0x18 +#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK__SHIFT 0x1d +#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK__SHIFT 0x1e +#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK_MASK 0x0000001FL +#define 
UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK_MASK 0x00000020L +#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK_MASK 0x000007C0L +#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK_MASK 0x00000800L +#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK_MASK 0x0001F000L +#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK_MASK 0x00020000L +#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK_MASK 0x007C0000L +#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK_MASK 0x00800000L +#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK_MASK 0x1F000000L +#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK_MASK 0x20000000L +#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK_MASK 0x40000000L +//UVD_ENC_VCPU_INT_EN +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN__SHIFT 0x0 +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN__SHIFT 0x1 +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN__SHIFT 0x2 +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN_MASK 0x00000001L +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN_MASK 0x00000002L +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN_MASK 0x00000004L +//UVD_ENC_VCPU_INT_STATUS +#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT__SHIFT 0x0 +#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT__SHIFT 0x1 +#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT__SHIFT 0x2 +#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT_MASK 0x00000001L +#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT_MASK 0x00000002L +#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT_MASK 0x00000004L +//UVD_ENC_VCPU_INT_ACK +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK__SHIFT 0x0 +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK__SHIFT 0x1 +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK__SHIFT 0x2 +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK_MASK 0x00000001L +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK_MASK 0x00000002L +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK_MASK 0x00000004L +//UVD_MASTINT_EN +#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0 +#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1 +#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x2 +#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x4 +#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L +#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L +#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L +#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L +//UVD_SYS_INT_EN +#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0 +#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1 +#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2 +#define UVD_SYS_INT_EN__CXW_WR_EN__SHIFT 0x3 +#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6 +#define UVD_SYS_INT_EN__LBSI_EN__SHIFT 0xb +#define UVD_SYS_INT_EN__UDEC_EN__SHIFT 0xc +#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd +#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe +#define UVD_SYS_INT_EN__SUVD_EN__SHIFT 0xf +#define UVD_SYS_INT_EN__JOB_DONE_EN__SHIFT 0x10 +#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17 +#define UVD_SYS_INT_EN__IDCT_EN__SHIFT 0x18 +#define UVD_SYS_INT_EN__MPRD_EN__SHIFT 0x19 +#define UVD_SYS_INT_EN__CLK_SWT_EN__SHIFT 0x1b +#define UVD_SYS_INT_EN__MIF_HWINT_EN__SHIFT 0x1c +#define UVD_SYS_INT_EN__MPRD_ERR_EN__SHIFT 0x1d +#define UVD_SYS_INT_EN__AVM_INT_EN__SHIFT 0x1f +#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L +#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L +#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 
0x00000004L +#define UVD_SYS_INT_EN__CXW_WR_EN_MASK 0x00000008L +#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L +#define UVD_SYS_INT_EN__LBSI_EN_MASK 0x00000800L +#define UVD_SYS_INT_EN__UDEC_EN_MASK 0x00001000L +#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L +#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L +#define UVD_SYS_INT_EN__SUVD_EN_MASK 0x00008000L +#define UVD_SYS_INT_EN__JOB_DONE_EN_MASK 0x00010000L +#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L +#define UVD_SYS_INT_EN__IDCT_EN_MASK 0x01000000L +#define UVD_SYS_INT_EN__MPRD_EN_MASK 0x02000000L +#define UVD_SYS_INT_EN__CLK_SWT_EN_MASK 0x08000000L +#define UVD_SYS_INT_EN__MIF_HWINT_EN_MASK 0x10000000L +#define UVD_SYS_INT_EN__MPRD_ERR_EN_MASK 0x20000000L +#define UVD_SYS_INT_EN__AVM_INT_EN_MASK 0x80000000L +//UVD_SYS_INT_STATUS +#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0 +#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1 +#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2 +#define UVD_SYS_INT_STATUS__CXW_WR_INT__SHIFT 0x3 +#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6 +#define UVD_SYS_INT_STATUS__LBSI_INT__SHIFT 0xb +#define UVD_SYS_INT_STATUS__UDEC_INT__SHIFT 0xc +#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd +#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe +#define UVD_SYS_INT_STATUS__SUVD_INT__SHIFT 0xf +#define UVD_SYS_INT_STATUS__JOB_DONE_INT__SHIFT 0x10 +#define UVD_SYS_INT_STATUS__GPCOM_INT__SHIFT 0x12 +#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17 +#define UVD_SYS_INT_STATUS__IDCT_INT__SHIFT 0x18 +#define UVD_SYS_INT_STATUS__MPRD_INT__SHIFT 0x19 +#define UVD_SYS_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b +#define UVD_SYS_INT_STATUS__MIF_HWINT__SHIFT 0x1c +#define UVD_SYS_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d +#define UVD_SYS_INT_STATUS__AVM_INT__SHIFT 0x1f +#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L +#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L +#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L +#define UVD_SYS_INT_STATUS__CXW_WR_INT_MASK 0x00000008L +#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L +#define UVD_SYS_INT_STATUS__LBSI_INT_MASK 0x00000800L +#define UVD_SYS_INT_STATUS__UDEC_INT_MASK 0x00001000L +#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L +#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L +#define UVD_SYS_INT_STATUS__SUVD_INT_MASK 0x00008000L +#define UVD_SYS_INT_STATUS__JOB_DONE_INT_MASK 0x00010000L +#define UVD_SYS_INT_STATUS__GPCOM_INT_MASK 0x00040000L +#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L +#define UVD_SYS_INT_STATUS__IDCT_INT_MASK 0x01000000L +#define UVD_SYS_INT_STATUS__MPRD_INT_MASK 0x02000000L +#define UVD_SYS_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L +#define UVD_SYS_INT_STATUS__MIF_HWINT_MASK 0x10000000L +#define UVD_SYS_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L +#define UVD_SYS_INT_STATUS__AVM_INT_MASK 0x80000000L +//UVD_SYS_INT_ACK +#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0 +#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1 +#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2 +#define UVD_SYS_INT_ACK__CXW_WR_ACK__SHIFT 0x3 +#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6 +#define UVD_SYS_INT_ACK__LBSI_ACK__SHIFT 0xb +#define UVD_SYS_INT_ACK__UDEC_ACK__SHIFT 0xc 
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd +#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe +#define UVD_SYS_INT_ACK__SUVD_ACK__SHIFT 0xf +#define UVD_SYS_INT_ACK__JOB_DONE_ACK__SHIFT 0x10 +#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17 +#define UVD_SYS_INT_ACK__IDCT_ACK__SHIFT 0x18 +#define UVD_SYS_INT_ACK__MPRD_ACK__SHIFT 0x19 +#define UVD_SYS_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b +#define UVD_SYS_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c +#define UVD_SYS_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d +#define UVD_SYS_INT_ACK__AVM_INT_ACK__SHIFT 0x1f +#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L +#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L +#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L +#define UVD_SYS_INT_ACK__CXW_WR_ACK_MASK 0x00000008L +#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L +#define UVD_SYS_INT_ACK__LBSI_ACK_MASK 0x00000800L +#define UVD_SYS_INT_ACK__UDEC_ACK_MASK 0x00001000L +#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L +#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L +#define UVD_SYS_INT_ACK__SUVD_ACK_MASK 0x00008000L +#define UVD_SYS_INT_ACK__JOB_DONE_ACK_MASK 0x00010000L +#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L +#define UVD_SYS_INT_ACK__IDCT_ACK_MASK 0x01000000L +#define UVD_SYS_INT_ACK__MPRD_ACK_MASK 0x02000000L +#define UVD_SYS_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L +#define UVD_SYS_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L +#define UVD_SYS_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L +#define UVD_SYS_INT_ACK__AVM_INT_ACK_MASK 0x80000000L +//UVD_JOB_DONE +#define UVD_JOB_DONE__JOB_DONE__SHIFT 0x0 +#define UVD_JOB_DONE__JOB_DONE_MASK 0x00000003L +//UVD_CBUF_ID +#define UVD_CBUF_ID__CBUF_ID__SHIFT 0x0 +#define UVD_CBUF_ID__CBUF_ID_MASK 0xFFFFFFFFL +//UVD_CONTEXT_ID +#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x0 +#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xFFFFFFFFL +//UVD_CONTEXT_ID2 +#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0 +#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL +//UVD_NO_OP +#define UVD_NO_OP__NO_OP__SHIFT 0x0 +#define UVD_NO_OP__NO_OP_MASK 0xFFFFFFFFL +//UVD_RB_BASE_LO +#define UVD_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6 +#define UVD_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_RB_BASE_HI +#define UVD_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0 +#define UVD_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_RB_SIZE +#define UVD_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L +//UVD_RB_BASE_LO2 +#define UVD_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x6 +#define UVD_RB_BASE_LO2__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_RB_BASE_HI2 +#define UVD_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x0 +#define UVD_RB_BASE_HI2__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_RB_SIZE2 +#define UVD_RB_SIZE2__RB_SIZE__SHIFT 0x4 +#define UVD_RB_SIZE2__RB_SIZE_MASK 0x007FFFF0L +//UVD_RB_BASE_LO3 +#define UVD_RB_BASE_LO3__RB_BASE_LO__SHIFT 0x6 +#define UVD_RB_BASE_LO3__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_RB_BASE_HI3 +#define UVD_RB_BASE_HI3__RB_BASE_HI__SHIFT 0x0 +#define UVD_RB_BASE_HI3__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_RB_SIZE3 +#define UVD_RB_SIZE3__RB_SIZE__SHIFT 0x4 +#define UVD_RB_SIZE3__RB_SIZE_MASK 0x007FFFF0L +//UVD_RB_BASE_LO4 +#define UVD_RB_BASE_LO4__RB_BASE_LO__SHIFT 0x6 +#define UVD_RB_BASE_LO4__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_RB_BASE_HI4 +#define UVD_RB_BASE_HI4__RB_BASE_HI__SHIFT 0x0 +#define UVD_RB_BASE_HI4__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_RB_SIZE4 +#define UVD_RB_SIZE4__RB_SIZE__SHIFT 0x4 
+#define UVD_RB_SIZE4__RB_SIZE_MASK 0x007FFFF0L +//UVD_OUT_RB_BASE_LO +#define UVD_OUT_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6 +#define UVD_OUT_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_OUT_RB_BASE_HI +#define UVD_OUT_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0 +#define UVD_OUT_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_OUT_RB_SIZE +#define UVD_OUT_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_OUT_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L +//UVD_IOV_ACTIVE_FCN_ID +#define UVD_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0 +#define UVD_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f +#define UVD_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000003FL +#define UVD_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L +//UVD_IOV_MAILBOX +#define UVD_IOV_MAILBOX__MAILBOX__SHIFT 0x0 +#define UVD_IOV_MAILBOX__MAILBOX_MASK 0xFFFFFFFFL +//UVD_IOV_MAILBOX_RESP +#define UVD_IOV_MAILBOX_RESP__RESP__SHIFT 0x0 +#define UVD_IOV_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL +//UVD_RB_ARB_CTRL +#define UVD_RB_ARB_CTRL__SRBM_DROP__SHIFT 0x0 +#define UVD_RB_ARB_CTRL__SRBM_DIS__SHIFT 0x1 +#define UVD_RB_ARB_CTRL__VCPU_DROP__SHIFT 0x2 +#define UVD_RB_ARB_CTRL__VCPU_DIS__SHIFT 0x3 +#define UVD_RB_ARB_CTRL__RBC_DROP__SHIFT 0x4 +#define UVD_RB_ARB_CTRL__RBC_DIS__SHIFT 0x5 +#define UVD_RB_ARB_CTRL__FWOFLD_DROP__SHIFT 0x6 +#define UVD_RB_ARB_CTRL__FWOFLD_DIS__SHIFT 0x7 +#define UVD_RB_ARB_CTRL__FAST_PATH_EN__SHIFT 0x8 +#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN__SHIFT 0x9 +#define UVD_RB_ARB_CTRL__SRBM_DROP_MASK 0x00000001L +#define UVD_RB_ARB_CTRL__SRBM_DIS_MASK 0x00000002L +#define UVD_RB_ARB_CTRL__VCPU_DROP_MASK 0x00000004L +#define UVD_RB_ARB_CTRL__VCPU_DIS_MASK 0x00000008L +#define UVD_RB_ARB_CTRL__RBC_DROP_MASK 0x00000010L +#define UVD_RB_ARB_CTRL__RBC_DIS_MASK 0x00000020L +#define UVD_RB_ARB_CTRL__FWOFLD_DROP_MASK 0x00000040L +#define UVD_RB_ARB_CTRL__FWOFLD_DIS_MASK 0x00000080L +#define UVD_RB_ARB_CTRL__FAST_PATH_EN_MASK 0x00000100L +#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN_MASK 0x00000200L +//UVD_CTX_INDEX +#define UVD_CTX_INDEX__INDEX__SHIFT 0x0 +#define UVD_CTX_INDEX__INDEX_MASK 0x000001FFL +//UVD_CTX_DATA +#define UVD_CTX_DATA__DATA__SHIFT 0x0 +#define UVD_CTX_DATA__DATA_MASK 0xFFFFFFFFL +//UVD_CXW_WR +#define UVD_CXW_WR__DAT__SHIFT 0x0 +#define UVD_CXW_WR__STAT__SHIFT 0x1f +#define UVD_CXW_WR__DAT_MASK 0x0FFFFFFFL +#define UVD_CXW_WR__STAT_MASK 0x80000000L +//UVD_CXW_WR_INT_ID +#define UVD_CXW_WR_INT_ID__ID__SHIFT 0x0 +#define UVD_CXW_WR_INT_ID__ID_MASK 0x000000FFL +//UVD_CXW_WR_INT_CTX_ID +#define UVD_CXW_WR_INT_CTX_ID__ID__SHIFT 0x0 +#define UVD_CXW_WR_INT_CTX_ID__ID_MASK 0x0FFFFFFFL +//UVD_CXW_INT_ID +#define UVD_CXW_INT_ID__ID__SHIFT 0x0 +#define UVD_CXW_INT_ID__ID_MASK 0x000000FFL +//UVD_MPEG2_ERROR +#define UVD_MPEG2_ERROR__STATUS__SHIFT 0x0 +#define UVD_MPEG2_ERROR__STATUS_MASK 0xFFFFFFFFL +//UVD_YBASE +#define UVD_YBASE__DUM__SHIFT 0x0 +#define UVD_YBASE__DUM_MASK 0xFFFFFFFFL +//UVD_UVBASE +#define UVD_UVBASE__DUM__SHIFT 0x0 +#define UVD_UVBASE__DUM_MASK 0xFFFFFFFFL +//UVD_PITCH +#define UVD_PITCH__DUM__SHIFT 0x0 +#define UVD_PITCH__DUM_MASK 0xFFFFFFFFL +//UVD_WIDTH +#define UVD_WIDTH__DUM__SHIFT 0x0 +#define UVD_WIDTH__DUM_MASK 0xFFFFFFFFL +//UVD_HEIGHT +#define UVD_HEIGHT__DUM__SHIFT 0x0 +#define UVD_HEIGHT__DUM_MASK 0xFFFFFFFFL +//UVD_PICCOUNT +#define UVD_PICCOUNT__DUM__SHIFT 0x0 +#define UVD_PICCOUNT__DUM_MASK 0xFFFFFFFFL +//UVD_MPRD_INITIAL_XY +#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X__SHIFT 0x0 +#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y__SHIFT 0x10 +#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X_MASK 0x00000FFFL +#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y_MASK 
0x0FFF0000L +//UVD_MPEG2_CTRL +#define UVD_MPEG2_CTRL__EN__SHIFT 0x0 +#define UVD_MPEG2_CTRL__TRICK_MODE__SHIFT 0x1 +#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB__SHIFT 0x10 +#define UVD_MPEG2_CTRL__EN_MASK 0x00000001L +#define UVD_MPEG2_CTRL__TRICK_MODE_MASK 0x00000002L +#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB_MASK 0xFFFF0000L +//UVD_MB_CTL_BUF_BASE +#define UVD_MB_CTL_BUF_BASE__BASE__SHIFT 0x0 +#define UVD_MB_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL +//UVD_PIC_CTL_BUF_BASE +#define UVD_PIC_CTL_BUF_BASE__BASE__SHIFT 0x0 +#define UVD_PIC_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL +//UVD_DXVA_BUF_SIZE +#define UVD_DXVA_BUF_SIZE__PIC_SIZE__SHIFT 0x0 +#define UVD_DXVA_BUF_SIZE__MB_SIZE__SHIFT 0x10 +#define UVD_DXVA_BUF_SIZE__PIC_SIZE_MASK 0x0000FFFFL +#define UVD_DXVA_BUF_SIZE__MB_SIZE_MASK 0xFFFF0000L +//UVD_SCRATCH_NP +#define UVD_SCRATCH_NP__DATA__SHIFT 0x0 +#define UVD_SCRATCH_NP__DATA_MASK 0xFFFFFFFFL +//UVD_CLK_SWT_HANDSHAKE +#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE__SHIFT 0x0 +#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT__SHIFT 0x8 +#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE_MASK 0x00000003L +#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT_MASK 0x00000300L +//UVD_GP_SCRATCH0 +#define UVD_GP_SCRATCH0__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH0__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH1 +#define UVD_GP_SCRATCH1__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH1__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH2 +#define UVD_GP_SCRATCH2__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH2__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH3 +#define UVD_GP_SCRATCH3__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH3__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH4 +#define UVD_GP_SCRATCH4__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH4__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH5 +#define UVD_GP_SCRATCH5__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH5__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH6 +#define UVD_GP_SCRATCH6__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH6__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH7 +#define UVD_GP_SCRATCH7__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH7__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH8 +#define UVD_GP_SCRATCH8__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH8__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH9 +#define UVD_GP_SCRATCH9__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH9__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH10 +#define UVD_GP_SCRATCH10__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH10__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH11 +#define UVD_GP_SCRATCH11__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH11__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH12 +#define UVD_GP_SCRATCH12__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH12__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH13 +#define UVD_GP_SCRATCH13__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH13__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH14 +#define UVD_GP_SCRATCH14__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH14__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH15 +#define UVD_GP_SCRATCH15__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH15__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH16 +#define UVD_GP_SCRATCH16__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH16__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH17 +#define UVD_GP_SCRATCH17__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH17__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH18 +#define UVD_GP_SCRATCH18__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH18__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH19 +#define UVD_GP_SCRATCH19__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH19__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH20 +#define UVD_GP_SCRATCH20__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH20__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH21 +#define UVD_GP_SCRATCH21__DATA__SHIFT 0x0 
+#define UVD_GP_SCRATCH21__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH22 +#define UVD_GP_SCRATCH22__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH22__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH23 +#define UVD_GP_SCRATCH23__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH23__DATA_MASK 0xFFFFFFFFL +//UVD_AUDIO_RB_BASE_LO +#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6 +#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_AUDIO_RB_BASE_HI +#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0 +#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_AUDIO_RB_SIZE +#define UVD_AUDIO_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_AUDIO_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L +//UVD_VCPU_INT_STATUS2 +#define UVD_VCPU_INT_STATUS2__SW_RB6_INT__SHIFT 0x0 +#define UVD_VCPU_INT_STATUS2__SW_RB6_INT_MASK 0x00000001L +//UVD_VCPU_INT_ACK2 +#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK__SHIFT 0x0 +#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK_MASK 0x00000001L +//UVD_VCPU_INT_EN2 +#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN__SHIFT 0x0 +#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN_MASK 0x00000001L +//UVD_SUVD_CGC_STATUS2 +#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK__SHIFT 0x0 +#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK__SHIFT 0x1 +#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK__SHIFT 0x3 +#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK__SHIFT 0x4 +#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK__SHIFT 0x5 +#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK__SHIFT 0x6 +#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK__SHIFT 0x7 +#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK__SHIFT 0x8 +#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK__SHIFT 0x9 +#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK__SHIFT 0xa +#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK__SHIFT 0xb +#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK__SHIFT 0xc +#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK__SHIFT 0xd +#define UVD_SUVD_CGC_STATUS2__FBC_PCLK__SHIFT 0x1c +#define UVD_SUVD_CGC_STATUS2__FBC_CCLK__SHIFT 0x1d +#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK_MASK 0x00000001L +#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK_MASK 0x00000002L +#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK_MASK 0x00000008L +#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK_MASK 0x00000010L +#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK_MASK 0x00000020L +#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK_MASK 0x00000040L +#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK_MASK 0x00000080L +#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK_MASK 0x00000100L +#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK_MASK 0x00000200L +#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK_MASK 0x00000400L +#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK_MASK 0x00000800L +#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK_MASK 0x00001000L +#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK_MASK 0x00002000L +#define UVD_SUVD_CGC_STATUS2__FBC_PCLK_MASK 0x10000000L +#define UVD_SUVD_CGC_STATUS2__FBC_CCLK_MASK 0x20000000L +//UVD_SUVD_INT_STATUS2 +#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT__SHIFT 0x0 +#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT__SHIFT 0x5 +#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT__SHIFT 0x6 +#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT__SHIFT 0xb +#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT_MASK 0x0000001FL +#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT_MASK 0x00000020L +#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT_MASK 0x000007C0L +#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT_MASK 0x00000800L +//UVD_SUVD_INT_EN2 +#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN__SHIFT 0x0 +#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN__SHIFT 0x5 +#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN__SHIFT 0x6 +#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN__SHIFT 0xb +#define 
UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN_MASK 0x0000001FL +#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK 0x00000020L +#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN_MASK 0x000007C0L +#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN_MASK 0x00000800L +//UVD_SUVD_INT_ACK2 +#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT 0x0 +#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT 0x5 +#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK__SHIFT 0x6 +#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK__SHIFT 0xb +#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK 0x0000001FL +#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK 0x00000020L +#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK_MASK 0x000007C0L +#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK_MASK 0x00000800L +//UVD_STATUS +#define UVD_STATUS__RBC_BUSY__SHIFT 0x0 +#define UVD_STATUS__VCPU_REPORT__SHIFT 0x1 +#define UVD_STATUS__FILL_0__SHIFT 0x8 +#define UVD_STATUS__RBC_ACCESS_GPCOM__SHIFT 0x10 +#define UVD_STATUS__DRM_BUSY__SHIFT 0x11 +#define UVD_STATUS__FILL_1__SHIFT 0x12 +#define UVD_STATUS__SYS_GPCOM_REQ__SHIFT 0x1f +#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L +#define UVD_STATUS__VCPU_REPORT_MASK 0x000000FEL +#define UVD_STATUS__FILL_0_MASK 0x0000FF00L +#define UVD_STATUS__RBC_ACCESS_GPCOM_MASK 0x00010000L +#define UVD_STATUS__DRM_BUSY_MASK 0x00020000L +#define UVD_STATUS__FILL_1_MASK 0x7FFC0000L +#define UVD_STATUS__SYS_GPCOM_REQ_MASK 0x80000000L +//UVD_ENC_PIPE_BUSY +#define UVD_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0 +#define UVD_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1 +#define UVD_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2 +#define UVD_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3 +#define UVD_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4 +#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5 +#define UVD_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6 +#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7 +#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8 +#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9 +#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa +#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb +#define UVD_ENC_PIPE_BUSY__EFC_BUSY__SHIFT 0xc +#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY__SHIFT 0xd +#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY__SHIFT 0xe +#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY__SHIFT 0xf +#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10 +#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11 +#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12 +#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13 +#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14 +#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15 +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16 +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17 +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18 +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19 +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x1d +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x1e +#define UVD_ENC_PIPE_BUSY__SAOE_BUSY__SHIFT 0x1f +#define UVD_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L +#define UVD_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L +#define UVD_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L +#define UVD_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L +#define UVD_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L +#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 0x00000020L +#define UVD_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L +#define 
UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L +#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L +#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L +#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L +#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L +#define UVD_ENC_PIPE_BUSY__EFC_BUSY_MASK 0x00001000L +#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY_MASK 0x00002000L +#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY_MASK 0x00004000L +#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY_MASK 0x00008000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x20000000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x40000000L +#define UVD_ENC_PIPE_BUSY__SAOE_BUSY_MASK 0x80000000L +//UVD_FW_POWER_STATUS +#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF__SHIFT 0x0 +#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF__SHIFT 0x1 +#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF__SHIFT 0x2 +#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF__SHIFT 0x3 +#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF__SHIFT 0x4 +#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF__SHIFT 0x5 +#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF__SHIFT 0x6 +#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF__SHIFT 0x7 +#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF__SHIFT 0x8 +#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF__SHIFT 0x9 +#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF__SHIFT 0xa +#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF_MASK 0x00000001L +#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF_MASK 0x00000002L +#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF_MASK 0x00000004L +#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF_MASK 0x00000008L +#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF_MASK 0x00000010L +#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF_MASK 0x00000020L +#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF_MASK 0x00000040L +#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF_MASK 0x00000080L +#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF_MASK 0x00000100L +#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF_MASK 0x00000200L +#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF_MASK 0x00000400L +//UVD_CNTL +#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP__SHIFT 0x11 +#define UVD_CNTL__SUVD_EN__SHIFT 0x13 +#define UVD_CNTL__CABAC_MB_ACC__SHIFT 0x1c +#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS__SHIFT 0x1f +#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK 0x00020000L +#define UVD_CNTL__SUVD_EN_MASK 0x00080000L +#define UVD_CNTL__CABAC_MB_ACC_MASK 0x10000000L +#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS_MASK 0x80000000L +//UVD_SOFT_RESET +#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0 +#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1 +#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x2 +#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x3 +#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x4 +#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x6 +#define 
UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x7 +#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x8 +#define UVD_SOFT_RESET__EFC_SOFT_RESET__SHIFT 0x9 +#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0xa +#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0xb +#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0xc +#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0xd +#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0xe +#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0xf +#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x10 +#define UVD_SOFT_RESET__SUVD_SOFT_RESET__SHIFT 0x11 +#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS__SHIFT 0x12 +#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS__SHIFT 0x13 +#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS__SHIFT 0x14 +#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS__SHIFT 0x15 +#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS__SHIFT 0x16 +#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS__SHIFT 0x17 +#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS__SHIFT 0x18 +#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS__SHIFT 0x19 +#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS__SHIFT 0x1a +#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS__SHIFT 0x1b +#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS__SHIFT 0x1c +#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS__SHIFT 0x1d +#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS__SHIFT 0x1e +#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS__SHIFT 0x1f +#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L +#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L +#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L +#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L +#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L +#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L +#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L +#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L +#define UVD_SOFT_RESET__EFC_SOFT_RESET_MASK 0x00000200L +#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L +#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L +#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L +#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L +#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L +#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L +#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L +#define UVD_SOFT_RESET__SUVD_SOFT_RESET_MASK 0x00020000L +#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS_MASK 0x00040000L +#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS_MASK 0x00080000L +#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS_MASK 0x00100000L +#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS_MASK 0x00200000L +#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS_MASK 0x00400000L +#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS_MASK 0x00800000L +#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS_MASK 0x01000000L +#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS_MASK 0x02000000L +#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS_MASK 0x04000000L +#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS_MASK 0x08000000L +#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS_MASK 0x10000000L +#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS_MASK 0x20000000L +#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS_MASK 0x40000000L +#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS_MASK 0x80000000L +//UVD_SOFT_RESET2 +#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0 +#define UVD_SOFT_RESET2__PPU_SOFT_RESET__SHIFT 0x1 +#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS__SHIFT 0x10 +#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS__SHIFT 0x11 +#define 
UVD_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L +#define UVD_SOFT_RESET2__PPU_SOFT_RESET_MASK 0x00000002L +#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS_MASK 0x00010000L +#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_MMSCH_SOFT_RESET +#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET__SHIFT 0x0 +#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x1 +#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK__SHIFT 0x1f +#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET_MASK 0x00000001L +#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000002L +#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK_MASK 0x80000000L +//UVD_WIG_CTRL +#define UVD_WIG_CTRL__AVM_SOFT_RESET__SHIFT 0x0 +#define UVD_WIG_CTRL__ACAP_SOFT_RESET__SHIFT 0x1 +#define UVD_WIG_CTRL__WIG_SOFT_RESET__SHIFT 0x2 +#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON__SHIFT 0x3 +#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON__SHIFT 0x4 +#define UVD_WIG_CTRL__AVM_SOFT_RESET_MASK 0x00000001L +#define UVD_WIG_CTRL__ACAP_SOFT_RESET_MASK 0x00000002L +#define UVD_WIG_CTRL__WIG_SOFT_RESET_MASK 0x00000004L +#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON_MASK 0x00000008L +#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON_MASK 0x00000010L +//UVD_CGC_STATUS +#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x0 +#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x1 +#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x2 +#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x3 +#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x4 +#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x5 +#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x6 +#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x7 +#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x8 +#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x9 +#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0xa +#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0xb +#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0xc +#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0xd +#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0xe +#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0xf +#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x10 +#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x11 +#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x12 +#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x13 +#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x14 +#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x15 +#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x16 +#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x17 +#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x18 +#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x19 +#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x1a +#define UVD_CGC_STATUS__MMSCH_SCLK__SHIFT 0x1b +#define UVD_CGC_STATUS__MMSCH_VCLK__SHIFT 0x1c +#define UVD_CGC_STATUS__ALL_ENC_ACTIVE__SHIFT 0x1d +#define UVD_CGC_STATUS__LRBBM_DCLK__SHIFT 0x1e +#define UVD_CGC_STATUS__ALL_DEC_ACTIVE__SHIFT 0x1f +#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L +#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L +#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L +#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L +#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L +#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L +#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L +#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L +#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L +#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L +#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L +#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L +#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L +#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L +#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L +#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L +#define 
UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L +#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L +#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L +#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L +#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L +#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L +#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L +#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L +#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L +#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L +#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L +#define UVD_CGC_STATUS__MMSCH_SCLK_MASK 0x08000000L +#define UVD_CGC_STATUS__MMSCH_VCLK_MASK 0x10000000L +#define UVD_CGC_STATUS__ALL_ENC_ACTIVE_MASK 0x20000000L +#define UVD_CGC_STATUS__LRBBM_DCLK_MASK 0x40000000L +#define UVD_CGC_STATUS__ALL_DEC_ACTIVE_MASK 0x80000000L +//UVD_CGC_UDEC_STATUS +#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x0 +#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x1 +#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x2 +#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x3 +#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x4 +#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x5 +#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x6 +#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x7 +#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x8 +#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x9 +#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0xa +#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0xb +#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0xc +#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0xd +#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0xe +#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L +#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L +#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L +#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L +#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L +#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L +#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L +#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L +#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L +#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L +#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L +#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L +#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L +#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L +#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L +//UVD_SUVD_CGC_STATUS +#define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0 +#define UVD_SUVD_CGC_STATUS__SRE_DCLK__SHIFT 0x1 +#define UVD_SUVD_CGC_STATUS__SIT_DCLK__SHIFT 0x2 +#define UVD_SUVD_CGC_STATUS__SMP_DCLK__SHIFT 0x3 +#define UVD_SUVD_CGC_STATUS__SCM_DCLK__SHIFT 0x4 +#define UVD_SUVD_CGC_STATUS__SDB_DCLK__SHIFT 0x5 +#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK__SHIFT 0x6 +#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK__SHIFT 0x7 +#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK__SHIFT 0x8 +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK__SHIFT 0x9 +#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK__SHIFT 0xa +#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK__SHIFT 0xb +#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK__SHIFT 0xc +#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK__SHIFT 0xd +#define UVD_SUVD_CGC_STATUS__SCLR_DCLK__SHIFT 0xe +#define UVD_SUVD_CGC_STATUS__UVD_SC__SHIFT 0xf +#define UVD_SUVD_CGC_STATUS__ENT_DCLK__SHIFT 0x10 +#define UVD_SUVD_CGC_STATUS__IME_DCLK__SHIFT 0x11 +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK__SHIFT 0x12 +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK__SHIFT 0x13 +#define UVD_SUVD_CGC_STATUS__SITE_DCLK__SHIFT 0x14 
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK__SHIFT 0x15 +#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK__SHIFT 0x16 +#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK__SHIFT 0x17 +#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK__SHIFT 0x18 +#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK__SHIFT 0x19 +#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK__SHIFT 0x1a +#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b +#define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c +#define UVD_SUVD_CGC_STATUS__SAOE_DCLK__SHIFT 0x1d +#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK__SHIFT 0x1e +#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK__SHIFT 0x1f +#define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L +#define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L +#define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L +#define UVD_SUVD_CGC_STATUS__SMP_DCLK_MASK 0x00000008L +#define UVD_SUVD_CGC_STATUS__SCM_DCLK_MASK 0x00000010L +#define UVD_SUVD_CGC_STATUS__SDB_DCLK_MASK 0x00000020L +#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK_MASK 0x00000040L +#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK_MASK 0x00000080L +#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK_MASK 0x00000100L +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK_MASK 0x00000200L +#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK_MASK 0x00000400L +#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK_MASK 0x00000800L +#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK_MASK 0x00001000L +#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK_MASK 0x00002000L +#define UVD_SUVD_CGC_STATUS__SCLR_DCLK_MASK 0x00004000L +#define UVD_SUVD_CGC_STATUS__UVD_SC_MASK 0x00008000L +#define UVD_SUVD_CGC_STATUS__ENT_DCLK_MASK 0x00010000L +#define UVD_SUVD_CGC_STATUS__IME_DCLK_MASK 0x00020000L +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK_MASK 0x00040000L +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK_MASK 0x00080000L +#define UVD_SUVD_CGC_STATUS__SITE_DCLK_MASK 0x00100000L +#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK_MASK 0x00200000L +#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK_MASK 0x00400000L +#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK_MASK 0x00800000L +#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK_MASK 0x01000000L +#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK_MASK 0x02000000L +#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK_MASK 0x04000000L +#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L +#define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L +#define UVD_SUVD_CGC_STATUS__SAOE_DCLK_MASK 0x20000000L +#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK_MASK 0x40000000L +#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK_MASK 0x80000000L +//UVD_GPCOM_VCPU_CMD +#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x0 +#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x1 +#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x1f +#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L +#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7FFFFFFEL +#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L + + +// addressBlock: uvd_vcn_cdefe_cdefe_broadcast_dec0 +//CDEFE_SUVD_CGC_GATE +#define CDEFE_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define CDEFE_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define CDEFE_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define CDEFE_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define CDEFE_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define CDEFE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define CDEFE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define CDEFE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define CDEFE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define CDEFE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define CDEFE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define CDEFE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define 
CDEFE_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define CDEFE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define CDEFE_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define CDEFE_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define CDEFE_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define CDEFE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define CDEFE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define CDEFE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define CDEFE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define CDEFE_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define CDEFE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define CDEFE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b +#define CDEFE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c +#define CDEFE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define CDEFE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e +#define CDEFE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f +#define CDEFE_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define CDEFE_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define CDEFE_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define CDEFE_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define CDEFE_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define CDEFE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define CDEFE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define CDEFE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define CDEFE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define CDEFE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define CDEFE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define CDEFE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define CDEFE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define CDEFE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define CDEFE_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define CDEFE_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define CDEFE_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define CDEFE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define CDEFE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define CDEFE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define CDEFE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define CDEFE_SUVD_CGC_GATE__EFC_MASK 0x02000000L +#define CDEFE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define CDEFE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L +#define CDEFE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L +#define CDEFE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define CDEFE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L +#define CDEFE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L +//CDEFE_SUVD_CGC_GATE2 +#define CDEFE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 +#define CDEFE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define CDEFE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define CDEFE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 +#define CDEFE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 +#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5 +#define CDEFE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6 +#define CDEFE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7 +#define CDEFE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8 +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9 +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb +#define CDEFE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L +#define CDEFE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define CDEFE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define CDEFE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L +#define 
CDEFE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L +#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L +#define CDEFE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L +#define CDEFE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L +#define CDEFE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L +#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L +//CDEFE_SUVD_CGC_CTRL +#define CDEFE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define CDEFE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define CDEFE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define CDEFE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define CDEFE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define CDEFE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define CDEFE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define CDEFE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define CDEFE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb +#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc +#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd +#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 +#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 +#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12 +#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13 +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14 +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15 +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16 +#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e +#define CDEFE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define CDEFE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define CDEFE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define CDEFE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define CDEFE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define CDEFE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define CDEFE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define CDEFE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define CDEFE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L +#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L +#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L +#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L +#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L +#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L +#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L +#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L +#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L + + +// addressBlock: uvd_ecpudec +//UVD_VCPU_CACHE_OFFSET0 +#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE0 +#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET1 +#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x0 +#define 
UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE1 +#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET2 +#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE2 +#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET3 +#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE3 +#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET4 +#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE4 +#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET5 +#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE5 +#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET6 +#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE6 +#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET7 +#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE7 +#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET8 +#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE8 +#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8_MASK 0x001FFFFFL +//UVD_VCPU_NONCACHE_OFFSET0 +#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0__SHIFT 0x0 +#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0_MASK 0x01FFFFFFL +//UVD_VCPU_NONCACHE_SIZE0 +#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0__SHIFT 0x0 +#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0_MASK 0x001FFFFFL +//UVD_VCPU_NONCACHE_OFFSET1 +#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1__SHIFT 0x0 +#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1_MASK 0x01FFFFFFL +//UVD_VCPU_NONCACHE_SIZE1 +#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1__SHIFT 0x0 +#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1_MASK 0x001FFFFFL +//UVD_VCPU_CNTL +#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x0 +#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x4 +#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x5 +#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x6 +#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x7 +#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x8 +#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9 +#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0xa +#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0xb +#define UVD_VCPU_CNTL__DBG_MUX__SHIFT 0xd +#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x10 +#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x12 +#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14 +#define UVD_VCPU_CNTL__BLK_RST__SHIFT 0x1c +#define UVD_VCPU_CNTL__RUNSTALL__SHIFT 0x1d +#define 
UVD_VCPU_CNTL__SRE_CMDIF_DRST__SHIFT 0x1e +#define UVD_VCPU_CNTL__SRE_CMDIF_VRST__SHIFT 0x1f +#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000FL +#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000010L +#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L +#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L +#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L +#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L +#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L +#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L +#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L +#define UVD_VCPU_CNTL__DBG_MUX_MASK 0x0000E000L +#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L +#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L +#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L +#define UVD_VCPU_CNTL__BLK_RST_MASK 0x10000000L +#define UVD_VCPU_CNTL__RUNSTALL_MASK 0x20000000L +#define UVD_VCPU_CNTL__SRE_CMDIF_DRST_MASK 0x40000000L +#define UVD_VCPU_CNTL__SRE_CMDIF_VRST_MASK 0x80000000L +//UVD_VCPU_PRID +#define UVD_VCPU_PRID__PRID__SHIFT 0x0 +#define UVD_VCPU_PRID__PRID_MASK 0x0000FFFFL +//UVD_VCPU_TRCE +#define UVD_VCPU_TRCE__PC__SHIFT 0x0 +#define UVD_VCPU_TRCE__PC_MASK 0x0FFFFFFFL +//UVD_VCPU_TRCE_RD +#define UVD_VCPU_TRCE_RD__DATA__SHIFT 0x0 +#define UVD_VCPU_TRCE_RD__DATA_MASK 0xFFFFFFFFL +//UVD_VCPU_IND_INDEX +#define UVD_VCPU_IND_INDEX__INDEX__SHIFT 0x0 +#define UVD_VCPU_IND_INDEX__INDEX_MASK 0x000001FFL +//UVD_VCPU_IND_DATA +#define UVD_VCPU_IND_DATA__DATA__SHIFT 0x0 +#define UVD_VCPU_IND_DATA__DATA_MASK 0xFFFFFFFFL + + +// addressBlock: uvd_lmi_adpdec +//UVD_LMI_RE_64BIT_BAR_LOW +#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_RE_64BIT_BAR_HIGH +#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_IT_64BIT_BAR_LOW +#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_IT_64BIT_BAR_HIGH +#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MP_64BIT_BAR_LOW +#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MP_64BIT_BAR_HIGH +#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_CM_64BIT_BAR_LOW +#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_CM_64BIT_BAR_HIGH +#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_DB_64BIT_BAR_LOW +#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_DB_64BIT_BAR_HIGH +#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_DBW_64BIT_BAR_LOW +#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_DBW_64BIT_BAR_HIGH +#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_IDCT_64BIT_BAR_LOW +#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_IDCT_64BIT_BAR_HIGH 
+#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_S0_64BIT_BAR_LOW +#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_S0_64BIT_BAR_HIGH +#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_S1_64BIT_BAR_LOW +#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_S1_64BIT_BAR_HIGH +#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_DBW_64BIT_BAR_LOW +#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH +#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_RB_64BIT_BAR_LOW +#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_RB_64BIT_BAR_HIGH +#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_IB_64BIT_BAR_LOW +#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_IB_64BIT_BAR_HIGH +#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_LBSI_64BIT_BAR_LOW +#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_LBSI_64BIT_BAR_HIGH +#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC0_64BIT_BAR_LOW +#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC1_64BIT_BAR_LOW +#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_CENC_64BIT_BAR_LOW +#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_CENC_64BIT_BAR_HIGH +#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_SRE_64BIT_BAR_LOW +#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL 
+//UVD_LMI_SRE_64BIT_BAR_HIGH
+#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_DBW_64BIT_BAR_LOW
+#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP0_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP1_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP3_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD0_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD1_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD3_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD4_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR_64BIT_BAR_LOW
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_SPH_64BIT_BAR_HIGH
+#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_ADP_ATOMIC_CONFIG
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE__SHIFT 0x0
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE__SHIFT 0x4
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE__SHIFT 0x8
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE__SHIFT 0xc
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG__SHIFT 0x10
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE_MASK 0x0000000FL
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE_MASK 0x000000F0L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE_MASK 0x00000F00L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE_MASK 0x0000F000L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG_MASK 0x000F0000L
+//UVD_LMI_ARB_CTRL2
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST__SHIFT 0x2
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST__SHIFT 0x6
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX__SHIFT 0xa
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX__SHIFT 0x14
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST_MASK 0x0000003CL
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST_MASK 0x000003C0L
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX_MASK 0x000FFC00L
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX_MASK 0xFFF00000L
+//UVD_LMI_VCPU_CACHE_VMIDS_MULTI
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID__SHIFT 0x1c
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID_MASK 0x0000000FL
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID_MASK 0x0F000000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID_MASK 0xF0000000L
+//UVD_LMI_VCPU_NC_VMIDS_MULTI
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID_MASK 0x0F000000L
+//UVD_LMI_LAT_CTRL
+#define UVD_LMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_LMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_LMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_LMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_LMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_LMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_LMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_LMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_LMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_LMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_LMI_LAT_CNTR
+#define UVD_LMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_LMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_LMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_LMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_LMI_AVG_LAT_CNTR
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_LMI_SPH
+#define UVD_LMI_SPH__ADDR__SHIFT 0x0
+#define UVD_LMI_SPH__STS__SHIFT 0x1c
+#define UVD_LMI_SPH__STS_VALID__SHIFT 0x1e
+#define UVD_LMI_SPH__STS_OVERFLOW__SHIFT 0x1f
+#define UVD_LMI_SPH__ADDR_MASK 0x0FFFFFFFL
+#define UVD_LMI_SPH__STS_MASK 0x30000000L
+#define UVD_LMI_SPH__STS_VALID_MASK 0x40000000L
+#define UVD_LMI_SPH__STS_OVERFLOW_MASK 0x80000000L
+//UVD_LMI_VCPU_CACHE_VMID
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_LMI_CTRL2
+#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x0
+#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x1
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x2
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x3
+#define UVD_LMI_CTRL2__CRC1_RESET__SHIFT 0x4
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x7
+#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0xd
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0xe
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0xf
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN__SHIFT 0x10
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP__SHIFT 0x19
+#define UVD_LMI_CTRL2__NJ_MIF_GATING__SHIFT 0x1a
+#define UVD_LMI_CTRL2__CRC1_SEL__SHIFT 0x1b
+#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
+#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L
+#define UVD_LMI_CTRL2__CRC1_RESET_MASK 0x00000010L
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L
+#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN_MASK 0x00010000L
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP_MASK 0x02000000L
+#define UVD_LMI_CTRL2__NJ_MIF_GATING_MASK 0x04000000L
+#define UVD_LMI_CTRL2__CRC1_SEL_MASK 0xF8000000L
+//UVD_LMI_URGENT_CTRL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL__SHIFT 0x1
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x2
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x8
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL__SHIFT 0x9
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0xa
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL__SHIFT 0x10
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL__SHIFT 0x11
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT__SHIFT 0x12
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL__SHIFT 0x18
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL__SHIFT 0x19
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT__SHIFT 0x1a
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL_MASK 0x00000002L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x0000003CL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00000100L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL_MASK 0x00000200L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00003C00L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL_MASK 0x00010000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL_MASK 0x00020000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT_MASK 0x003C0000L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL_MASK 0x01000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL_MASK 0x02000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT_MASK 0x3C000000L
+//UVD_LMI_CTRL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x0
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x8
+#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x9
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0xb
+#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0xc
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0xd
+#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0xe
+#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0xf
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL__SHIFT 0x14
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x15
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x16
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x17
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x18
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x19
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x1a
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ__SHIFT 0x1b
+#define UVD_LMI_CTRL__MC_BLK_RST__SHIFT 0x1c
+#define UVD_LMI_CTRL__UMC_BLK_RST__SHIFT 0x1d
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000FFL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L
+#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L
+#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L
+#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000F8000L
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK 0x00100000L
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ_MASK 0x08000000L
+#define UVD_LMI_CTRL__MC_BLK_RST_MASK 0x10000000L
+#define UVD_LMI_CTRL__UMC_BLK_RST_MASK 0x20000000L
+//UVD_LMI_STATUS
+#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0
+#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3
+#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x4
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x5
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x7
+#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x8
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9
+#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0xa
+#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0xb
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0xc
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0xd
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN__SHIFT 0x12
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN__SHIFT 0x13
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN__SHIFT 0x14
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN__SHIFT 0x15
+#define UVD_LMI_STATUS__CENC_READ_CLEAN__SHIFT 0x16
+#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
+#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
+#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L
+#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN_MASK 0x00040000L
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN_MASK 0x00080000L
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN_MASK 0x00100000L
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN_MASK 0x00200000L
+#define UVD_LMI_STATUS__CENC_READ_CLEAN_MASK 0x00400000L
+//UVD_LMI_PERFMON_CTRL
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_LMI_PERFMON_COUNT_LO
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_LMI_PERFMON_COUNT_HI
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_LMI_ADP_SWAP_CNTL
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP__SHIFT 0x6
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x8
+#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP__SHIFT 0xa
+#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP__SHIFT 0xc
+#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP__SHIFT 0xe
+#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP__SHIFT 0x10
+#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP__SHIFT 0x12
+#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP__SHIFT 0x14
+#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP__SHIFT 0x18
+#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP__SHIFT 0x1c
+#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP__SHIFT 0x1e
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP_MASK 0x000000C0L
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000300L
+#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP_MASK 0x00000C00L
+#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP_MASK 0x0000C000L
+#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP_MASK 0x00030000L
+#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP_MASK 0x000C0000L
+#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP_MASK 0x00300000L
+#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP_MASK 0x03000000L
+#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
+#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP_MASK 0xC0000000L
+//UVD_LMI_RBC_RB_VMID
+#define UVD_LMI_RBC_RB_VMID__RB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_RB_VMID__RB_VMID_MASK 0x0000000FL
+//UVD_LMI_RBC_IB_VMID
+#define UVD_LMI_RBC_IB_VMID__IB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_IB_VMID__IB_VMID_MASK 0x0000000FL
+//UVD_LMI_MC_CREDITS
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS__SHIFT 0x0
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS__SHIFT 0x8
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS__SHIFT 0x10
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS__SHIFT 0x18
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS_MASK 0x0000003FL
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS_MASK 0x00003F00L
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS_MASK 0x003F0000L
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS_MASK 0x3F000000L
+//UVD_LMI_ADP_IND_INDEX
+#define UVD_LMI_ADP_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_LMI_ADP_IND_INDEX__INDEX_MASK 0x00001FFFL
+//UVD_LMI_ADP_IND_DATA
+#define UVD_LMI_ADP_IND_DATA__DATA__SHIFT 0x0
+#define UVD_LMI_ADP_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_LMI_ADP_PF_EN
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN__SHIFT 0x0
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN__SHIFT 0x1
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN__SHIFT 0x2
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN_MASK 0x00000001L
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN_MASK 0x00000002L
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN_MASK 0x00000004L
+//UVD_LMI_PREF_CTRL
+#define UVD_LMI_PREF_CTRL__PREF_RST__SHIFT 0x0
+#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS__SHIFT 0x1
+#define UVD_LMI_PREF_CTRL__PREF_WSTRB__SHIFT 0x2
+#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE__SHIFT 0x3
+#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE__SHIFT 0x4
+#define UVD_LMI_PREF_CTRL__PREF_SIZE__SHIFT 0x13
+#define UVD_LMI_PREF_CTRL__PREF_RST_MASK 0x00000001L
+#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS_MASK 0x00000002L
+#define UVD_LMI_PREF_CTRL__PREF_WSTRB_MASK 0x00000004L
+#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE_MASK 0x00000008L
+#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE_MASK 0x00000070L
+#define UVD_LMI_PREF_CTRL__PREF_SIZE_MASK 0xFFF80000L
+
+
+// addressBlock: uvd_uvd_jpeg0_jpegnpdec
+//UVD_JPEG_CNTL
+#define UVD_JPEG_CNTL__REQUEST_EN__SHIFT 0x1
+#define UVD_JPEG_CNTL__ERR_RST_EN__SHIFT 0x2
+#define UVD_JPEG_CNTL__DBG_MUX_SEL__SHIFT 0x8
+#define UVD_JPEG_CNTL__REQUEST_EN_MASK 0x00000002L
+#define UVD_JPEG_CNTL__ERR_RST_EN_MASK 0x00000004L
+#define UVD_JPEG_CNTL__DBG_MUX_SEL_MASK 0x00007F00L
+//UVD_JPEG_RB_BASE
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF__SHIFT 0x0
+#define UVD_JPEG_RB_BASE__RB_BASE__SHIFT 0x6
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF_MASK 0x0000003FL
+#define UVD_JPEG_RB_BASE__RB_BASE_MASK 0xFFFFFFC0L
+//UVD_JPEG_RB_WPTR
+#define UVD_JPEG_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JPEG_RB_WPTR__RB_WPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_RPTR
+#define UVD_JPEG_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JPEG_RB_RPTR__RB_RPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_SIZE
+#define UVD_JPEG_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JPEG_RB_SIZE__RB_SIZE_MASK 0x3FFFFFF0L
+//UVD_JPEG_DEC_CNT
+#define UVD_JPEG_DEC_CNT__DECODE_COUNT__SHIFT 0x0
+#define UVD_JPEG_DEC_CNT__DECODE_COUNT_MASK 0xFFFFFFFFL
+//UVD_JPEG_SPS_INFO
+#define UVD_JPEG_SPS_INFO__PIC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_SPS_INFO__PIC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_SPS_INFO__PIC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_SPS_INFO__PIC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_SPS1_INFO
+#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC__SHIFT 0x0
+#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT__SHIFT 0x3
+#define UVD_JPEG_SPS1_INFO__OUT_FMT_422__SHIFT 0x4
+#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC_MASK 0x00000007L
+#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT_MASK 0x00000008L
+#define UVD_JPEG_SPS1_INFO__OUT_FMT_422_MASK 0x00000010L
+//UVD_JPEG_RE_TIMER
+#define UVD_JPEG_RE_TIMER__TIMER_OUT__SHIFT 0x0
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN__SHIFT 0x10
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_MASK 0x000000FFL
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN_MASK 0x00010000L
+//UVD_JPEG_DEC_SCRATCH0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+//UVD_JPEG_INT_EN
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN__SHIFT 0x0
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN__SHIFT 0x1
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN__SHIFT 0x2
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN__SHIFT 0x6
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN__SHIFT 0x7
+#define UVD_JPEG_INT_EN__EOI_ERR_EN__SHIFT 0x8
+#define UVD_JPEG_INT_EN__HFM_ERR_EN__SHIFT 0x9
+#define UVD_JPEG_INT_EN__RST_ERR_EN__SHIFT 0xa
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN__SHIFT 0xb
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN__SHIFT 0xc
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN__SHIFT 0xd
+#define UVD_JPEG_INT_EN__FMT_ERR_EN__SHIFT 0xe
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN__SHIFT 0xf
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN_MASK 0x00000001L
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN_MASK 0x00000002L
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN_MASK 0x00000004L
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN_MASK 0x00000040L
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN_MASK 0x00000080L
+#define UVD_JPEG_INT_EN__EOI_ERR_EN_MASK 0x00000100L
+#define UVD_JPEG_INT_EN__HFM_ERR_EN_MASK 0x00000200L
+#define UVD_JPEG_INT_EN__RST_ERR_EN_MASK 0x00000400L
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN_MASK 0x00000800L
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN_MASK 0x00001000L
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN_MASK 0x00002000L
+#define UVD_JPEG_INT_EN__FMT_ERR_EN_MASK 0x00004000L
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN_MASK 0x00008000L
+//UVD_JPEG_INT_STAT
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT__SHIFT 0x0
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT__SHIFT 0x1
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT__SHIFT 0x2
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT__SHIFT 0x6
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT__SHIFT 0x7
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT__SHIFT 0x8
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT__SHIFT 0x9
+#define UVD_JPEG_INT_STAT__RST_ERR_INT__SHIFT 0xa
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT__SHIFT 0xb
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT__SHIFT 0xc
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT__SHIFT 0xd
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT__SHIFT 0xe
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT__SHIFT 0xf
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT_MASK 0x00000001L
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT_MASK 0x00000002L
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT_MASK 0x00000004L
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT_MASK 0x00000040L
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT_MASK 0x00000080L
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT_MASK 0x00000100L
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT_MASK 0x00000200L
+#define UVD_JPEG_INT_STAT__RST_ERR_INT_MASK 0x00000400L
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT_MASK 0x00000800L
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT_MASK 0x00001000L
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT_MASK 0x00002000L
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT_MASK 0x00004000L
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT_MASK 0x00008000L
+//UVD_JPEG_TIER_CNTL0
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC__SHIFT 0x8
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC__SHIFT 0xb
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC__SHIFT 0x11
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC__SHIFT 0x14
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC__SHIFT 0x17
+#define UVD_JPEG_TIER_CNTL0__Y_TQ__SHIFT 0x1a
+#define UVD_JPEG_TIER_CNTL0__U_TQ__SHIFT 0x1c
+#define UVD_JPEG_TIER_CNTL0__V_TQ__SHIFT 0x1e
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL_MASK 0x00000003L
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID_MASK 0x000000C0L
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC_MASK 0x00000700L
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC_MASK 0x00003800L
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC_MASK 0x0001C000L
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC_MASK 0x000E0000L
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC_MASK 0x00700000L
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC_MASK 0x03800000L
+#define UVD_JPEG_TIER_CNTL0__Y_TQ_MASK 0x0C000000L
+#define UVD_JPEG_TIER_CNTL0__U_TQ_MASK 0x30000000L
+#define UVD_JPEG_TIER_CNTL0__V_TQ_MASK 0xC0000000L
+//UVD_JPEG_TIER_CNTL1
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_CNTL2
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE__SHIFT 0x1
+#define UVD_JPEG_TIER_CNTL2__TQ__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL2__TH__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL2__TC__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL2__TD__SHIFT 0x7
+#define UVD_JPEG_TIER_CNTL2__TA__SHIFT 0xa
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL_MASK 0x00000001L
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE_MASK 0x00000002L
+#define UVD_JPEG_TIER_CNTL2__TQ_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL2__TH_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL2__TC_MASK 0x00000040L
+#define UVD_JPEG_TIER_CNTL2__TD_MASK 0x00000380L
+#define UVD_JPEG_TIER_CNTL2__TA_MASK 0x00001C00L
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN_MASK 0x00004000L
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_STATUS
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE__SHIFT 0x0
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE__SHIFT 0x1
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE_MASK 0x00000001L
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE_MASK 0x00000002L
+
+
+// addressBlock: uvd_uvd_jpeg_sclk0_jpegnpsclkdec
+//UVD_JPEG_OUTBUF_CNTL
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN__SHIFT 0x2
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX__SHIFT 0x6
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT__SHIFT 0x7
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER__SHIFT 0x9
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK__SHIFT 0x10
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT_MASK 0x00000003L
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN_MASK 0x00000004L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX_MASK 0x00000040L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT_MASK 0x00000180L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER_MASK 0x00001E00L
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK_MASK 0x00010000L
+//UVD_JPEG_OUTBUF_WPTR
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_OUTBUF_RPTR
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_PITCH
+#define UVD_JPEG_PITCH__PITCH__SHIFT 0x0
+#define UVD_JPEG_PITCH__PITCH_MASK 0xFFFFFFFFL
+//UVD_JPEG_UV_PITCH
+#define UVD_JPEG_UV_PITCH__UV_PITCH__SHIFT 0x0
+#define UVD_JPEG_UV_PITCH__UV_PITCH_MASK 0xFFFFFFFFL
+//JPEG_DEC_Y_GFX8_TILING_SURFACE
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_UV_GFX8_TILING_SURFACE
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_GFX8_ADDR_CONFIG
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//JPEG_DEC_Y_GFX10_TILING_SURFACE
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_UV_GFX10_TILING_SURFACE
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_GFX10_ADDR_CONFIG
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//JPEG_DEC_ADDR_MODE
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L
+//UVD_JPEG_OUTPUT_XY
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X__SHIFT 0x0
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y__SHIFT 0x10
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X_MASK 0x00003FFFL
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y_MASK 0x3FFF0000L
+//UVD_JPEG_GPCOM_CMD
+#define UVD_JPEG_GPCOM_CMD__CMD__SHIFT 0x1
+#define UVD_JPEG_GPCOM_CMD__CMD_MASK 0x0000000EL
+//UVD_JPEG_GPCOM_DATA0
+#define UVD_JPEG_GPCOM_DATA0__DATA0__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_GPCOM_DATA1
+#define UVD_JPEG_GPCOM_DATA1__DATA1__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_JPEG_SCRATCH1
+#define UVD_JPEG_SCRATCH1__SCRATCH1__SHIFT 0x0
+#define UVD_JPEG_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL
+//UVD_JPEG_DEC_SOFT_RST
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET__SHIFT 0x0
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS__SHIFT 0x10
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET_MASK 0x00000001L
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS_MASK 0x00010000L
+
+
+// addressBlock: uvd_uvd_jrbc0_uvd_jrbc_dec
+//UVD_JRBC_RB_WPTR
+#define UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC_RB_CNTL
+#define UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC_IB_SIZE
+#define UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_SOFT_RESET
+#define UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC_STATUS
+#define UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC_RB_RPTR
+#define UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JPEG_PREEMPT_CMD
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC_RB_SIZE
+#define UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC_SCRATCH0
+#define UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd_uvd_jmi0_uvd_jmi_dec
+//UVD_JPEG_DEC_PF_CTRL
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_LMI_JRBC_CTRL
+#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_LMI_JPEG_CTRL
+#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//JPEG_LMI_DROP
+#define JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+//UVD_LMI_JRBC_IB_VMID
+#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_LMI_JRBC_RB_VMID
+#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_LMI_JPEG_VMID
+#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: uvd_uvd_jmi_common_dec
+//UVD_JADP_MCIF_URGENT_CTRL
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK__SHIFT 0x0
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK__SHIFT 0x6
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER__SHIFT 0xb
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP__SHIFT 0x11
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP__SHIFT 0x15
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN__SHIFT 0x19
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN__SHIFT 0x1a
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK_MASK 0x0000003FL
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK_MASK 0x000007C0L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER_MASK 0x0001F800L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP_MASK 0x001E0000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP_MASK 0x01E00000L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN_MASK 0x02000000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN_MASK 0x04000000L
+//UVD_JMI_URGENT_CTRL
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x4
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x10
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0x14
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x000000F0L
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00010000L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00F00000L
+//UVD_JMI_CTRL
+#define UVD_JMI_CTRL__STALL_MC_ARB__SHIFT 0x0
+#define UVD_JMI_CTRL__MASK_MC_URGENT__SHIFT 0x1
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x2
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER__SHIFT 0x8
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER__SHIFT 0x10
+#define UVD_JMI_CTRL__STALL_MC_ARB_MASK 0x00000001L
+#define UVD_JMI_CTRL__MASK_MC_URGENT_MASK 0x00000002L
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000004L
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER_MASK 0x0000FF00L
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER_MASK 0x00FF0000L
+//JPEG_MEMCHECK_CLAMPING_CNTL
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN_MASK 0x00000001L
+//JPEG_MEMCHECK_SAFE_ADDR
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR_MASK 0xFFFFFFFFL
+//JPEG_MEMCHECK_SAFE_ADDR_64BIT
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT_MASK 0xFFFFFFFFL
+//UVD_JMI_LAT_CTRL
+#define UVD_JMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_JMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_JMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_JMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_JMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_JMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_JMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_JMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_JMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_JMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_JMI_LAT_CNTR
+#define UVD_JMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_JMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_JMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_JMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_JMI_AVG_LAT_CNTR
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_JMI_PERFMON_CTRL
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_JMI_PERFMON_COUNT_LO
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_JMI_PERFMON_COUNT_HI
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_JMI_CLEAN_STATUS
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN__SHIFT 0x0
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW__SHIFT 0x1
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN__SHIFT 0x2
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW__SHIFT 0x3
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING__SHIFT 0x4
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN__SHIFT 0x8
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN__SHIFT 0x10
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_MASK 0x00000001L
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW_MASK 0x00000002L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_MASK 0x00000004L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW_MASK 0x00000008L
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING_MASK 0x00000010L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN_MASK 0x00000100L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN_MASK 0x00010000L
+//UVD_JMI_CNTL
+#define UVD_JMI_CNTL__SOFT_RESET__SHIFT 0x0
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX__SHIFT 0x8
+#define UVD_JMI_CNTL__SOFT_RESET_MASK 0x00000001L
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX_MASK 0x0003FF00L
+
+
+// addressBlock: uvd_uvd_jpeg_common_dec
+//JPEG_SOFT_RESET_STATUS
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS__SHIFT 0x0
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS__SHIFT 0x8
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS__SHIFT 0x11
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS__SHIFT 0x12
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS__SHIFT 0x18
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS_MASK 0x00000001L
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS_MASK 0x00000100L
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS_MASK 0x00020000L
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS_MASK 0x00040000L
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS_MASK 0x01000000L
+//JPEG_SYS_INT_EN
+#define JPEG_SYS_INT_EN__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_EN__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_EN__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_EN__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_EN1
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_EN1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_EN1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_EN1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_EN1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_STATUS
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_STATUS1
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_STATUS1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_STATUS1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_ACK
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_ACK__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL_MASK 0x01000000L
+//JPEG_SYS_INT_ACK1
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_ACK1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_ACK1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_MEMCHECK_SYS_INT_EN
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_EN1
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN_MASK 0x00000020L
+//JPEG_MEMCHECK_SYS_INT_STAT
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_STAT1
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_STAT2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR_MASK 0x00000800L
+//JPEG_MEMCHECK_SYS_INT_ACK
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_ACK1
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+//JPEG_MEMCHECK_SYS_INT_ACK2
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR_MASK
0x00000080L +#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR_MASK 0x00000100L +#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR_MASK 0x00000200L +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR_MASK 0x00000400L +#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR_MASK 0x00000800L +//JPEG_MASTINT_EN +#define JPEG_MASTINT_EN__OVERRUN_RST__SHIFT 0x0 +#define JPEG_MASTINT_EN__INT_OVERRUN__SHIFT 0x4 +#define JPEG_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L +#define JPEG_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L +//JPEG_IH_CTRL +#define JPEG_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0 +#define JPEG_IH_CTRL__IH_STALL_EN__SHIFT 0x1 +#define JPEG_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2 +#define JPEG_IH_CTRL__IH_VMID__SHIFT 0x3 +#define JPEG_IH_CTRL__IH_USER_DATA__SHIFT 0x7 +#define JPEG_IH_CTRL__IH_RINGID__SHIFT 0x13 +#define JPEG_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L +#define JPEG_IH_CTRL__IH_STALL_EN_MASK 0x00000002L +#define JPEG_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L +#define JPEG_IH_CTRL__IH_VMID_MASK 0x00000078L +#define JPEG_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L +#define JPEG_IH_CTRL__IH_RINGID_MASK 0x07F80000L +//JRBBM_ARB_CTRL +#define JRBBM_ARB_CTRL__SRBM_DROP__SHIFT 0x0 +#define JRBBM_ARB_CTRL__EJRBC_DROP__SHIFT 0x1 +#define JRBBM_ARB_CTRL__DJRBC0_DROP__SHIFT 0x2 +#define JRBBM_ARB_CTRL__SRBM_DROP_MASK 0x00000001L +#define JRBBM_ARB_CTRL__EJRBC_DROP_MASK 0x00000002L +#define JRBBM_ARB_CTRL__DJRBC0_DROP_MASK 0x00000004L + + +// addressBlock: uvd_uvd_jpeg_common_sclk_dec +//JPEG_CGC_GATE +#define JPEG_CGC_GATE__JPEG0_DEC__SHIFT 0x0 +#define JPEG_CGC_GATE__JPEG_ENC__SHIFT 0x8 +#define JPEG_CGC_GATE__JMCIF__SHIFT 0x9 +#define JPEG_CGC_GATE__JRBBM__SHIFT 0xa +#define JPEG_CGC_GATE__JPEG0_DEC_MASK 0x00000001L +#define JPEG_CGC_GATE__JPEG_ENC_MASK 0x00000100L +#define JPEG_CGC_GATE__JMCIF_MASK 0x00000200L +#define JPEG_CGC_GATE__JRBBM_MASK 0x00000400L +//JPEG_CGC_CTRL +#define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0 +#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x1 +#define JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x5 +#define JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT 0x10 +#define JPEG_CGC_CTRL__JPEG_ENC_MODE__SHIFT 0x18 +#define JPEG_CGC_CTRL__JMCIF_MODE__SHIFT 0x19 +#define JPEG_CGC_CTRL__JRBBM_MODE__SHIFT 0x1a +#define JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L +#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000001EL +#define JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK 0x00001FE0L +#define JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK 0x00010000L +#define JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK 0x01000000L +#define JPEG_CGC_CTRL__JMCIF_MODE_MASK 0x02000000L +#define JPEG_CGC_CTRL__JRBBM_MODE_MASK 0x04000000L +//JPEG_CGC_STATUS +#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE__SHIFT 0x0 +#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE__SHIFT 0x1 +#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE__SHIFT 0x10 +#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE__SHIFT 0x11 +#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE__SHIFT 0x12 +#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE__SHIFT 0x13 +#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE__SHIFT 0x14 +#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE_MASK 0x00000001L +#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE_MASK 0x00000002L +#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE_MASK 0x00010000L +#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE_MASK 0x00020000L +#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE_MASK 0x00040000L +#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE_MASK 0x00080000L +#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE_MASK 0x00100000L +//JPEG_COMN_CGC_MEM_CTRL +#define 
JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN__SHIFT 0x0 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN__SHIFT 0x1 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN__SHIFT 0x2 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN__SHIFT 0x3 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN_MASK 0x00000001L +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN_MASK 0x00000002L +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN_MASK 0x00000004L +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN_MASK 0x00000008L +//JPEG_DEC_CGC_MEM_CTRL +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN__SHIFT 0x0 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN__SHIFT 0x1 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN__SHIFT 0x2 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN__SHIFT 0x3 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN_MASK 0x00000001L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN_MASK 0x00000002L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN_MASK 0x00000004L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN_MASK 0x00000008L +//JPEG_ENC_CGC_MEM_CTRL +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN__SHIFT 0x0 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN__SHIFT 0x1 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN__SHIFT 0x2 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN__SHIFT 0x3 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN_MASK 0x00000001L +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN_MASK 0x00000002L +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN_MASK 0x00000004L +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN_MASK 0x00000008L +//JPEG_PERF_BANK_CONF +#define JPEG_PERF_BANK_CONF__RESET__SHIFT 0x0 +#define JPEG_PERF_BANK_CONF__PEEK__SHIFT 0x8 +#define JPEG_PERF_BANK_CONF__CONCATENATE__SHIFT 0x10 +#define JPEG_PERF_BANK_CONF__CORE_SEL__SHIFT 0x15 +#define JPEG_PERF_BANK_CONF__RESET_MASK 0x0000000FL +#define JPEG_PERF_BANK_CONF__PEEK_MASK 0x00000F00L +#define JPEG_PERF_BANK_CONF__CONCATENATE_MASK 0x00030000L +#define JPEG_PERF_BANK_CONF__CORE_SEL_MASK 0x00E00000L +//JPEG_PERF_BANK_EVENT_SEL +#define JPEG_PERF_BANK_EVENT_SEL__SEL0__SHIFT 0x0 +#define JPEG_PERF_BANK_EVENT_SEL__SEL1__SHIFT 0x8 +#define JPEG_PERF_BANK_EVENT_SEL__SEL2__SHIFT 0x10 +#define JPEG_PERF_BANK_EVENT_SEL__SEL3__SHIFT 0x18 +#define JPEG_PERF_BANK_EVENT_SEL__SEL0_MASK 0x000000FFL +#define JPEG_PERF_BANK_EVENT_SEL__SEL1_MASK 0x0000FF00L +#define JPEG_PERF_BANK_EVENT_SEL__SEL2_MASK 0x00FF0000L +#define JPEG_PERF_BANK_EVENT_SEL__SEL3_MASK 0xFF000000L +//JPEG_PERF_BANK_COUNT0 +#define JPEG_PERF_BANK_COUNT0__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT0__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT1 +#define JPEG_PERF_BANK_COUNT1__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT1__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT2 +#define JPEG_PERF_BANK_COUNT2__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT2__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT3 +#define JPEG_PERF_BANK_COUNT3__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT3__COUNT_MASK 0xFFFFFFFFL + + +// addressBlock: uvd_uvd_pg_dec +//UVD_IPX_DLDO_CONFIG +#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT 0x2 +#define UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT 0x4 +#define UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT 0x6 +#define UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT 0x8 +#define UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT 0xa +#define UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT 0xc +#define UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG_MASK 0x0000000CL +#define UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG_MASK 0x00000030L +#define UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG_MASK 0x000000C0L +#define 
UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG_MASK 0x00000300L +#define UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG_MASK 0x00000C00L +#define UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG_MASK 0x00003000L +//UVD_IPX_DLDO_STATUS +#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS__SHIFT 0x1 +#define UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT 0x2 +#define UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT 0x3 +#define UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT 0x4 +#define UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT 0x5 +#define UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT 0x6 +#define UVD_IPX_DLDO_STATUS__ONO0_PWR_STATUS_MASK 0x00000002L +#define UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK 0x00000004L +#define UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK 0x00000008L +#define UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK 0x00000010L +#define UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK 0x00000020L +#define UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK 0x00000040L +//UVD_POWER_STATUS +#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x0 +#define UVD_POWER_STATUS__UVD_PG_MODE__SHIFT 0x2 +#define UVD_POWER_STATUS__UVD_CG_MODE__SHIFT 0x4 +#define UVD_POWER_STATUS__UVD_PG_EN__SHIFT 0x8 +#define UVD_POWER_STATUS__RBC_SNOOP_DIS__SHIFT 0x9 +#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS__SHIFT 0xb +#define UVD_POWER_STATUS__STALL_DPG_POWER_UP__SHIFT 0x1f +#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000001L +#define UVD_POWER_STATUS__UVD_PG_MODE_MASK 0x00000004L +#define UVD_POWER_STATUS__UVD_CG_MODE_MASK 0x00000030L +#define UVD_POWER_STATUS__UVD_PG_EN_MASK 0x00000100L +#define UVD_POWER_STATUS__RBC_SNOOP_DIS_MASK 0x00000200L +#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS_MASK 0x00000800L +#define UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK 0x80000000L +//UVD_JPEG_POWER_STATUS +#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS__SHIFT 0x0 +#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE__SHIFT 0x4 +#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS__SHIFT 0x8 +#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS__SHIFT 0x9 +#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP__SHIFT 0x1f +#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK 0x00000001L +#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK 0x00000010L +#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS_MASK 0x00000100L +#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS_MASK 0x00000200L +#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP_MASK 0x80000000L +//UVD_MC_DJPEG_RD_SPACE +#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE__SHIFT 0x0 +#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE_MASK 0x0003FFFFL +//UVD_MC_DJPEG_WR_SPACE +#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE__SHIFT 0x0 +#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE_MASK 0x0003FFFFL +//UVD_PG_IND_INDEX +#define UVD_PG_IND_INDEX__INDEX__SHIFT 0x0 +#define UVD_PG_IND_INDEX__INDEX_MASK 0x0000003FL +//UVD_PG_IND_DATA +#define UVD_PG_IND_DATA__DATA__SHIFT 0x0 +#define UVD_PG_IND_DATA__DATA_MASK 0xFFFFFFFFL +//CC_UVD_HARVESTING +#define CC_UVD_HARVESTING__MMSCH_DISABLE__SHIFT 0x0 +#define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1 +#define CC_UVD_HARVESTING__MMSCH_DISABLE_MASK 0x00000001L +#define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L +//UVD_DPG_LMA_CTL +#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0 +#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1 +#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2 +#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4 +#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0xe +#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L +#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L +#define 
UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L +#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L +#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFFC000L +//UVD_DPG_LMA_DATA +#define UVD_DPG_LMA_DATA__LMA_DATA__SHIFT 0x0 +#define UVD_DPG_LMA_DATA__LMA_DATA_MASK 0xFFFFFFFFL +//UVD_DPG_LMA_MASK +#define UVD_DPG_LMA_MASK__LMA_MASK__SHIFT 0x0 +#define UVD_DPG_LMA_MASK__LMA_MASK_MASK 0xFFFFFFFFL +//UVD_DPG_PAUSE +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0 +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1 +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2 +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3 +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L +//UVD_SCRATCH1 +#define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0 +#define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH2 +#define UVD_SCRATCH2__SCRATCH2_DATA__SHIFT 0x0 +#define UVD_SCRATCH2__SCRATCH2_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH3 +#define UVD_SCRATCH3__SCRATCH3_DATA__SHIFT 0x0 +#define UVD_SCRATCH3__SCRATCH3_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH4 +#define UVD_SCRATCH4__SCRATCH4_DATA__SHIFT 0x0 +#define UVD_SCRATCH4__SCRATCH4_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH5 +#define UVD_SCRATCH5__SCRATCH5_DATA__SHIFT 0x0 +#define UVD_SCRATCH5__SCRATCH5_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH6 +#define UVD_SCRATCH6__SCRATCH6_DATA__SHIFT 0x0 +#define UVD_SCRATCH6__SCRATCH6_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH7 +#define UVD_SCRATCH7__SCRATCH7_DATA__SHIFT 0x0 +#define UVD_SCRATCH7__SCRATCH7_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH8 +#define UVD_SCRATCH8__SCRATCH8_DATA__SHIFT 0x0 +#define UVD_SCRATCH8__SCRATCH8_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH9 +#define UVD_SCRATCH9__SCRATCH9_DATA__SHIFT 0x0 +#define UVD_SCRATCH9__SCRATCH9_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH10 +#define UVD_SCRATCH10__SCRATCH10_DATA__SHIFT 0x0 +#define UVD_SCRATCH10__SCRATCH10_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH11 +#define UVD_SCRATCH11__SCRATCH11_DATA__SHIFT 0x0 +#define UVD_SCRATCH11__SCRATCH11_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH12 +#define UVD_SCRATCH12__SCRATCH12_DATA__SHIFT 0x0 +#define UVD_SCRATCH12__SCRATCH12_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH13 +#define UVD_SCRATCH13__SCRATCH13_DATA__SHIFT 0x0 +#define UVD_SCRATCH13__SCRATCH13_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH14 +#define UVD_SCRATCH14__SCRATCH14_DATA__SHIFT 0x0 +#define UVD_SCRATCH14__SCRATCH14_DATA_MASK 0xFFFFFFFFL +//UVD_FREE_COUNTER_REG +#define UVD_FREE_COUNTER_REG__FREE_COUNTER__SHIFT 0x0 +#define UVD_FREE_COUNTER_REG__FREE_COUNTER_MASK 0xFFFFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_DPG_VCPU_CACHE_OFFSET0 +#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0 +#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01FFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_VMID +#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL +//UVD_REG_FILTER_EN +#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN__SHIFT 0x0 +#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV__SHIFT 0x1 +#define 
UVD_REG_FILTER_EN__VIDEO_PRIV_EN__SHIFT 0x2 +#define UVD_REG_FILTER_EN__JPEG_PRIV_EN__SHIFT 0x3 +#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN_MASK 0x00000001L +#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV_MASK 0x00000002L +#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN_MASK 0x00000004L +#define UVD_REG_FILTER_EN__JPEG_PRIV_EN_MASK 0x00000008L +//UVD_SECURITY_REG_VIO_REPORT +#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO__SHIFT 0x0 +#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO__SHIFT 0x1 +#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO__SHIFT 0x2 +#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO__SHIFT 0x3 +#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO__SHIFT 0x4 +#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO__SHIFT 0x5 +#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO_MASK 0x00000001L +#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO_MASK 0x00000002L +#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO_MASK 0x00000004L +#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO_MASK 0x00000008L +#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO_MASK 0x00000010L +#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO_MASK 0x00000020L +//UVD_FW_VERSION +#define UVD_FW_VERSION__FW_VERSION__SHIFT 0x0 +#define UVD_FW_VERSION__FW_VERSION_MASK 0xFFFFFFFFL +//UVD_PF_STATUS +#define UVD_PF_STATUS__JPEG_PF_OCCURED__SHIFT 0x0 +#define UVD_PF_STATUS__NJ_PF_OCCURED__SHIFT 0x1 +#define UVD_PF_STATUS__ENCODER0_PF_OCCURED__SHIFT 0x2 +#define UVD_PF_STATUS__ENCODER1_PF_OCCURED__SHIFT 0x3 +#define UVD_PF_STATUS__ENCODER2_PF_OCCURED__SHIFT 0x4 +#define UVD_PF_STATUS__ENCODER3_PF_OCCURED__SHIFT 0x5 +#define UVD_PF_STATUS__ENCODER4_PF_OCCURED__SHIFT 0x6 +#define UVD_PF_STATUS__EJPEG_PF_OCCURED__SHIFT 0x7 +#define UVD_PF_STATUS__JPEG_PF_CLEAR__SHIFT 0x8 +#define UVD_PF_STATUS__NJ_PF_CLEAR__SHIFT 0x9 +#define UVD_PF_STATUS__ENCODER0_PF_CLEAR__SHIFT 0xa +#define UVD_PF_STATUS__ENCODER1_PF_CLEAR__SHIFT 0xb +#define UVD_PF_STATUS__ENCODER2_PF_CLEAR__SHIFT 0xc +#define UVD_PF_STATUS__ENCODER3_PF_CLEAR__SHIFT 0xd +#define UVD_PF_STATUS__ENCODER4_PF_CLEAR__SHIFT 0xe +#define UVD_PF_STATUS__EJPEG_PF_CLEAR__SHIFT 0xf +#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED__SHIFT 0x10 +#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED__SHIFT 0x11 +#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED__SHIFT 0x12 +#define UVD_PF_STATUS__JPEG2_PF_OCCURED__SHIFT 0x13 +#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED__SHIFT 0x14 +#define UVD_PF_STATUS__JPEG2_PF_CLEAR__SHIFT 0x15 +#define UVD_PF_STATUS__ENCODER5_PF_OCCURED__SHIFT 0x16 +#define UVD_PF_STATUS__ENCODER5_PF_CLEAR__SHIFT 0x17 +#define UVD_PF_STATUS__JPEG_PF_OCCURED_MASK 0x00000001L +#define UVD_PF_STATUS__NJ_PF_OCCURED_MASK 0x00000002L +#define UVD_PF_STATUS__ENCODER0_PF_OCCURED_MASK 0x00000004L +#define UVD_PF_STATUS__ENCODER1_PF_OCCURED_MASK 0x00000008L +#define UVD_PF_STATUS__ENCODER2_PF_OCCURED_MASK 0x00000010L +#define UVD_PF_STATUS__ENCODER3_PF_OCCURED_MASK 0x00000020L +#define UVD_PF_STATUS__ENCODER4_PF_OCCURED_MASK 0x00000040L +#define UVD_PF_STATUS__EJPEG_PF_OCCURED_MASK 0x00000080L +#define UVD_PF_STATUS__JPEG_PF_CLEAR_MASK 0x00000100L +#define UVD_PF_STATUS__NJ_PF_CLEAR_MASK 0x00000200L +#define UVD_PF_STATUS__ENCODER0_PF_CLEAR_MASK 0x00000400L +#define UVD_PF_STATUS__ENCODER1_PF_CLEAR_MASK 0x00000800L +#define UVD_PF_STATUS__ENCODER2_PF_CLEAR_MASK 0x00001000L +#define UVD_PF_STATUS__ENCODER3_PF_CLEAR_MASK 0x00002000L +#define UVD_PF_STATUS__ENCODER4_PF_CLEAR_MASK 0x00004000L +#define UVD_PF_STATUS__EJPEG_PF_CLEAR_MASK 0x00008000L +#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED_MASK 0x00010000L +#define 
UVD_PF_STATUS__DJ_ATM_PF_OCCURED_MASK 0x00020000L +#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED_MASK 0x00040000L +#define UVD_PF_STATUS__JPEG2_PF_OCCURED_MASK 0x00080000L +#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED_MASK 0x00100000L +#define UVD_PF_STATUS__JPEG2_PF_CLEAR_MASK 0x00200000L +#define UVD_PF_STATUS__ENCODER5_PF_OCCURED_MASK 0x00400000L +#define UVD_PF_STATUS__ENCODER5_PF_CLEAR_MASK 0x00800000L +//UVD_DPG_CLK_EN_VCPU_REPORT +#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN__SHIFT 0x0 +#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT__SHIFT 0x1 +#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN_MASK 0x00000001L +#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT_MASK 0x000000FEL +//CC_UVD_VCPU_ERR_DETECT_BOT_LO +#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO__SHIFT 0xc +#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO_MASK 0xFFFFF000L +//CC_UVD_VCPU_ERR_DETECT_BOT_HI +#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI__SHIFT 0x0 +#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI_MASK 0x0000FFFFL +//CC_UVD_VCPU_ERR_DETECT_TOP_LO +#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO__SHIFT 0xc +#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO_MASK 0xFFFFF000L +//CC_UVD_VCPU_ERR_DETECT_TOP_HI +#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI__SHIFT 0x0 +#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI_MASK 0x0000FFFFL +//CC_UVD_VCPU_ERR +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS__SHIFT 0x0 +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR__SHIFT 0x1 +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN__SHIFT 0x2 +#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS__SHIFT 0x3 +#define CC_UVD_VCPU_ERR__RESET_ON_FAULT__SHIFT 0x4 +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS_MASK 0x00000001L +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR_MASK 0x00000002L +#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN_MASK 0x00000004L +#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS_MASK 0x00000008L +#define CC_UVD_VCPU_ERR__RESET_ON_FAULT_MASK 0x00000010L +//CC_UVD_VCPU_ERR_INST_ADDR_LO +#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO__SHIFT 0x0 +#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO_MASK 0xFFFFFFFFL +//CC_UVD_VCPU_ERR_INST_ADDR_HI +#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI__SHIFT 0x0 +#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI_MASK 0x0000FFFFL +//UVD_LMI_MMSCH_NC_SPACE +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE__SHIFT 0x3 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE__SHIFT 0x6 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE__SHIFT 0x9 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE__SHIFT 0xc +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE__SHIFT 0xf +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE__SHIFT 0x12 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE__SHIFT 0x15 +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE_MASK 0x00000007L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE_MASK 0x00000038L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE_MASK 0x000001C0L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE_MASK 0x00000E00L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE_MASK 0x00007000L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE_MASK 0x00038000L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE_MASK 0x001C0000L +#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE_MASK 0x00E00000L +//UVD_LMI_ATOMIC_SPACE +#define 
UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE__SHIFT 0x0 +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE__SHIFT 0x3 +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE__SHIFT 0x6 +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE__SHIFT 0x9 +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE_MASK 0x00000007L +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE_MASK 0x00000038L +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE_MASK 0x000001C0L +#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE_MASK 0x00000E00L +//UVD_GFX8_ADDR_CONFIG +#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4 +#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L +//UVD_GFX10_ADDR_CONFIG +#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6 +#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8 +#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L +#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L +#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//UVD_GPCNT2_CNTL +#define UVD_GPCNT2_CNTL__CLR__SHIFT 0x0 +#define UVD_GPCNT2_CNTL__START__SHIFT 0x1 +#define UVD_GPCNT2_CNTL__COUNTUP__SHIFT 0x2 +#define UVD_GPCNT2_CNTL__CLR_MASK 0x00000001L +#define UVD_GPCNT2_CNTL__START_MASK 0x00000002L +#define UVD_GPCNT2_CNTL__COUNTUP_MASK 0x00000004L +//UVD_GPCNT2_TARGET_LOWER +#define UVD_GPCNT2_TARGET_LOWER__TARGET__SHIFT 0x0 +#define UVD_GPCNT2_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL +//UVD_GPCNT2_STATUS_LOWER +#define UVD_GPCNT2_STATUS_LOWER__COUNT__SHIFT 0x0 +#define UVD_GPCNT2_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL +//UVD_GPCNT2_TARGET_UPPER +#define UVD_GPCNT2_TARGET_UPPER__TARGET__SHIFT 0x0 +#define UVD_GPCNT2_TARGET_UPPER__TARGET_MASK 0x0000FFFFL +//UVD_GPCNT2_STATUS_UPPER +#define UVD_GPCNT2_STATUS_UPPER__COUNT__SHIFT 0x0 +#define UVD_GPCNT2_STATUS_UPPER__COUNT_MASK 0x0000FFFFL +//UVD_GPCNT3_CNTL +#define UVD_GPCNT3_CNTL__CLR__SHIFT 0x0 +#define UVD_GPCNT3_CNTL__START__SHIFT 0x1 +#define UVD_GPCNT3_CNTL__COUNTUP__SHIFT 0x2 +#define UVD_GPCNT3_CNTL__FREQ__SHIFT 0x3 +#define UVD_GPCNT3_CNTL__DIV__SHIFT 0xa +#define UVD_GPCNT3_CNTL__CLR_MASK 0x00000001L +#define UVD_GPCNT3_CNTL__START_MASK 0x00000002L +#define UVD_GPCNT3_CNTL__COUNTUP_MASK 0x00000004L +#define UVD_GPCNT3_CNTL__FREQ_MASK 0x000003F8L +#define UVD_GPCNT3_CNTL__DIV_MASK 0x0001FC00L +//UVD_GPCNT3_TARGET_LOWER +#define UVD_GPCNT3_TARGET_LOWER__TARGET__SHIFT 0x0 +#define UVD_GPCNT3_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL +//UVD_GPCNT3_STATUS_LOWER +#define UVD_GPCNT3_STATUS_LOWER__COUNT__SHIFT 0x0 +#define UVD_GPCNT3_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL +//UVD_GPCNT3_TARGET_UPPER +#define UVD_GPCNT3_TARGET_UPPER__TARGET__SHIFT 0x0 +#define UVD_GPCNT3_TARGET_UPPER__TARGET_MASK 0x0000FFFFL +//UVD_GPCNT3_STATUS_UPPER +#define UVD_GPCNT3_STATUS_UPPER__COUNT__SHIFT 0x0 +#define UVD_GPCNT3_STATUS_UPPER__COUNT_MASK 0x0000FFFFL +//UVD_VCLK_DS_CNTL +#define UVD_VCLK_DS_CNTL__VCLK_DS_EN__SHIFT 0x0 +#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS__SHIFT 0x4 +#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT__SHIFT 0x10 +#define UVD_VCLK_DS_CNTL__VCLK_DS_EN_MASK 0x00000001L +#define 
UVD_VCLK_DS_CNTL__VCLK_DS_STATUS_MASK 0x00000010L +#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L +//UVD_DCLK_DS_CNTL +#define UVD_DCLK_DS_CNTL__DCLK_DS_EN__SHIFT 0x0 +#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS__SHIFT 0x4 +#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT__SHIFT 0x10 +#define UVD_DCLK_DS_CNTL__DCLK_DS_EN_MASK 0x00000001L +#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS_MASK 0x00000010L +#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L +//UVD_TSC_LOWER +#define UVD_TSC_LOWER__COUNT__SHIFT 0x0 +#define UVD_TSC_LOWER__COUNT_MASK 0xFFFFFFFFL +//UVD_TSC_UPPER +#define UVD_TSC_UPPER__COUNT__SHIFT 0x0 +#define UVD_TSC_UPPER__COUNT_MASK 0x00FFFFFFL +//VCN_FEATURES +#define VCN_FEATURES__HAS_VIDEO_DEC__SHIFT 0x0 +#define VCN_FEATURES__HAS_VIDEO_ENC__SHIFT 0x1 +#define VCN_FEATURES__HAS_MJPEG_DEC__SHIFT 0x2 +#define VCN_FEATURES__HAS_MJPEG_ENC__SHIFT 0x3 +#define VCN_FEATURES__HAS_VIDEO_VIRT__SHIFT 0x4 +#define VCN_FEATURES__HAS_H264_LEGACY_DEC__SHIFT 0x5 +#define VCN_FEATURES__HAS_UDEC_DEC__SHIFT 0x6 +#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC__SHIFT 0x7 +#define VCN_FEATURES__HAS_SCLR_DEC__SHIFT 0x8 +#define VCN_FEATURES__HAS_VP9_DEC__SHIFT 0x9 +#define VCN_FEATURES__HAS_AV1_DEC__SHIFT 0xa +#define VCN_FEATURES__HAS_EFC_ENC__SHIFT 0xb +#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC__SHIFT 0xc +#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC__SHIFT 0xd +#define VCN_FEATURES__HAS_AV1_ENC__SHIFT 0xe +#define VCN_FEATURES__INSTANCE_ID__SHIFT 0x1c +#define VCN_FEATURES__HAS_VIDEO_DEC_MASK 0x00000001L +#define VCN_FEATURES__HAS_VIDEO_ENC_MASK 0x00000002L +#define VCN_FEATURES__HAS_MJPEG_DEC_MASK 0x00000004L +#define VCN_FEATURES__HAS_MJPEG_ENC_MASK 0x00000008L +#define VCN_FEATURES__HAS_VIDEO_VIRT_MASK 0x00000010L +#define VCN_FEATURES__HAS_H264_LEGACY_DEC_MASK 0x00000020L +#define VCN_FEATURES__HAS_UDEC_DEC_MASK 0x00000040L +#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC_MASK 0x00000080L +#define VCN_FEATURES__HAS_SCLR_DEC_MASK 0x00000100L +#define VCN_FEATURES__HAS_VP9_DEC_MASK 0x00000200L +#define VCN_FEATURES__HAS_AV1_DEC_MASK 0x00000400L +#define VCN_FEATURES__HAS_EFC_ENC_MASK 0x00000800L +#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC_MASK 0x00001000L +#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC_MASK 0x00002000L +#define VCN_FEATURES__HAS_AV1_ENC_MASK 0x00004000L +#define VCN_FEATURES__INSTANCE_ID_MASK 0xF0000000L +//UVD_GPUIOV_STATUS +#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0 +#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L +//UVD_SCRATCH15 +#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0 +#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL +//UVD_VERSION +#define UVD_VERSION__VARIANT_TYPE__SHIFT 0x0 +#define UVD_VERSION__MINOR_VERSION__SHIFT 0x8 +#define UVD_VERSION__MAJOR_VERSION__SHIFT 0x10 +#define UVD_VERSION__INSTANCE_ID__SHIFT 0x1c +#define UVD_VERSION__VARIANT_TYPE_MASK 0x000000FFL +#define UVD_VERSION__MINOR_VERSION_MASK 0x0000FF00L +#define UVD_VERSION__MAJOR_VERSION_MASK 0x0FFF0000L +#define UVD_VERSION__INSTANCE_ID_MASK 0xF0000000L +//VCN_UMSCH_CNTL +#define VCN_UMSCH_CNTL__umsch_fw_en__SHIFT 0x0 +#define VCN_UMSCH_CNTL__umsch_fw_en_MASK 0x00000001L +//VCN_JPEG_DB_CTRL +#define VCN_JPEG_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_JPEG_DB_CTRL__EN__SHIFT 0x1e +#define VCN_JPEG_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_JPEG_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_JPEG_DB_CTRL__EN_MASK 0x40000000L +#define VCN_JPEG_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB1_DB_CTRL +#define VCN_RB1_DB_CTRL__OFFSET__SHIFT 0x2 +#define 
VCN_RB1_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB1_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB1_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB1_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB1_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB2_DB_CTRL +#define VCN_RB2_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB2_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB2_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB2_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB2_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB2_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB3_DB_CTRL +#define VCN_RB3_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB3_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB3_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB3_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB3_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB3_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB4_DB_CTRL +#define VCN_RB4_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB4_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB4_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB4_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB4_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB4_DB_CTRL__HIT_MASK 0x80000000L +//VCN_UMSCH_RB_DB_CTRL +#define VCN_UMSCH_RB_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_UMSCH_RB_DB_CTRL__EN__SHIFT 0x1e +#define VCN_UMSCH_RB_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_UMSCH_RB_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_UMSCH_RB_DB_CTRL__EN_MASK 0x40000000L +#define VCN_UMSCH_RB_DB_CTRL__HIT_MASK 0x80000000L +//VCN_RB_DB_CTRL +#define VCN_RB_DB_CTRL__OFFSET__SHIFT 0x2 +#define VCN_RB_DB_CTRL__EN__SHIFT 0x1e +#define VCN_RB_DB_CTRL__HIT__SHIFT 0x1f +#define VCN_RB_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL +#define VCN_RB_DB_CTRL__EN_MASK 0x40000000L +#define VCN_RB_DB_CTRL__HIT_MASK 0x80000000L +//VCN_AGDB_CTRL0 +#define VCN_AGDB_CTRL0__OFFSET__SHIFT 0x2 +#define VCN_AGDB_CTRL0__EN__SHIFT 0x1e +#define VCN_AGDB_CTRL0__HIT__SHIFT 0x1f +#define VCN_AGDB_CTRL0__OFFSET_MASK 0x0FFFFFFCL +#define VCN_AGDB_CTRL0__EN_MASK 0x40000000L +#define VCN_AGDB_CTRL0__HIT_MASK 0x80000000L +//VCN_AGDB_CTRL1 +#define VCN_AGDB_CTRL1__OFFSET__SHIFT 0x2 +#define VCN_AGDB_CTRL1__EN__SHIFT 0x1e +#define VCN_AGDB_CTRL1__HIT__SHIFT 0x1f +#define VCN_AGDB_CTRL1__OFFSET_MASK 0x0FFFFFFCL +#define VCN_AGDB_CTRL1__EN_MASK 0x40000000L +#define VCN_AGDB_CTRL1__HIT_MASK 0x80000000L +//VCN_AGDB_CTRL2 +#define VCN_AGDB_CTRL2__OFFSET__SHIFT 0x2 +#define VCN_AGDB_CTRL2__EN__SHIFT 0x1e +#define VCN_AGDB_CTRL2__HIT__SHIFT 0x1f +#define VCN_AGDB_CTRL2__OFFSET_MASK 0x0FFFFFFCL +#define VCN_AGDB_CTRL2__EN_MASK 0x40000000L +#define VCN_AGDB_CTRL2__HIT_MASK 0x80000000L +//VCN_AGDB_CTRL3 +#define VCN_AGDB_CTRL3__OFFSET__SHIFT 0x2 +#define VCN_AGDB_CTRL3__EN__SHIFT 0x1e +#define VCN_AGDB_CTRL3__HIT__SHIFT 0x1f +#define VCN_AGDB_CTRL3__OFFSET_MASK 0x0FFFFFFCL +#define VCN_AGDB_CTRL3__EN_MASK 0x40000000L +#define VCN_AGDB_CTRL3__HIT_MASK 0x80000000L +//VCN_AGDB_CTRL4 +#define VCN_AGDB_CTRL4__OFFSET__SHIFT 0x2 +#define VCN_AGDB_CTRL4__EN__SHIFT 0x1e +#define VCN_AGDB_CTRL4__HIT__SHIFT 0x1f +#define VCN_AGDB_CTRL4__OFFSET_MASK 0x0FFFFFFCL +#define VCN_AGDB_CTRL4__EN_MASK 0x40000000L +#define VCN_AGDB_CTRL4__HIT_MASK 0x80000000L +//VCN_AGDB_CTRL5 +#define VCN_AGDB_CTRL5__OFFSET__SHIFT 0x2 +#define VCN_AGDB_CTRL5__EN__SHIFT 0x1e +#define VCN_AGDB_CTRL5__HIT__SHIFT 0x1f +#define VCN_AGDB_CTRL5__OFFSET_MASK 0x0FFFFFFCL +#define VCN_AGDB_CTRL5__EN_MASK 0x40000000L +#define VCN_AGDB_CTRL5__HIT_MASK 0x80000000L +//VCN_AGDB_MASK0 +#define VCN_AGDB_MASK0__MASK__SHIFT 0x2 +#define VCN_AGDB_MASK0__MASK_MASK 0x0FFFFFFCL +//VCN_AGDB_MASK1 +#define VCN_AGDB_MASK1__MASK__SHIFT 0x2 
+#define VCN_AGDB_MASK1__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK2
+#define VCN_AGDB_MASK2__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK2__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK3
+#define VCN_AGDB_MASK3__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK3__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK4
+#define VCN_AGDB_MASK4__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK4__MASK_MASK 0x0FFFFFFCL
+//VCN_AGDB_MASK5
+#define VCN_AGDB_MASK5__MASK__SHIFT 0x2
+#define VCN_AGDB_MASK5__MASK_MASK 0x0FFFFFFCL
+//VCN_RB_ENABLE
+#define VCN_RB_ENABLE__RB_EN__SHIFT 0x0
+#define VCN_RB_ENABLE__JPEG_RB_EN__SHIFT 0x1
+#define VCN_RB_ENABLE__RB1_EN__SHIFT 0x2
+#define VCN_RB_ENABLE__RB2_EN__SHIFT 0x3
+#define VCN_RB_ENABLE__RB3_EN__SHIFT 0x4
+#define VCN_RB_ENABLE__RB4_EN__SHIFT 0x5
+#define VCN_RB_ENABLE__UMSCH_RB_EN__SHIFT 0x6
+#define VCN_RB_ENABLE__EJPEG_RB_EN__SHIFT 0x7
+#define VCN_RB_ENABLE__AUDIO_RB_EN__SHIFT 0x8
+#define VCN_RB_ENABLE__RB_EN_MASK 0x00000001L
+#define VCN_RB_ENABLE__JPEG_RB_EN_MASK 0x00000002L
+#define VCN_RB_ENABLE__RB1_EN_MASK 0x00000004L
+#define VCN_RB_ENABLE__RB2_EN_MASK 0x00000008L
+#define VCN_RB_ENABLE__RB3_EN_MASK 0x00000010L
+#define VCN_RB_ENABLE__RB4_EN_MASK 0x00000020L
+#define VCN_RB_ENABLE__UMSCH_RB_EN_MASK 0x00000040L
+#define VCN_RB_ENABLE__EJPEG_RB_EN_MASK 0x00000080L
+#define VCN_RB_ENABLE__AUDIO_RB_EN_MASK 0x00000100L
+//VCN_RB_WPTR_CTRL
+#define VCN_RB_WPTR_CTRL__RB_CS_EN__SHIFT 0x0
+#define VCN_RB_WPTR_CTRL__JPEG_CS_EN__SHIFT 0x1
+#define VCN_RB_WPTR_CTRL__RB1_CS_EN__SHIFT 0x2
+#define VCN_RB_WPTR_CTRL__RB2_CS_EN__SHIFT 0x3
+#define VCN_RB_WPTR_CTRL__RB3_CS_EN__SHIFT 0x4
+#define VCN_RB_WPTR_CTRL__RB4_CS_EN__SHIFT 0x5
+#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN__SHIFT 0x6
+#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN__SHIFT 0x7
+#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN__SHIFT 0x8
+#define VCN_RB_WPTR_CTRL__RB_CS_EN_MASK 0x00000001L
+#define VCN_RB_WPTR_CTRL__JPEG_CS_EN_MASK 0x00000002L
+#define VCN_RB_WPTR_CTRL__RB1_CS_EN_MASK 0x00000004L
+#define VCN_RB_WPTR_CTRL__RB2_CS_EN_MASK 0x00000008L
+#define VCN_RB_WPTR_CTRL__RB3_CS_EN_MASK 0x00000010L
+#define VCN_RB_WPTR_CTRL__RB4_CS_EN_MASK 0x00000020L
+#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN_MASK 0x00000040L
+#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN_MASK 0x00000080L
+#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN_MASK 0x00000100L
+//UVD_RB_RPTR
+#define UVD_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR
+#define UVD_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR2
+#define UVD_RB_RPTR2__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR2__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR2
+#define UVD_RB_WPTR2__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR2__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR3
+#define UVD_RB_RPTR3__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR3__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR3
+#define UVD_RB_WPTR3__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR3__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR4
+#define UVD_RB_RPTR4__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR4__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR4
+#define UVD_RB_WPTR4__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR4__RB_WPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_RPTR
+#define UVD_OUT_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_OUT_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_WPTR
+#define UVD_OUT_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_OUT_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_AUDIO_RB_RPTR
+#define UVD_AUDIO_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_AUDIO_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_AUDIO_RB_WPTR
+#define UVD_AUDIO_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_AUDIO_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_RPTR
+#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_WPTR
+#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_DPG_LMA_CTL2
+#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL__SHIFT 0x0
+#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR__SHIFT 0x2
+#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR__SHIFT 0x9
+#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR_MASK 0x000001FCL
+#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+
+
+// addressBlock: uvd_vcn_umsch_dec
+//VCN_UMSCH_MES_CNTL
+#define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0
+#define VCN_UMSCH_MES_CNTL__PerfPipeSel__SHIFT 0x2
+#define VCN_UMSCH_MES_CNTL__RamClkGatingDisable__SHIFT 0x4
+#define VCN_UMSCH_MES_CNTL__InterruptChickenBit__SHIFT 0x5
+#define VCN_UMSCH_MES_CNTL__CpTcOneCycleWrDis__SHIFT 0x6
+#define VCN_UMSCH_MES_CNTL__PIPE_ID_MASK 0x00000003L
+#define VCN_UMSCH_MES_CNTL__PerfPipeSel_MASK 0x0000000CL
+#define VCN_UMSCH_MES_CNTL__RamClkGatingDisable_MASK 0x00000010L
+#define VCN_UMSCH_MES_CNTL__InterruptChickenBit_MASK 0x00000020L
+#define VCN_UMSCH_MES_CNTL__CpTcOneCycleWrDis_MASK 0x00000040L
+//UMSCH_CTL
+#define UMSCH_CTL__P_RESET__SHIFT 0x0
+#define UMSCH_CTL__UTCL2_CLIENT_ID__SHIFT 0x1
+#define UMSCH_CTL__UMSCH_BUSY__SHIFT 0xa
+#define UMSCH_CTL__IllegalRegReadAckLatency__SHIFT 0xd
+#define UMSCH_CTL__P_RESET_MASK 0x00000001L
+#define UMSCH_CTL__UTCL2_CLIENT_ID_MASK 0x000003FEL
+#define UMSCH_CTL__UMSCH_BUSY_MASK 0x00000400L
+#define UMSCH_CTL__IllegalRegReadAckLatency_MASK 0x0000E000L
+//UMSCH_CTL2
+#define UMSCH_CTL2__Spare__SHIFT 0x0
+#define UMSCH_CTL2__Spare_MASK 0xFFFFFFFFL
+//VCN_UMSCH_AGDB_WPTR0
+#define VCN_UMSCH_AGDB_WPTR0__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR0__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR1
+#define VCN_UMSCH_AGDB_WPTR1__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR1__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR2
+#define VCN_UMSCH_AGDB_WPTR2__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR2__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR3
+#define VCN_UMSCH_AGDB_WPTR3__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR3__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR4
+#define VCN_UMSCH_AGDB_WPTR4__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR4__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_AGDB_WPTR5
+#define VCN_UMSCH_AGDB_WPTR5__WPTR__SHIFT 0x4
+#define VCN_UMSCH_AGDB_WPTR5__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_MAILBOX0
+#define VCN_UMSCH_MAILBOX0__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP0
+#define VCN_UMSCH_MAILBOX_RESP0__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX1
+#define VCN_UMSCH_MAILBOX1__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP1
+#define VCN_UMSCH_MAILBOX_RESP1__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX2
+#define VCN_UMSCH_MAILBOX2__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP2
+#define VCN_UMSCH_MAILBOX_RESP2__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX3
+#define VCN_UMSCH_MAILBOX3__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MAILBOX_RESP3
+#define VCN_UMSCH_MAILBOX_RESP3__DATA__SHIFT 0x0
+#define VCN_UMSCH_MAILBOX_RESP3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER0
+#define VCN_UMSCH_SPARE_REGISTER0__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER0__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER1
+#define VCN_UMSCH_SPARE_REGISTER1__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER1__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER2
+#define VCN_UMSCH_SPARE_REGISTER2__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER2__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER3
+#define VCN_UMSCH_SPARE_REGISTER3__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER3__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER4
+#define VCN_UMSCH_SPARE_REGISTER4__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER4__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER5
+#define VCN_UMSCH_SPARE_REGISTER5__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER5__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER6
+#define VCN_UMSCH_SPARE_REGISTER6__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER6__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_SPARE_REGISTER7
+#define VCN_UMSCH_SPARE_REGISTER7__DATA__SHIFT 0x0
+#define VCN_UMSCH_SPARE_REGISTER7__DATA_MASK 0xFFFFFFFFL
+//VCN_UMSCH_MES_UTCL1_CNTL
+#define VCN_UMSCH_MES_UTCL1_CNTL__REDO_LATENCY__SHIFT 0x0
+#define VCN_UMSCH_MES_UTCL1_CNTL__ForceSnoop__SHIFT 0x14
+#define VCN_UMSCH_MES_UTCL1_CNTL__FragLimitMode__SHIFT 0x15
+#define VCN_UMSCH_MES_UTCL1_CNTL__DropMode__SHIFT 0x16
+#define VCN_UMSCH_MES_UTCL1_CNTL__Invalidate__SHIFT 0x17
+#define VCN_UMSCH_MES_UTCL1_CNTL__REDO_LATENCY_MASK 0x000FFFFFL
+#define VCN_UMSCH_MES_UTCL1_CNTL__ForceSnoop_MASK 0x00100000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__FragLimitMode_MASK 0x00200000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__DropMode_MASK 0x00400000L
+#define VCN_UMSCH_MES_UTCL1_CNTL__Invalidate_MASK 0x00800000L
+//VCN_UMSCH_MES_BUSY
+#define VCN_UMSCH_MES_BUSY__MesScratchRamBusy__SHIFT 0x0
+#define VCN_UMSCH_MES_BUSY__MesInstrCacheBusy__SHIFT 0x1
+#define VCN_UMSCH_MES_BUSY__MesDataCacheBusy__SHIFT 0x2
+#define VCN_UMSCH_MES_BUSY__MesBusy__SHIFT 0x3
+#define VCN_UMSCH_MES_BUSY__MesLoadBusy__SHIFT 0x4
+#define VCN_UMSCH_MES_BUSY__MesMutexBusy__SHIFT 0x5
+#define VCN_UMSCH_MES_BUSY__MesThreadBusy__SHIFT 0x6
+#define VCN_UMSCH_MES_BUSY__MesMessageBusy__SHIFT 0x8
+#define VCN_UMSCH_MES_BUSY__MesTcBusy__SHIFT 0xa
+#define VCN_UMSCH_MES_BUSY__MesDmaPending__SHIFT 0xc
+#define VCN_UMSCH_MES_BUSY__MesScratchRamBusy_MASK 0x00000001L
+#define VCN_UMSCH_MES_BUSY__MesInstrCacheBusy_MASK 0x00000002L
+#define VCN_UMSCH_MES_BUSY__MesDataCacheBusy_MASK 0x00000004L
+#define VCN_UMSCH_MES_BUSY__MesBusy_MASK 0x00000008L
+#define VCN_UMSCH_MES_BUSY__MesLoadBusy_MASK 0x00000010L
+#define VCN_UMSCH_MES_BUSY__MesMutexBusy_MASK 0x00000020L
+#define VCN_UMSCH_MES_BUSY__MesThreadBusy_MASK 0x000000C0L
+#define VCN_UMSCH_MES_BUSY__MesMessageBusy_MASK 0x00000300L
+#define VCN_UMSCH_MES_BUSY__MesTcBusy_MASK 0x00000C00L
+#define VCN_UMSCH_MES_BUSY__MesDmaPending_MASK 0x00003000L
+//VCN_UMSCH_RB_BASE_LO
+#define VCN_UMSCH_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define VCN_UMSCH_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//VCN_UMSCH_RB_BASE_HI
+#define VCN_UMSCH_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define VCN_UMSCH_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//VCN_UMSCH_RB_SIZE
+#define VCN_UMSCH_RB_SIZE__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_SIZE__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_RB_RPTR
+#define VCN_UMSCH_RB_RPTR__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_RPTR__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_RB_WPTR
+#define VCN_UMSCH_RB_WPTR__WPTR__SHIFT 0x4
+#define VCN_UMSCH_RB_WPTR__WPTR_MASK 0x007FFFF0L
+//VCN_UMSCH_MASTINT_EN
+#define VCN_UMSCH_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define VCN_UMSCH_MASTINT_EN__SYS_EN__SHIFT 0x2
+#define VCN_UMSCH_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define VCN_UMSCH_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define VCN_UMSCH_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define VCN_UMSCH_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//VCN_UMSCH_IH_CTRL
+#define VCN_UMSCH_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0
+#define VCN_UMSCH_IH_CTRL__IH_STALL_EN__SHIFT 0x1
+#define VCN_UMSCH_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define VCN_UMSCH_IH_CTRL__IH_VMID__SHIFT 0x3
+#define VCN_UMSCH_IH_CTRL__IH_USER_DATA__SHIFT 0x7
+#define VCN_UMSCH_IH_CTRL__IH_RINGID__SHIFT 0x13
+#define VCN_UMSCH_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L
+#define VCN_UMSCH_IH_CTRL__IH_STALL_EN_MASK 0x00000002L
+#define VCN_UMSCH_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define VCN_UMSCH_IH_CTRL__IH_VMID_MASK 0x00000078L
+#define VCN_UMSCH_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L
+#define VCN_UMSCH_IH_CTRL__IH_RINGID_MASK 0x07F80000L
+//VCN_UMSCH_SYS_INT_EN
+#define VCN_UMSCH_SYS_INT_EN__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_EN__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_EN__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_EN__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_EN__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_EN__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_EN__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_EN__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_EN__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_EN__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_EN__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_EN__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_EN__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_EN__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_EN__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_EN__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_STATUS
+#define VCN_UMSCH_SYS_INT_STATUS__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_STATUS__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_STATUS__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_STATUS__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_STATUS__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_STATUS__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_STATUS__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_STATUS__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_STATUS__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_STATUS__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_STATUS__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_STATUS__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_STATUS__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_STATUS__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_STATUS__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_STATUS__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_ACK
+#define VCN_UMSCH_SYS_INT_ACK__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_ACK__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_ACK__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_ACK__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_ACK__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_ACK__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_ACK__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_ACK__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_ACK__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_ACK__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_ACK__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_ACK__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_ACK__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_ACK__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_ACK__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_ACK__INT7_MASK 0x00000080L
+//VCN_UMSCH_SYS_INT_SRC
+#define VCN_UMSCH_SYS_INT_SRC__INT0__SHIFT 0x0
+#define VCN_UMSCH_SYS_INT_SRC__INT1__SHIFT 0x1
+#define VCN_UMSCH_SYS_INT_SRC__INT2__SHIFT 0x2
+#define VCN_UMSCH_SYS_INT_SRC__INT3__SHIFT 0x3
+#define VCN_UMSCH_SYS_INT_SRC__INT4__SHIFT 0x4
+#define VCN_UMSCH_SYS_INT_SRC__INT5__SHIFT 0x5
+#define VCN_UMSCH_SYS_INT_SRC__INT6__SHIFT 0x6
+#define VCN_UMSCH_SYS_INT_SRC__INT7__SHIFT 0x7
+#define VCN_UMSCH_SYS_INT_SRC__INT0_MASK 0x00000001L
+#define VCN_UMSCH_SYS_INT_SRC__INT1_MASK 0x00000002L
+#define VCN_UMSCH_SYS_INT_SRC__INT2_MASK 0x00000004L
+#define VCN_UMSCH_SYS_INT_SRC__INT3_MASK 0x00000008L
+#define VCN_UMSCH_SYS_INT_SRC__INT4_MASK 0x00000010L
+#define VCN_UMSCH_SYS_INT_SRC__INT5_MASK 0x00000020L
+#define VCN_UMSCH_SYS_INT_SRC__INT6_MASK 0x00000040L
+#define VCN_UMSCH_SYS_INT_SRC__INT7_MASK 0x00000080L
+//VCN_UMSCH_IH_CTX_CTRL
+#define VCN_UMSCH_IH_CTX_CTRL__IH_CTX_ID__SHIFT 0x0
+#define VCN_UMSCH_IH_CTX_CTRL__IH_CTX_ID_MASK 0x0FFFFFFFL
+//UVD_UMSCH_FORCE
+#define UVD_UMSCH_FORCE__IC_FORCE_GPUVM__SHIFT 0x0
+#define UVD_UMSCH_FORCE__DC_FORCE_GPUVM__SHIFT 0x1
+#define UVD_UMSCH_FORCE__FORCE_DROP_DISABLE__SHIFT 0x2
+#define UVD_UMSCH_FORCE__FORCE_DROP_INT_DISABLE__SHIFT 0x3
+#define UVD_UMSCH_FORCE__BYPASS_UTCL2_ATC_AUTO_RESP__SHIFT 0x4
+#define UVD_UMSCH_FORCE__IC_FORCE_GPUVM_MASK 0x00000001L
+#define UVD_UMSCH_FORCE__DC_FORCE_GPUVM_MASK 0x00000002L
+#define UVD_UMSCH_FORCE__FORCE_DROP_DISABLE_MASK 0x00000004L
+#define UVD_UMSCH_FORCE__FORCE_DROP_INT_DISABLE_MASK 0x00000008L
+#define UVD_UMSCH_FORCE__BYPASS_UTCL2_ATC_AUTO_RESP_MASK 0x00000010L
+//UMSCH_MES_RESET_CTRL
+#define UMSCH_MES_RESET_CTRL__MES_CORE_SOFT_RESET__SHIFT 0x0
+#define UMSCH_MES_RESET_CTRL__MES_CORE_SOFT_RESET_MASK 0x00000001L
+
+
+// addressBlock: uvd_vcn_cprs64dec
+//VCN_MES_PRGRM_CNTR_START
+#define VCN_MES_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define VCN_MES_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//VCN_MES_INTR_ROUTINE_START
+#define VCN_MES_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define VCN_MES_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//VCN_MES_MTVEC_LO
+#define VCN_MES_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_INTR_ROUTINE_START_HI
+#define VCN_MES_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define VCN_MES_INTR_ROUTINE_START_HI__IR_START_MASK 0xFFFFFFFFL
+//VCN_MES_MTVEC_HI
+#define VCN_MES_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_CNTL
+#define VCN_MES_CNTL__MES_INVALIDATE_ICACHE__SHIFT 0x4
+#define VCN_MES_CNTL__MES_PIPE0_RESET__SHIFT 0x10
+#define VCN_MES_CNTL__MES_PIPE1_RESET__SHIFT 0x11
+#define VCN_MES_CNTL__MES_PIPE2_RESET__SHIFT 0x12
+#define VCN_MES_CNTL__MES_PIPE3_RESET__SHIFT 0x13
+#define VCN_MES_CNTL__MES_PIPE0_ACTIVE__SHIFT 0x1a
+#define VCN_MES_CNTL__MES_PIPE1_ACTIVE__SHIFT 0x1b
+#define VCN_MES_CNTL__MES_PIPE2_ACTIVE__SHIFT 0x1c
+#define VCN_MES_CNTL__MES_PIPE3_ACTIVE__SHIFT 0x1d
+#define VCN_MES_CNTL__MES_HALT__SHIFT 0x1e
+#define VCN_MES_CNTL__MES_STEP__SHIFT 0x1f
+#define VCN_MES_CNTL__MES_INVALIDATE_ICACHE_MASK 0x00000010L
+#define VCN_MES_CNTL__MES_PIPE0_RESET_MASK 0x00010000L
+#define VCN_MES_CNTL__MES_PIPE1_RESET_MASK 0x00020000L
+#define VCN_MES_CNTL__MES_PIPE2_RESET_MASK 0x00040000L
+#define VCN_MES_CNTL__MES_PIPE3_RESET_MASK 0x00080000L
+#define VCN_MES_CNTL__MES_PIPE0_ACTIVE_MASK 0x04000000L
+#define VCN_MES_CNTL__MES_PIPE1_ACTIVE_MASK 0x08000000L
+#define VCN_MES_CNTL__MES_PIPE2_ACTIVE_MASK 0x10000000L
+#define VCN_MES_CNTL__MES_PIPE3_ACTIVE_MASK 0x20000000L
+#define VCN_MES_CNTL__MES_HALT_MASK 0x40000000L
+#define VCN_MES_CNTL__MES_STEP_MASK 0x80000000L
+//VCN_MES_PIPE_PRIORITY_CNTS
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define VCN_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//VCN_MES_PIPE0_PRIORITY
+#define VCN_MES_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE1_PRIORITY
+#define VCN_MES_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE2_PRIORITY
+#define VCN_MES_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_PIPE3_PRIORITY
+#define VCN_MES_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define VCN_MES_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//VCN_MES_HEADER_DUMP
+#define VCN_MES_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define VCN_MES_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//VCN_MES_MIE_LO
+#define VCN_MES_MIE_LO__MES_INT__SHIFT 0x0
+#define VCN_MES_MIE_LO__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_MIE_HI
+#define VCN_MES_MIE_HI__MES_INT__SHIFT 0x0
+#define VCN_MES_MIE_HI__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_INTERRUPT
+#define VCN_MES_INTERRUPT__MES_INT__SHIFT 0x0
+#define VCN_MES_INTERRUPT__MES_INT_MASK 0xFFFFFFFFL
+//VCN_MES_SCRATCH_INDEX
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define VCN_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//VCN_MES_SCRATCH_DATA
+#define VCN_MES_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define VCN_MES_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//VCN_MES_INSTR_PNTR
+#define VCN_MES_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define VCN_MES_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//VCN_MES_MSCRATCH_HI
+#define VCN_MES_MSCRATCH_HI__DATA__SHIFT 0x0
+#define VCN_MES_MSCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_MSCRATCH_LO
+#define VCN_MES_MSCRATCH_LO__DATA__SHIFT 0x0
+#define VCN_MES_MSCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//VCN_MES_MSTATUS_LO
+#define VCN_MES_MSTATUS_LO__STATUS_LO__SHIFT 0x0
+#define VCN_MES_MSTATUS_LO__STATUS_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MSTATUS_HI
+#define VCN_MES_MSTATUS_HI__STATUS_HI__SHIFT 0x0
+#define VCN_MES_MSTATUS_HI__STATUS_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MEPC_LO
+#define VCN_MES_MEPC_LO__MEPC_LO__SHIFT 0x0
+#define VCN_MES_MEPC_LO__MEPC_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MEPC_HI
+#define VCN_MES_MEPC_HI__MEPC_HI__SHIFT 0x0
+#define VCN_MES_MEPC_HI__MEPC_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MCAUSE_LO
+#define VCN_MES_MCAUSE_LO__CAUSE_LO__SHIFT 0x0
+#define VCN_MES_MCAUSE_LO__CAUSE_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MCAUSE_HI
+#define VCN_MES_MCAUSE_HI__CAUSE_HI__SHIFT 0x0
+#define VCN_MES_MCAUSE_HI__CAUSE_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MBADADDR_LO
+#define VCN_MES_MBADADDR_LO__ADDR_LO__SHIFT 0x0
+#define VCN_MES_MBADADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MBADADDR_HI
+#define VCN_MES_MBADADDR_HI__ADDR_HI__SHIFT 0x0
+#define VCN_MES_MBADADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MIP_LO
+#define VCN_MES_MIP_LO__MIP_LO__SHIFT 0x0
+#define VCN_MES_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MIP_HI
+#define VCN_MES_MIP_HI__MIP_HI__SHIFT 0x0
+#define VCN_MES_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//VCN_MES_IC_OP_CNTL
+#define VCN_MES_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define VCN_MES_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define VCN_MES_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define VCN_MES_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define VCN_MES_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define VCN_MES_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//VCN_MES_MCYCLE_LO
+#define VCN_MES_MCYCLE_LO__CYCLE_LO__SHIFT 0x0
+#define VCN_MES_MCYCLE_LO__CYCLE_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MCYCLE_HI
+#define VCN_MES_MCYCLE_HI__CYCLE_HI__SHIFT 0x0
+#define VCN_MES_MCYCLE_HI__CYCLE_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MTIME_LO
+#define VCN_MES_MTIME_LO__TIME_LO__SHIFT 0x0
+#define VCN_MES_MTIME_LO__TIME_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MTIME_HI
+#define VCN_MES_MTIME_HI__TIME_HI__SHIFT 0x0
+#define VCN_MES_MTIME_HI__TIME_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MINSTRET_LO
+#define VCN_MES_MINSTRET_LO__INSTRET_LO__SHIFT 0x0
+#define VCN_MES_MINSTRET_LO__INSTRET_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MINSTRET_HI
+#define VCN_MES_MINSTRET_HI__INSTRET_HI__SHIFT 0x0
+#define VCN_MES_MINSTRET_HI__INSTRET_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MISA_LO
+#define VCN_MES_MISA_LO__MISA_LO__SHIFT 0x0
+#define VCN_MES_MISA_LO__MISA_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MISA_HI
+#define VCN_MES_MISA_HI__MISA_HI__SHIFT 0x0
+#define VCN_MES_MISA_HI__MISA_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MVENDORID_LO
+#define VCN_MES_MVENDORID_LO__MVENDORID_LO__SHIFT 0x0
+#define VCN_MES_MVENDORID_LO__MVENDORID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MVENDORID_HI
+#define VCN_MES_MVENDORID_HI__MVENDORID_HI__SHIFT 0x0
+#define VCN_MES_MVENDORID_HI__MVENDORID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MARCHID_LO
+#define VCN_MES_MARCHID_LO__MARCHID_LO__SHIFT 0x0
+#define VCN_MES_MARCHID_LO__MARCHID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MARCHID_HI
+#define VCN_MES_MARCHID_HI__MARCHID_HI__SHIFT 0x0
+#define VCN_MES_MARCHID_HI__MARCHID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MIMPID_LO
+#define VCN_MES_MIMPID_LO__MIMPID_LO__SHIFT 0x0
+#define VCN_MES_MIMPID_LO__MIMPID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MIMPID_HI
+#define VCN_MES_MIMPID_HI__MIMPID_HI__SHIFT 0x0
+#define VCN_MES_MIMPID_HI__MIMPID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_MHARTID_LO
+#define VCN_MES_MHARTID_LO__MHARTID_LO__SHIFT 0x0
+#define VCN_MES_MHARTID_LO__MHARTID_LO_MASK 0xFFFFFFFFL
+//VCN_MES_MHARTID_HI
+#define VCN_MES_MHARTID_HI__MHARTID_HI__SHIFT 0x0
+#define VCN_MES_MHARTID_HI__MHARTID_HI_MASK 0xFFFFFFFFL
+//VCN_MES_DC_BASE_CNTL
+#define VCN_MES_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define VCN_MES_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define VCN_MES_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define VCN_MES_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//VCN_MES_DC_OP_CNTL
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define VCN_MES_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define VCN_MES_DC_OP_CNTL__DEPRECATED__SHIFT 0x3
+#define VCN_MES_DC_OP_CNTL__DEPRACATED__SHIFT 0x4
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define VCN_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define VCN_MES_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+#define VCN_MES_DC_OP_CNTL__DEPRECATED_MASK 0x00000008L
+#define VCN_MES_DC_OP_CNTL__DEPRACATED_MASK 0x00000010L +//VCN_MES_MTIMECMP_LO +#define VCN_MES_MTIMECMP_LO__TIME_LO__SHIFT 0x0 +#define VCN_MES_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL +//VCN_MES_MTIMECMP_HI +#define VCN_MES_MTIMECMP_HI__TIME_HI__SHIFT 0x0 +#define VCN_MES_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL +//VCN_MES_GP0_LO +#define VCN_MES_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0 +#define VCN_MES_GP0_LO__DATA__SHIFT 0x1 +#define VCN_MES_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L +#define VCN_MES_GP0_LO__DATA_MASK 0xFFFFFFFEL +//VCN_MES_GP0_HI +#define VCN_MES_GP0_HI__M_RET_ADDR__SHIFT 0x0 +#define VCN_MES_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL +//VCN_MES_GP1_LO +#define VCN_MES_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0 +#define VCN_MES_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL +//VCN_MES_GP1_HI +#define VCN_MES_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0 +#define VCN_MES_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL +//VCN_MES_GP2_LO +#define VCN_MES_GP2_LO__STACK_PNTR_LO__SHIFT 0x0 +#define VCN_MES_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL +//VCN_MES_GP2_HI +#define VCN_MES_GP2_HI__STACK_PNTR_HI__SHIFT 0x0 +#define VCN_MES_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL +//VCN_MES_GP3_LO +#define VCN_MES_GP3_LO__DATA__SHIFT 0x0 +#define VCN_MES_GP3_LO__DATA_MASK 0xFFFFFFFFL +//VCN_MES_GP3_HI +#define VCN_MES_GP3_HI__DATA__SHIFT 0x0 +#define VCN_MES_GP3_HI__DATA_MASK 0xFFFFFFFFL +//VCN_MES_GP4_LO +#define VCN_MES_GP4_LO__DATA__SHIFT 0x0 +#define VCN_MES_GP4_LO__DATA_MASK 0xFFFFFFFFL +//VCN_MES_GP4_HI +#define VCN_MES_GP4_HI__DATA__SHIFT 0x0 +#define VCN_MES_GP4_HI__DATA_MASK 0xFFFFFFFFL +//VCN_MES_GP5_LO +#define VCN_MES_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0 +#define VCN_MES_GP5_LO__DATA__SHIFT 0x1 +#define VCN_MES_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L +#define VCN_MES_GP5_LO__DATA_MASK 0xFFFFFFFEL +//VCN_MES_GP5_HI +#define VCN_MES_GP5_HI__M_RET_ADDR__SHIFT 0x0 +#define VCN_MES_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL +//VCN_MES_GP6_LO +#define VCN_MES_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0 +#define VCN_MES_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL +//VCN_MES_GP6_HI +#define VCN_MES_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0 +#define VCN_MES_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL +//VCN_MES_GP7_LO +#define VCN_MES_GP7_LO__STACK_PNTR_LO__SHIFT 0x0 +#define VCN_MES_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL +//VCN_MES_GP7_HI +#define VCN_MES_GP7_HI__STACK_PNTR_HI__SHIFT 0x0 +#define VCN_MES_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL +//VCN_MES_GP8_LO +#define VCN_MES_GP8_LO__DATA__SHIFT 0x0 +#define VCN_MES_GP8_LO__DATA_MASK 0xFFFFFFFFL +//VCN_MES_GP8_HI +#define VCN_MES_GP8_HI__DATA__SHIFT 0x0 +#define VCN_MES_GP8_HI__DATA_MASK 0xFFFFFFFFL +//VCN_MES_GP9_LO +#define VCN_MES_GP9_LO__DATA__SHIFT 0x0 +#define VCN_MES_GP9_LO__DATA_MASK 0xFFFFFFFFL +//VCN_MES_GP9_HI +#define VCN_MES_GP9_HI__DATA__SHIFT 0x0 +#define VCN_MES_GP9_HI__DATA_MASK 0xFFFFFFFFL +//VCN_MES_DM_INDEX_ADDR +#define VCN_MES_DM_INDEX_ADDR__ADDR__SHIFT 0x0 +#define VCN_MES_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL +//VCN_MES_DM_INDEX_DATA +#define VCN_MES_DM_INDEX_DATA__DATA__SHIFT 0x0 +#define VCN_MES_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL +//VCN_MES_LOCAL_BASE0_LO +#define VCN_MES_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10 +#define VCN_MES_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L +//VCN_MES_LOCAL_BASE0_HI +#define VCN_MES_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0 +#define VCN_MES_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL +//VCN_MES_LOCAL_MASK0_LO +#define VCN_MES_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10 +#define VCN_MES_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L +//VCN_MES_LOCAL_MASK0_HI +#define 
VCN_MES_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0 +#define VCN_MES_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL +//VCN_MES_LOCAL_APERTURE +#define VCN_MES_LOCAL_APERTURE__APERTURE__SHIFT 0x0 +#define VCN_MES_LOCAL_APERTURE__APERTURE_MASK 0x00000007L +//VCN_MES_LOCAL_INSTR_BASE_LO +#define VCN_MES_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10 +#define VCN_MES_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L +//VCN_MES_LOCAL_INSTR_BASE_HI +#define VCN_MES_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0 +#define VCN_MES_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL +//VCN_MES_LOCAL_INSTR_MASK_LO +#define VCN_MES_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10 +#define VCN_MES_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L +//VCN_MES_LOCAL_INSTR_MASK_HI +#define VCN_MES_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0 +#define VCN_MES_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL +//VCN_MES_LOCAL_INSTR_APERTURE +#define VCN_MES_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0 +#define VCN_MES_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L +//VCN_MES_LOCAL_SCRATCH_APERTURE +#define VCN_MES_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0 +#define VCN_MES_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L +//VCN_MES_LOCAL_SCRATCH_BASE_LO +#define VCN_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10 +#define VCN_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L +//VCN_MES_LOCAL_SCRATCH_BASE_HI +#define VCN_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0 +#define VCN_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL +//VCN_MES_PERFCOUNT_CNTL +#define VCN_MES_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0 +#define VCN_MES_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL +//VCN_MES_PENDING_INTERRUPT +#define VCN_MES_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0 +#define VCN_MES_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL +//VCN_MES_PRGRM_CNTR_START_HI +#define VCN_MES_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0 +#define VCN_MES_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL +//VCN_MES_INTERRUPT_DATA_16 +#define VCN_MES_INTERRUPT_DATA_16__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_17 +#define VCN_MES_INTERRUPT_DATA_17__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_18 +#define VCN_MES_INTERRUPT_DATA_18__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_19 +#define VCN_MES_INTERRUPT_DATA_19__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_20 +#define VCN_MES_INTERRUPT_DATA_20__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_21 +#define VCN_MES_INTERRUPT_DATA_21__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_22 +#define VCN_MES_INTERRUPT_DATA_22__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_23 +#define VCN_MES_INTERRUPT_DATA_23__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_24 +#define VCN_MES_INTERRUPT_DATA_24__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_25 +#define VCN_MES_INTERRUPT_DATA_25__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_26 +#define VCN_MES_INTERRUPT_DATA_26__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_27 +#define VCN_MES_INTERRUPT_DATA_27__DATA__SHIFT 0x0 
+#define VCN_MES_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_28 +#define VCN_MES_INTERRUPT_DATA_28__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_29 +#define VCN_MES_INTERRUPT_DATA_29__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_30 +#define VCN_MES_INTERRUPT_DATA_30__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL +//VCN_MES_INTERRUPT_DATA_31 +#define VCN_MES_INTERRUPT_DATA_31__DATA__SHIFT 0x0 +#define VCN_MES_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE0_BASE +#define VCN_MES_DC_APERTURE0_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE0_MASK +#define VCN_MES_DC_APERTURE0_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE0_CNTL +#define VCN_MES_DC_APERTURE0_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE1_BASE +#define VCN_MES_DC_APERTURE1_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE1_MASK +#define VCN_MES_DC_APERTURE1_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE1_CNTL +#define VCN_MES_DC_APERTURE1_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE2_BASE +#define VCN_MES_DC_APERTURE2_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE2_MASK +#define VCN_MES_DC_APERTURE2_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE2_CNTL +#define VCN_MES_DC_APERTURE2_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE3_BASE +#define VCN_MES_DC_APERTURE3_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE3_MASK +#define VCN_MES_DC_APERTURE3_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE3_CNTL +#define VCN_MES_DC_APERTURE3_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE4_BASE +#define VCN_MES_DC_APERTURE4_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE4_MASK +#define VCN_MES_DC_APERTURE4_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE4_CNTL +#define VCN_MES_DC_APERTURE4_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE5_BASE +#define VCN_MES_DC_APERTURE5_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE5_MASK +#define VCN_MES_DC_APERTURE5_MASK__MASK__SHIFT 0x0 +#define 
VCN_MES_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE5_CNTL +#define VCN_MES_DC_APERTURE5_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE6_BASE +#define VCN_MES_DC_APERTURE6_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE6_MASK +#define VCN_MES_DC_APERTURE6_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE6_CNTL +#define VCN_MES_DC_APERTURE6_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE7_BASE +#define VCN_MES_DC_APERTURE7_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE7_MASK +#define VCN_MES_DC_APERTURE7_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE7_CNTL +#define VCN_MES_DC_APERTURE7_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE8_BASE +#define VCN_MES_DC_APERTURE8_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE8_MASK +#define VCN_MES_DC_APERTURE8_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE8_CNTL +#define VCN_MES_DC_APERTURE8_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE9_BASE +#define VCN_MES_DC_APERTURE9_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE9_MASK +#define VCN_MES_DC_APERTURE9_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE9_CNTL +#define VCN_MES_DC_APERTURE9_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE10_BASE +#define VCN_MES_DC_APERTURE10_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE10_MASK +#define VCN_MES_DC_APERTURE10_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE10_CNTL +#define VCN_MES_DC_APERTURE10_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE11_BASE +#define VCN_MES_DC_APERTURE11_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE11_MASK +#define VCN_MES_DC_APERTURE11_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE11_CNTL +#define VCN_MES_DC_APERTURE11_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL +#define 
VCN_MES_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE12_BASE +#define VCN_MES_DC_APERTURE12_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE12_MASK +#define VCN_MES_DC_APERTURE12_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE12_CNTL +#define VCN_MES_DC_APERTURE12_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE13_BASE +#define VCN_MES_DC_APERTURE13_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE13_MASK +#define VCN_MES_DC_APERTURE13_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE13_CNTL +#define VCN_MES_DC_APERTURE13_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE14_BASE +#define VCN_MES_DC_APERTURE14_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE14_MASK +#define VCN_MES_DC_APERTURE14_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE14_CNTL +#define VCN_MES_DC_APERTURE14_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L +//VCN_MES_DC_APERTURE15_BASE +#define VCN_MES_DC_APERTURE15_BASE__BASE__SHIFT 0x0 +#define VCN_MES_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE15_MASK +#define VCN_MES_DC_APERTURE15_MASK__MASK__SHIFT 0x0 +#define VCN_MES_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL +//VCN_MES_DC_APERTURE15_CNTL +#define VCN_MES_DC_APERTURE15_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4 +#define VCN_MES_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L + + +// addressBlock: uvd_vcn_hypdec +//VCN_MES_IC_BASE_LO +#define VCN_MES_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc +#define VCN_MES_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L +//VCN_MES_MIBASE_LO +#define VCN_MES_MIBASE_LO__IC_BASE_LO__SHIFT 0xc +#define VCN_MES_MIBASE_LO__IC_BASE_LO_MASK 0xFFFFF000L +//VCN_MES_IC_BASE_HI +#define VCN_MES_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0 +#define VCN_MES_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL +//VCN_MES_MIBASE_HI +#define VCN_MES_MIBASE_HI__IC_BASE_HI__SHIFT 0x0 +#define VCN_MES_MIBASE_HI__IC_BASE_HI_MASK 0x0000FFFFL +//VCN_MES_IC_BASE_CNTL +#define VCN_MES_IC_BASE_CNTL__VMID__SHIFT 0x0 +#define VCN_MES_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17 +#define VCN_MES_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18 +#define VCN_MES_IC_BASE_CNTL__VMID_MASK 0x0000000FL +#define VCN_MES_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L +#define VCN_MES_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L +//VCN_MES_DC_BASE_LO +#define VCN_MES_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10 +#define VCN_MES_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L +//VCN_MES_MDBASE_LO +#define VCN_MES_MDBASE_LO__BASE_LO__SHIFT 0x10 +#define VCN_MES_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L +//VCN_MES_DC_BASE_HI +#define VCN_MES_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0 +#define VCN_MES_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL 
+//VCN_MES_MDBASE_HI +#define VCN_MES_MDBASE_HI__BASE_HI__SHIFT 0x0 +#define VCN_MES_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL +//VCN_MES_MIBOUND_LO +#define VCN_MES_MIBOUND_LO__BOUND_LO__SHIFT 0x0 +#define VCN_MES_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL +//VCN_MES_MIBOUND_HI +#define VCN_MES_MIBOUND_HI__BOUND_HI__SHIFT 0x0 +#define VCN_MES_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL +//VCN_MES_MDBOUND_LO +#define VCN_MES_MDBOUND_LO__BOUND_LO__SHIFT 0x0 +#define VCN_MES_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL +//VCN_MES_MDBOUND_HI +#define VCN_MES_MDBOUND_HI__BOUND_HI__SHIFT 0x0 +#define VCN_MES_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL + + +// addressBlock: uvd_slmi_adpdec +//UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC_VMID +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID__SHIFT 0x4 +#define 
UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID__SHIFT 0x8 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID__SHIFT 0xc +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID__SHIFT 0x10 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID__SHIFT 0x14 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID__SHIFT 0x18 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID__SHIFT 0x1c +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID_MASK 0x0000000FL +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID_MASK 0x000000F0L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID_MASK 0x00000F00L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID_MASK 0x0000F000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID_MASK 0x000F0000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID_MASK 0x00F00000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID_MASK 0x0F000000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID_MASK 0xF0000000L +//UVD_LMI_MMSCH_CTRL +#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN__SHIFT 0x0 +#define UVD_LMI_MMSCH_CTRL__MMSCH_VM__SHIFT 0x1 +#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH__SHIFT 0x2 +#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP__SHIFT 0x3 +#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP__SHIFT 0x5 +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD__SHIFT 0x7 +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR__SHIFT 0x9 +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP__SHIFT 0xb +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP__SHIFT 0xc +#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN_MASK 0x00000001L +#define UVD_LMI_MMSCH_CTRL__MMSCH_VM_MASK 0x00000002L +#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH_MASK 0x00000004L +#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP_MASK 0x00000018L +#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP_MASK 0x00000060L +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_MASK 0x00000180L +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_MASK 0x00000600L +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP_MASK 0x00000800L +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP_MASK 0x00001000L +//UVD_MMSCH_LMI_STATUS +#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT__SHIFT 0x0 +#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0x1 +#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN__SHIFT 0x2 +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN__SHIFT 0x4 +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS__SHIFT 0x8 +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE__SHIFT 0xc +#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN__SHIFT 0xd +#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN__SHIFT 0xe +#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT_MASK 0x00000001L +#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00000002L +#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN_MASK 0x00000004L +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN_MASK 0x000000F0L +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS_MASK 0x00000700L +#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE_MASK 0x00001000L +#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN_MASK 0x00002000L +#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN_MASK 0x00004000L +//UMSCH_IOV_ACTIVE_FCN_ID +#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID__SHIFT 0x0 +#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF__SHIFT 0x1f +#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID_MASK 0x0000003FL +#define UMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF_MASK 0x80000000L +//UVD_UMSCH_LMI_STATUS +#define UVD_UMSCH_LMI_STATUS__UMSCHIC_RD_CLEAN__SHIFT 0x0 +#define UVD_UMSCH_LMI_STATUS__UMSCHDC_RD_CLEAN__SHIFT 0x1 +#define UVD_UMSCH_LMI_STATUS__UMSCHDC_WR_CLEAN__SHIFT 0x2 +#define 
UVD_UMSCH_LMI_STATUS__UMSCHIC_RD_CLEAN_MASK 0x00000001L +#define UVD_UMSCH_LMI_STATUS__UMSCHDC_RD_CLEAN_MASK 0x00000002L +#define UVD_UMSCH_LMI_STATUS__UMSCHDC_WR_CLEAN_MASK 0x00000004L + + +// addressBlock: uvdctxind +//UVD_CGC_MEM_CTRL +#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN__SHIFT 0x0 +#define UVD_CGC_MEM_CTRL__MPC_LS_EN__SHIFT 0x1 +#define UVD_CGC_MEM_CTRL__MPRD_LS_EN__SHIFT 0x2 +#define UVD_CGC_MEM_CTRL__WCB_LS_EN__SHIFT 0x3 +#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN__SHIFT 0x4 +#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN__SHIFT 0x5 +#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN__SHIFT 0x6 +#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN__SHIFT 0x7 +#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN__SHIFT 0x8 +#define UVD_CGC_MEM_CTRL__SYS_LS_EN__SHIFT 0x9 +#define UVD_CGC_MEM_CTRL__VCPU_LS_EN__SHIFT 0xa +#define UVD_CGC_MEM_CTRL__MIF_LS_EN__SHIFT 0xc +#define UVD_CGC_MEM_CTRL__LCM_LS_EN__SHIFT 0xd +#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN__SHIFT 0xe +#define UVD_CGC_MEM_CTRL__MPC1_LS_EN__SHIFT 0xf +#define UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x10 +#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x14 +#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK 0x00000001L +#define UVD_CGC_MEM_CTRL__MPC_LS_EN_MASK 0x00000002L +#define UVD_CGC_MEM_CTRL__MPRD_LS_EN_MASK 0x00000004L +#define UVD_CGC_MEM_CTRL__WCB_LS_EN_MASK 0x00000008L +#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN_MASK 0x00000010L +#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN_MASK 0x00000020L +#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN_MASK 0x00000040L +#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN_MASK 0x00000080L +#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN_MASK 0x00000100L +#define UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK 0x00000200L +#define UVD_CGC_MEM_CTRL__VCPU_LS_EN_MASK 0x00000400L +#define UVD_CGC_MEM_CTRL__MIF_LS_EN_MASK 0x00001000L +#define UVD_CGC_MEM_CTRL__LCM_LS_EN_MASK 0x00002000L +#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN_MASK 0x00004000L +#define UVD_CGC_MEM_CTRL__MPC1_LS_EN_MASK 0x00008000L +#define UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000F0000L +#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00F00000L +//UVD_CGC_CTRL2 +#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN__SHIFT 0x0 +#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN__SHIFT 0x1 +#define UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT 0x2 +#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK 0x00000001L +#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK 0x00000002L +#define UVD_CGC_CTRL2__GATER_DIV_ID_MASK 0x0000001CL +//UVD_CGC_MEM_DS_CTRL +#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN__SHIFT 0x0 +#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN__SHIFT 0x1 +#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN__SHIFT 0x2 +#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN__SHIFT 0x3 +#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN__SHIFT 0x4 +#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN__SHIFT 0x5 +#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN__SHIFT 0x6 +#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN__SHIFT 0x7 +#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN__SHIFT 0x8 +#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN__SHIFT 0x9 +#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN__SHIFT 0xa +#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN__SHIFT 0xc +#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN__SHIFT 0xd +#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN__SHIFT 0xe +#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN__SHIFT 0xf +#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN_MASK 0x00000001L +#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN_MASK 0x00000002L +#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN_MASK 0x00000004L +#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN_MASK 0x00000008L +#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN_MASK 0x00000010L +#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN_MASK 0x00000020L 
+#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN_MASK 0x00000040L +#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN_MASK 0x00000080L +#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN_MASK 0x00000100L +#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN_MASK 0x00000200L +#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN_MASK 0x00000400L +#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN_MASK 0x00001000L +#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN_MASK 0x00002000L +#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN_MASK 0x00004000L +#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN_MASK 0x00008000L +//UVD_CGC_MEM_SD_CTRL +#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN__SHIFT 0x0 +#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN__SHIFT 0x1 +#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN__SHIFT 0x2 +#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN__SHIFT 0x3 +#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN__SHIFT 0x4 +#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN__SHIFT 0x5 +#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN__SHIFT 0x6 +#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN__SHIFT 0x7 +#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN__SHIFT 0x8 +#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN__SHIFT 0x9 +#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN__SHIFT 0xa +#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN__SHIFT 0xc +#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN__SHIFT 0xd +#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN__SHIFT 0xe +#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN__SHIFT 0xf +#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN_MASK 0x00000001L +#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN_MASK 0x00000002L +#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN_MASK 0x00000004L +#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN_MASK 0x00000008L +#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN_MASK 0x00000010L +#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN_MASK 0x00000020L +#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN_MASK 0x00000040L +#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN_MASK 0x00000080L +#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN_MASK 0x00000100L +#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN_MASK 0x00000200L +#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN_MASK 0x00000400L +#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN_MASK 0x00001000L +#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN_MASK 0x00002000L +#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN_MASK 0x00004000L +#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN_MASK 0x00008000L +//UVD_SW_SCRATCH_00 +#define UVD_SW_SCRATCH_00__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_00__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_01 +#define UVD_SW_SCRATCH_01__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_01__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_02 +#define UVD_SW_SCRATCH_02__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_02__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_03 +#define UVD_SW_SCRATCH_03__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_03__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_04 +#define UVD_SW_SCRATCH_04__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_04__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_05 +#define UVD_SW_SCRATCH_05__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_05__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_06 +#define UVD_SW_SCRATCH_06__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_06__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_07 +#define UVD_SW_SCRATCH_07__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_07__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_08 +#define UVD_SW_SCRATCH_08__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_08__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_09 +#define UVD_SW_SCRATCH_09__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_09__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_10 +#define UVD_SW_SCRATCH_10__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_10__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_11 +#define UVD_SW_SCRATCH_11__DATA__SHIFT 
0x0 +#define UVD_SW_SCRATCH_11__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_12 +#define UVD_SW_SCRATCH_12__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_12__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_13 +#define UVD_SW_SCRATCH_13__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_13__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_14 +#define UVD_SW_SCRATCH_14__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_14__DATA_MASK 0xFFFFFFFFL +//UVD_SW_SCRATCH_15 +#define UVD_SW_SCRATCH_15__DATA__SHIFT 0x0 +#define UVD_SW_SCRATCH_15__DATA_MASK 0xFFFFFFFFL +//UVD_IH_SEM_CTRL +#define UVD_IH_SEM_CTRL__IH_STALL_EN__SHIFT 0x0 +#define UVD_IH_SEM_CTRL__SEM_STALL_EN__SHIFT 0x1 +#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN__SHIFT 0x2 +#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN__SHIFT 0x3 +#define UVD_IH_SEM_CTRL__IH_VMID__SHIFT 0x4 +#define UVD_IH_SEM_CTRL__IH_USER_DATA__SHIFT 0x8 +#define UVD_IH_SEM_CTRL__IH_RINGID__SHIFT 0x14 +#define UVD_IH_SEM_CTRL__IH_STALL_EN_MASK 0x00000001L +#define UVD_IH_SEM_CTRL__SEM_STALL_EN_MASK 0x00000002L +#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L +#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN_MASK 0x00000008L +#define UVD_IH_SEM_CTRL__IH_VMID_MASK 0x000000F0L +#define UVD_IH_SEM_CTRL__IH_USER_DATA_MASK 0x000FFF00L +#define UVD_IH_SEM_CTRL__IH_RINGID_MASK 0x0FF00000L +//UVD_MISC_FEATURE_CTL +#define UVD_MISC_FEATURE_CTL__ROW_PREEMPT_EN__SHIFT 0x0 +#define UVD_MISC_FEATURE_CTL__PREEMPT_BLOCKIF_DIS_EN__SHIFT 0x1 +#define UVD_MISC_FEATURE_CTL__ROW_PREEMPT_EN_MASK 0x00000001L +#define UVD_MISC_FEATURE_CTL__PREEMPT_BLOCKIF_DIS_EN_MASK 0x00000002L + + +// addressBlock: lmi_adp_indirect +//UVD_LMI_CRC0 +#define UVD_LMI_CRC0__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC0__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC1 +#define UVD_LMI_CRC1__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC1__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC2 +#define UVD_LMI_CRC2__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC2__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC3 +#define UVD_LMI_CRC3__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC3__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC10 +#define UVD_LMI_CRC10__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC10__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC11 +#define UVD_LMI_CRC11__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC11__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC12 +#define UVD_LMI_CRC12__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC12__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC13 +#define UVD_LMI_CRC13__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC13__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC14 +#define UVD_LMI_CRC14__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC14__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_CRC15 +#define UVD_LMI_CRC15__CRC32__SHIFT 0x0 +#define UVD_LMI_CRC15__CRC32_MASK 0xFFFFFFFFL +//UVD_LMI_SWAP_CNTL2 +#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP__SHIFT 0x0 +#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP__SHIFT 0x2 +#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x4 +#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP__SHIFT 0xc +#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP__SHIFT 0xe +#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP_MASK 0x00000003L +#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP_MASK 0x0000000CL +#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP_MASK 0x00000FF0L +#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP_MASK 0x00003000L +#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP_MASK 0x0000C000L +//UVD_MEMCHECK_SYS_INT_EN +#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN__SHIFT 0x0 +#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN__SHIFT 0x1 +#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN__SHIFT 0x2 +#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN__SHIFT 0x3 +#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN__SHIFT 0x4 +#define 
UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN__SHIFT 0x5 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6 +#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN__SHIFT 0x7 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb +#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN__SHIFT 0xc +#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN__SHIFT 0xf +#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN__SHIFT 0x10 +#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN__SHIFT 0x11 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12 +#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13 +#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14 +#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15 +#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18 +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x1b +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1c +#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1d +#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1e +#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN__SHIFT 0x1f +#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN_MASK 0x00000001L +#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN_MASK 0x00000002L +#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN_MASK 0x00000004L +#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN_MASK 0x00000008L +#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN_MASK 0x00000010L +#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN_MASK 0x00000020L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L +#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN_MASK 0x00000080L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L +#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN_MASK 0x00001000L +#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L +#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L +#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L +#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L +#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L +#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L +#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN_MASK 0x08000000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN_MASK 0x10000000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN_MASK 0x20000000L +#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x40000000L +#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN_MASK 0x80000000L +//UVD_MEMCHECK_SYS_INT_STAT +#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR__SHIFT 0x0 +#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR__SHIFT 0x1 +#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR__SHIFT 0x2 +#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR__SHIFT 0x3 +#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR__SHIFT 0x4 +#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR__SHIFT 0x5 +#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR__SHIFT 0x6 +#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR__SHIFT 0x7 +#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR__SHIFT 0x8 +#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR__SHIFT 0x9 +#define 
UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR__SHIFT 0xa +#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR__SHIFT 0xb +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd +#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR__SHIFT 0xe +#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR__SHIFT 0xf +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16 +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17 +#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR__SHIFT 0x18 +#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR__SHIFT 0x19 +#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e +#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f +#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR_MASK 0x00000001L +#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR_MASK 0x00000002L +#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR_MASK 0x00000004L +#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR_MASK 0x00000008L +#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR_MASK 0x00000010L +#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR_MASK 0x00000020L +#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR_MASK 0x00000040L +#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR_MASK 0x00000080L +#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR_MASK 0x00000100L +#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR_MASK 0x00000200L +#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR_MASK 0x00000400L +#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR_MASK 0x00000800L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L +#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L +#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L +#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L +#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR_MASK 0x01000000L +#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR_MASK 0x02000000L +#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L +#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L +//UVD_MEMCHECK_SYS_INT_ACK +#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK__SHIFT 0x0 +#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK__SHIFT 0x1 +#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK__SHIFT 0x2 +#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK__SHIFT 0x3 +#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK__SHIFT 0x4 +#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK__SHIFT 0x5 +#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK__SHIFT 0x6 +#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK__SHIFT 0x7 +#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK__SHIFT 0x8 +#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK__SHIFT 0x9 +#define 
UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK__SHIFT 0xa +#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK__SHIFT 0xb +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd +#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK__SHIFT 0xe +#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK__SHIFT 0xf +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16 +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17 +#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK__SHIFT 0x18 +#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK__SHIFT 0x19 +#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e +#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f +#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK_MASK 0x00000001L +#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK_MASK 0x00000002L +#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK_MASK 0x00000004L +#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK_MASK 0x00000008L +#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK_MASK 0x00000010L +#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK_MASK 0x00000020L +#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK_MASK 0x00000040L +#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK_MASK 0x00000080L +#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK_MASK 0x00000100L +#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK_MASK 0x00000200L +#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK_MASK 0x00000400L +#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK_MASK 0x00000800L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L +#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L +#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L +#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L +#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK_MASK 0x01000000L +#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK_MASK 0x02000000L +#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L +#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L +//UVD_MEMCHECK_VCPU_INT_EN +#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN__SHIFT 0x0 +#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN__SHIFT 0x1 +#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN__SHIFT 0x2 +#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN__SHIFT 0x3 +#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN__SHIFT 0x4 +#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN__SHIFT 0x5 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6 +#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN__SHIFT 0x7 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9 +#define 
UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb +#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN__SHIFT 0xc +#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN__SHIFT 0xf +#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN__SHIFT 0x10 +#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN__SHIFT 0x11 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12 +#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13 +#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14 +#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15 +#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x19 +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1a +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1b +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1c +#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN__SHIFT 0x1d +#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN_MASK 0x00000001L +#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN_MASK 0x00000002L +#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN_MASK 0x00000004L +#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN_MASK 0x00000008L +#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN_MASK 0x00000010L +#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN_MASK 0x00000020L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L +#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN_MASK 0x00000080L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L +#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN_MASK 0x00001000L +#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L +#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L +#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L +#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L +#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L +#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L +#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN_MASK 0x02000000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN_MASK 0x04000000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN_MASK 0x08000000L +#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x10000000L +#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN_MASK 0x20000000L +//UVD_MEMCHECK_VCPU_INT_STAT +#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR__SHIFT 0x0 +#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR__SHIFT 0x1 +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR__SHIFT 0x2 +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR__SHIFT 0x3 +#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR__SHIFT 0x4 +#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR__SHIFT 0x5 +#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR__SHIFT 0x6 +#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR__SHIFT 0x7 +#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR__SHIFT 0x8 +#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR__SHIFT 0x9 +#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR__SHIFT 0xa +#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR__SHIFT 0xb +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd +#define 
UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR__SHIFT 0xe +#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR__SHIFT 0xf +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16 +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17 +#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR__SHIFT 0x18 +#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR__SHIFT 0x19 +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f +#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR_MASK 0x00000001L +#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR_MASK 0x00000002L +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR_MASK 0x00000004L +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR_MASK 0x00000008L +#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR_MASK 0x00000010L +#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR_MASK 0x00000020L +#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR_MASK 0x00000040L +#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR_MASK 0x00000080L +#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR_MASK 0x00000100L +#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR_MASK 0x00000200L +#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR_MASK 0x00000400L +#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR_MASK 0x00000800L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L +#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L +#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L +#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L +#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR_MASK 0x01000000L +#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR_MASK 0x02000000L +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L +#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L +//UVD_MEMCHECK_VCPU_INT_ACK +#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK__SHIFT 0x0 +#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK__SHIFT 0x1 +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK__SHIFT 0x2 +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK__SHIFT 0x3 +#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK__SHIFT 0x4 +#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK__SHIFT 0x5 +#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK__SHIFT 0x6 +#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK__SHIFT 0x7 +#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK__SHIFT 0x8 +#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK__SHIFT 0x9 +#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK__SHIFT 0xa +#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK__SHIFT 0xb +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc +#define 
UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd +#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK__SHIFT 0xe +#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK__SHIFT 0xf +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16 +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17 +#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK__SHIFT 0x18 +#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK__SHIFT 0x19 +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f +#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK_MASK 0x00000001L +#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK_MASK 0x00000002L +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK_MASK 0x00000004L +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK_MASK 0x00000008L +#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK_MASK 0x00000010L +#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK_MASK 0x00000020L +#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK_MASK 0x00000040L +#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK_MASK 0x00000080L +#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK_MASK 0x00000100L +#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK_MASK 0x00000200L +#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK_MASK 0x00000400L +#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK_MASK 0x00000800L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L +#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L +#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L +#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L +#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK_MASK 0x01000000L +#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK_MASK 0x02000000L +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L +#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L +//UVD_MEMCHECK2_SYS_INT_STAT +#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0 +#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1 +#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2 +#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5 +#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6 +#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7 +#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8 +#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9 +#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa +#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb +#define 
UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10 +#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x16 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x17 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x18 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x19 +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x1a +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x1b +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x1c +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x1d +#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR__SHIFT 0x1e +#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR__SHIFT 0x1f +#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L +#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L +#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L +#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L +#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L +#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L +#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L +#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L +#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L +#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L +#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L +#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00400000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00800000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x01000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x02000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x04000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x08000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x10000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x20000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR_MASK 0x40000000L +#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR_MASK 0x80000000L +//UVD_MEMCHECK2_SYS_INT_ACK +#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0 +#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1 +#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2 +#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5 +#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6 +#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7 +#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8 +#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9 +#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa +#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb +#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10 +#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x16 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x17 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x18 +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x19 
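/*
 * Aside on these generated UVD_MEMCHECK* headers: every field comes as a
 * __SHIFT/_MASK pair, so a handler can test a status bit and write the
 * matching ack bit back. A minimal sketch of that pattern, using two
 * defines from this block -- the RREG32/WREG32 accessors are the usual
 * amdgpu helpers, but the mmUVD_* register offset names here are
 * hypothetical placeholders, not taken from this header:
 *
 *	u32 stat = RREG32(mmUVD_MEMCHECK2_SYS_INT_STAT);
 *	if (stat & UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR_MASK)
 *		WREG32(mmUVD_MEMCHECK2_SYS_INT_ACK,
 *		       1 << UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK__SHIFT);
 */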
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x1a +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x1b +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x1c +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x1d +#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK__SHIFT 0x1e +#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK__SHIFT 0x1f +#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L +#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L +#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L +#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L +#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L +#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L +#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L +#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L +#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L +#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L +#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L +#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00400000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00800000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x01000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x02000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x04000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x08000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x10000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x20000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK_MASK 0x40000000L +#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK_MASK 0x80000000L +//UVD_MEMCHECK2_VCPU_INT_STAT +#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0 +#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1 +#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2 +#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5 +#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6 +#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9 +#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa +#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb +#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10 +#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x12 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x13 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x14 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x15 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x16 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x17 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x18 +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x19 +#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR__SHIFT 0x1a +#define 
UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR__SHIFT 0x1b +#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L +#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L +#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L +#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L +#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L +#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L +#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L +#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L +#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00040000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00080000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x00100000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x00200000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x00400000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x00800000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x01000000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x02000000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR_MASK 0x04000000L +#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR_MASK 0x08000000L +//UVD_MEMCHECK2_VCPU_INT_ACK +#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0 +#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1 +#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2 +#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5 +#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6 +#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9 +#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa +#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb +#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10 +#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x12 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x13 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x14 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x15 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x16 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x17 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x18 +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x19 +#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK__SHIFT 0x1a +#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK__SHIFT 0x1b +#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L +#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L +#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L +#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L +#define 
UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L +#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L +#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L +#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L +#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L +#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00040000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00080000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x00100000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x00200000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x00400000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x00800000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x01000000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x02000000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK_MASK 0x04000000L +#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK_MASK 0x08000000L + + +#endif diff --git a/drivers/gpu/drm/amd/include/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h index e8fae5c77514..2bfd6d0ff050 100644 --- a/drivers/gpu/drm/amd/include/atom-bits.h +++ b/drivers/gpu/drm/amd/include/atom-bits.h @@ -33,7 +33,7 @@ static inline uint8_t get_u8(void *bios, int ptr) #define CU8(ptr) get_u8(ctx->bios, (ptr)) static inline uint16_t get_u16(void *bios, int ptr) { - return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8); + return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8); } #define U16(ptr) get_u16(ctx->ctx->bios, (ptr)) #define CU16(ptr) get_u16(ctx->bios, (ptr)) diff --git a/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h b/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h index 26044cb285d2..48542ea6882a 100644 --- a/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h +++ b/drivers/gpu/drm/amd/include/beige_goby_ip_offset.h @@ -26,13 +26,11 @@ #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 60a6536ff656..f40b6a03fe63 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -149,27 +149,26 @@ struct cgs_ops { struct cgs_os_ops; /* To be define in OS-specific CGS header */ -struct cgs_device -{ +struct cgs_device { const struct cgs_ops *ops; /* to be embedded at the start of driver private structure */ }; /* Convenience macros that make CGS indirect function calls look like * normal function calls */ -#define CGS_CALL(func,dev,...) \ +#define CGS_CALL(func, dev, ...) \ (((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__)) -#define CGS_OS_CALL(func,dev,...) \ +#define CGS_OS_CALL(func, dev, ...) 
\ (((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__)) -#define cgs_read_register(dev,offset) \ - CGS_CALL(read_register,dev,offset) -#define cgs_write_register(dev,offset,value) \ - CGS_CALL(write_register,dev,offset,value) -#define cgs_read_ind_register(dev,space,index) \ - CGS_CALL(read_ind_register,dev,space,index) -#define cgs_write_ind_register(dev,space,index,value) \ - CGS_CALL(write_ind_register,dev,space,index,value) +#define cgs_read_register(dev, offset) \ + CGS_CALL(read_register, dev, offset) +#define cgs_write_register(dev, offset, value) \ + CGS_CALL(write_register, dev, offset, value) +#define cgs_read_ind_register(dev, space, index) \ + CGS_CALL(read_ind_register, dev, space, index) +#define cgs_write_ind_register(dev, space, index, value) \ + CGS_CALL(write_ind_register, dev, space, index, value) #define cgs_get_firmware_info(dev, type, info) \ CGS_CALL(get_firmware_info, dev, type, info) diff --git a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h index ce79e5de8ce3..1a73296a9a74 100644 --- a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h +++ b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h @@ -25,13 +25,11 @@ #define MAX_SEGMENT 5 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; } __maybe_unused; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h b/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h index f84996a73de9..53cb4296df88 100644 --- a/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h +++ b/drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h @@ -25,13 +25,11 @@ #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h index 1d93a0c574c9..acd1cef61b7c 100644 --- a/drivers/gpu/drm/amd/include/dm_pp_interface.h +++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h @@ -27,7 +27,7 @@ #define PP_MAX_CLOCK_LEVELS 16 -enum amd_pp_display_config_type{ +enum amd_pp_display_config_type { AMD_PP_DisplayConfigType_None = 0, AMD_PP_DisplayConfigType_DP54 , AMD_PP_DisplayConfigType_DP432 , @@ -36,8 +36,8 @@ enum amd_pp_display_config_type{ AMD_PP_DisplayConfigType_DP243, AMD_PP_DisplayConfigType_DP216, AMD_PP_DisplayConfigType_DP162, - AMD_PP_DisplayConfigType_HDMI6G , - AMD_PP_DisplayConfigType_HDMI297 , + AMD_PP_DisplayConfigType_HDMI6G, + AMD_PP_DisplayConfigType_HDMI297, AMD_PP_DisplayConfigType_HDMI162, AMD_PP_DisplayConfigType_LVDS, AMD_PP_DisplayConfigType_DVI, @@ -45,8 +45,7 @@ enum amd_pp_display_config_type{ AMD_PP_DisplayConfigType_VGA }; -struct single_display_configuration -{ +struct single_display_configuration { uint32_t controller_index; uint32_t controller_id; uint32_t signal_type; diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index edcb85560ced..32054ecf0b87 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -244,8 +244,7 @@ enum pp_df_cstate { * @PP_PWR_LIMIT_DEFAULT: Default Power Limit * @PP_PWR_LIMIT_MAX: Maximum Power Limit */ -enum pp_power_limit_level -{ +enum pp_power_limit_level { PP_PWR_LIMIT_MIN 
= -1, PP_PWR_LIMIT_CURRENT, PP_PWR_LIMIT_DEFAULT, @@ -260,8 +259,7 @@ enum pp_power_limit_level * @PP_PWR_TYPE_FAST: manages the ~10 ms moving average of APU power, * where supported. */ -enum pp_power_type -{ +enum pp_power_type { PP_PWR_TYPE_SUSTAINED, PP_PWR_TYPE_FAST, }; diff --git a/drivers/gpu/drm/amd/include/navi12_ip_offset.h b/drivers/gpu/drm/amd/include/navi12_ip_offset.h index d8fc00478b6a..e94d80ec8d92 100644 --- a/drivers/gpu/drm/amd/include/navi12_ip_offset.h +++ b/drivers/gpu/drm/amd/include/navi12_ip_offset.h @@ -25,13 +25,11 @@ #define MAX_SEGMENT 5 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/include/navi14_ip_offset.h b/drivers/gpu/drm/amd/include/navi14_ip_offset.h index c39ef651adc6..508011288dea 100644 --- a/drivers/gpu/drm/amd/include/navi14_ip_offset.h +++ b/drivers/gpu/drm/amd/include/navi14_ip_offset.h @@ -25,13 +25,11 @@ #define MAX_SEGMENT 5 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h index 5aac8d545bdc..2e8e6c9875f6 100644 --- a/drivers/gpu/drm/amd/include/pptable.h +++ b/drivers/gpu/drm/amd/include/pptable.h @@ -491,7 +491,7 @@ typedef struct _ClockInfoArray{ //sizeof(ATOM_PPLIB_CLOCK_INFO) UCHAR ucEntrySize; - UCHAR clockInfo[1]; + UCHAR clockInfo[]; }ClockInfoArray; typedef struct _NonClockInfoArray{ @@ -501,7 +501,7 @@ typedef struct _NonClockInfoArray{ //sizeof(ATOM_PPLIB_NONCLOCK_INFO) UCHAR ucEntrySize; - ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[]; }NonClockInfoArray; typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record @@ -658,7 +658,7 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{ UCHAR numEntries; - ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1]; + ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[]; }ATOM_PPLIB_SAMClk_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_SAMU_Table diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h index 7dff85c81e5a..fa023cfdf72d 100644 --- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h +++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h @@ -25,13 +25,11 @@ #define MAX_SEGMENT 5 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h b/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h index b07bc2dd895d..054790470800 100644 --- a/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h +++ b/drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h @@ -25,13 +25,11 @@ #define MAX_SEGMENT 5 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/include/v10_structs.h b/drivers/gpu/drm/amd/include/v10_structs.h index c0e98a98a641..58002a83d1df 100644 --- a/drivers/gpu/drm/amd/include/v10_structs.h +++ b/drivers/gpu/drm/amd/include/v10_structs.h @@ -24,8 +24,7 
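/*
 * Aside on the pptable.h hunks above: the one-element trailing arrays
 * ("UCHAR clockInfo[1];" and friends) become C99 flexible array members
 * ("UCHAR clockInfo[];"), so the compiler and fortify-source checks see
 * the real object bounds instead of a fake single element. A hedged
 * sketch of how such a struct is then sized -- struct_size() is the
 * kernel helper from <linux/overflow.h>, and payload_bytes is an
 * illustrative count, since clockInfo entries are ucEntrySize bytes each:
 *
 *	ClockInfoArray *arr;
 *
 *	arr = kzalloc(struct_size(arr, clockInfo, payload_bytes), GFP_KERNEL);
 *	if (!arr)
 *		return -ENOMEM;
 */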
@@ #ifndef V10_STRUCTS_H_ #define V10_STRUCTS_H_ -struct v10_gfx_mqd -{ +struct v10_gfx_mqd { uint32_t reserved_0; // offset: 0 (0x0) uint32_t reserved_1; // offset: 1 (0x1) uint32_t reserved_2; // offset: 2 (0x2) diff --git a/drivers/gpu/drm/amd/include/vangogh_ip_offset.h b/drivers/gpu/drm/amd/include/vangogh_ip_offset.h index 691073ed780e..695d7d04dfa6 100644 --- a/drivers/gpu/drm/amd/include/vangogh_ip_offset.h +++ b/drivers/gpu/drm/amd/include/vangogh_ip_offset.h @@ -28,13 +28,11 @@ #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/include/vega10_ip_offset.h b/drivers/gpu/drm/amd/include/vega10_ip_offset.h index 3a22a5d16919..1e1ca69f21f7 100644 --- a/drivers/gpu/drm/amd/include/vega10_ip_offset.h +++ b/drivers/gpu/drm/amd/include/vega10_ip_offset.h @@ -24,13 +24,11 @@ #define MAX_INSTANCE 5 #define MAX_SEGMENT 5 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h index 1deb68f3d334..92cf2d9e767f 100644 --- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h +++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h @@ -25,139 +25,137 @@ #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; -static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C20, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } }, +static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE DCE_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } }, +static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0, 0 } }, { { 0, 0, 
0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE GC_BASE ={ { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } }, +static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE NBIO_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } }, +static const struct IP_BASE SDMA1_BASE = { { { { 0x00001860, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE SDMA0_BASE ={ { { { 0x00001260, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE SDMA1_BASE ={ { { { 0x00001860, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { 
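/*
 * Aside on these vega20 IP_BASE tables: each one maps an IP block to up to
 * MAX_INSTANCE instances of MAX_SEGMENT register-aperture base offsets,
 * with all-zero rows marking unused instances. Register macros then build
 * absolute offsets from them, roughly like the sketch below -- this is the
 * general SOC15-style shape with a made-up macro name, not a definition
 * copied from the kernel:
 *
 *	#define IP_REG_OFFSET(ip, inst, seg, reg) \
 *		((ip##_BASE).instance[inst].segment[seg] + (reg))
 */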
{ 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } }, +static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } }, { { 0, 0x00009000, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; /* Adjust VCE_BASE to make vce_4_1 use vce_4_0 offset header files*/ -static const struct IP_BASE VCE_BASE ={ { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE VCE_BASE = { { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE XDMA_BASE ={ { { { 0x00003400, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE RSMU_BASE ={ { { { 0x00012000, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 6627ee07d52d..f84bfed50681 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -693,6 +693,21 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si return ret; } +int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int ret; + + if (!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + + mutex_lock(&adev->pm.mutex); + ret = smu_send_rma_reason(smu); + mutex_unlock(&adev->pm.mutex); + + return ret; +} + int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t *min, diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 3047ffe7f244..621200e0823f 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -450,6 +450,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable); int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size); int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size); +int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev); int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t *min, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index f503e61faa60..b1b4c09c3467 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -226,7 +226,7 @@ int atomctrl_set_engine_dram_timings_rv770( return amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), - (uint32_t *)&engine_clock_parameters); + (uint32_t *)&engine_clock_parameters, sizeof(engine_clock_parameters)); } /* @@ -297,7 
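/*
 * Aside on the ppatomctrl.c hunks here: amdgpu_atom_execute_table() has
 * grown a size argument, and every call site now passes sizeof() of the
 * parameter block handed to the AtomBIOS interpreter so it can bounds-check
 * its workspace rather than trusting the table blob. Each conversion has
 * the same shape (condensed from the diff itself; the struct type is
 * whichever parameter block the call site already used):
 *
 *	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 *			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
 *			(uint32_t *)&pll_parameters, sizeof(pll_parameters));
 */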
+297,7 @@ int atomctrl_get_memory_pll_dividers_si( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), - (uint32_t *)&mpll_parameters); + (uint32_t *)&mpll_parameters, sizeof(mpll_parameters)); if (0 == result) { mpll_param->mpll_fb_divider.clk_frac = @@ -345,7 +345,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), - (uint32_t *)&mpll_parameters); + (uint32_t *)&mpll_parameters, sizeof(mpll_parameters)); if (!result) mpll_param->mpll_post_divider = @@ -366,7 +366,7 @@ int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr, result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), - (uint32_t *)&mpll_parameters); + (uint32_t *)&mpll_parameters, sizeof(mpll_parameters)); /* VEGAM's mpll takes sometime to finish computing */ udelay(10); @@ -396,7 +396,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - (uint32_t *)&pll_parameters); + (uint32_t *)&pll_parameters, sizeof(pll_parameters)); if (0 == result) { dividers->pll_post_divider = pll_parameters.ucPostDiv; @@ -420,7 +420,7 @@ int atomctrl_get_engine_pll_dividers_vi( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - (uint32_t *)&pll_patameters); + (uint32_t *)&pll_patameters, sizeof(pll_patameters)); if (0 == result) { dividers->pll_post_divider = @@ -457,7 +457,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - (uint32_t *)&pll_patameters); + (uint32_t *)&pll_patameters, sizeof(pll_patameters)); if (0 == result) { dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac); @@ -490,7 +490,7 @@ int atomctrl_get_dfs_pll_dividers_vi( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - (uint32_t *)&pll_patameters); + (uint32_t *)&pll_patameters, sizeof(pll_patameters)); if (0 == result) { dividers->pll_post_divider = @@ -773,7 +773,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); if (result) return result; @@ -794,7 +794,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); if (result) return result; @@ -814,7 +814,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); if (result) return result; @@ -835,7 +835,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t 
*)&sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); if (result) return result; @@ -857,7 +857,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); if (result) return result; @@ -878,7 +878,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); if (result) return result; @@ -909,7 +909,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); if (result) return result; @@ -1134,7 +1134,7 @@ int atomctrl_get_voltage_evv_on_sclk( result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - (uint32_t *)&get_voltage_info_param_space); + (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space)); *voltage = result ? 0 : le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) @@ -1179,7 +1179,7 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - (uint32_t *)&get_voltage_info_param_space); + (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space)); if (0 != result) return result; @@ -1359,7 +1359,7 @@ int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&efuse_param); + (uint32_t *)&efuse_param, sizeof(efuse_param)); *efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask; return result; @@ -1380,7 +1380,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), - (uint32_t *)&memory_clock_parameters); + (uint32_t *)&memory_clock_parameters, sizeof(memory_clock_parameters)); return result; } @@ -1399,7 +1399,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - (uint32_t *)&get_voltage_info_param_space); + (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space)); *voltage = result ? 
0 : le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); @@ -1526,7 +1526,7 @@ int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, SetVoltage), - (uint32_t *)voltage_parameters); + (uint32_t *)voltage_parameters, sizeof(*voltage_parameters)); *virtual_voltage_id = voltage_parameters->usVoltageLevel; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c index a47a47238e2b..82d540334318 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c @@ -258,7 +258,7 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, idx = GetIndexIntoMasterCmdTable(computegpuclockparam); if (amdgpu_atom_execute_table( - adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters)) + adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters, sizeof(pll_parameters))) return -EINVAL; pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *) @@ -505,7 +505,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, ix = GetIndexIntoMasterCmdTable(getsmuclockinfo); if (amdgpu_atom_execute_table( - adev->mode_info.atom_context, ix, (uint32_t *)¶meters)) + adev->mode_info.atom_context, ix, (uint32_t *)¶meters, sizeof(parameters))) return -EINVAL; output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)¶meters; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 0ad947df777a..eedb9a4f7e2d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -751,6 +751,7 @@ static int smu_early_init(void *handle) static int smu_set_default_dpm_table(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; struct smu_power_context *smu_power = &smu->smu_power; struct smu_power_gate *power_gate = &smu_power->power_gate; int vcn_gate, jpeg_gate; @@ -759,25 +760,34 @@ static int smu_set_default_dpm_table(struct smu_context *smu) if (!smu->ppt_funcs->set_default_dpm_table) return 0; - vcn_gate = atomic_read(&power_gate->vcn_gated); - jpeg_gate = atomic_read(&power_gate->jpeg_gated); + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) + vcn_gate = atomic_read(&power_gate->vcn_gated); + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) + jpeg_gate = atomic_read(&power_gate->jpeg_gated); - ret = smu_dpm_set_vcn_enable(smu, true); - if (ret) - return ret; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + ret = smu_dpm_set_vcn_enable(smu, true); + if (ret) + return ret; + } - ret = smu_dpm_set_jpeg_enable(smu, true); - if (ret) - goto err_out; + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + ret = smu_dpm_set_jpeg_enable(smu, true); + if (ret) + goto err_out; + } ret = smu->ppt_funcs->set_default_dpm_table(smu); if (ret) dev_err(smu->adev->dev, "Failed to setup default dpm clock tables!\n"); - smu_dpm_set_jpeg_enable(smu, !jpeg_gate); + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) + smu_dpm_set_jpeg_enable(smu, !jpeg_gate); err_out: - smu_dpm_set_vcn_enable(smu, !vcn_gate); + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) + smu_dpm_set_vcn_enable(smu, !vcn_gate); + return ret; } @@ -3669,3 +3679,13 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) return ret; } + +int smu_send_rma_reason(struct smu_context *smu) +{ + int ret = 0; + + if (smu->ppt_funcs && 
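/*
 * The NULL checks around this call are the standard optional-hook pattern:
 * a ppt_funcs entry is only dispatched if the ASIC implements it. Condensed
 * from the hunks in this patch, the full RMA-reason chain is:
 *
 *	amdgpu_dpm_send_rma_reason(adev)
 *	  -> mutex_lock(&adev->pm.mutex); smu_send_rma_reason(smu);
 *	       -> smu->ppt_funcs->send_rma_reason(smu)      // if implemented
 *	            -> smu_cmn_send_smc_msg(smu,
 *	                       SMU_MSG_RmaDueToBadPageThreshold, NULL);
 */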
smu->ppt_funcs->send_rma_reason) + ret = smu->ppt_funcs->send_rma_reason(smu); + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 66e84defd0b6..a870bdd49a4e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -1342,6 +1342,11 @@ struct pptable_funcs { int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size); /** + * @send_rma_reason: message rma reason event to SMU. + */ + int (*send_rma_reason)(struct smu_context *smu); + + /** * @get_ecc_table: message SMU to get ECC INFO table. */ ssize_t (*get_ecc_info)(struct smu_context *smu, void *table); @@ -1588,5 +1593,6 @@ int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size); void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size); int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size); +int smu_send_rma_reason(struct smu_context *smu); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h index 509e3cd483fb..86758051cb93 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h @@ -91,7 +91,8 @@ #define PPSMC_MSG_QueryValidMcaCeCount 0x3A #define PPSMC_MSG_McaBankCeDumpDW 0x3B #define PPSMC_MSG_SelectPLPDMode 0x40 -#define PPSMC_Message_Count 0x41 +#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43 +#define PPSMC_Message_Count 0x44 //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 953a767613b1..a941fdbf78b6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -261,7 +261,8 @@ __SMU_DUMMY_MAP(SetSoftMaxVpe), \ __SMU_DUMMY_MAP(SetSoftMinVpe), \ __SMU_DUMMY_MAP(GetMetricsVersion), \ - __SMU_DUMMY_MAP(EnableUCLKShadow), + __SMU_DUMMY_MAP(EnableUCLKShadow), \ + __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index c7bfa68bf00f..f6545093bfc1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -514,7 +514,7 @@ static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, getsmuclockinfo); ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, - (uint32_t *)&input); + (uint32_t *)&input, sizeof(input)); if (ret) return -EINVAL; @@ -1432,24 +1432,24 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n"); orderly_poweroff(true); } else if (client_id == SOC15_IH_CLIENTID_MP1) { - if (src_id == 0xfe) { + if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) { /* ACK SMUToHost interrupt */ data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data); switch (ctxid) { - case 0x3: + case SMU_IH_INTERRUPT_CONTEXT_ID_AC: dev_dbg(adev->dev, "Switched to AC mode!\n"); schedule_work(&smu->interrupt_work); adev->pm.ac_power = true; break; - case 0x4: + case 
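/*
 * Here and in smu_v13_0.c below, the magic ctxid values are replaced by the
 * SMU_IH_INTERRUPT_CONTEXT_ID_* names this patch adds to smu_cmn.h; the
 * numbers are unchanged (AC=0x3, DC=0x4, thermal throttling=0x7, fan
 * abnormal=0x8, fan recovery=0x9), so the handlers behave the same, plus a
 * new default: branch that logs any unhandled context id.
 */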
SMU_IH_INTERRUPT_CONTEXT_ID_DC: dev_dbg(adev->dev, "Switched to DC mode!\n"); schedule_work(&smu->interrupt_work); adev->pm.ac_power = false; break; - case 0x7: + case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING: /* * Increment the throttle interrupt counter */ @@ -1462,6 +1462,10 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, schedule_work(&smu->throttling_logging_work); break; + default: + dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n", + ctxid, client_id); + break; } } } @@ -1504,7 +1508,7 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu) return ret; ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, - 0xfe, + SMU_IH_INTERRUPT_ID_TO_DRIVER, irq_src); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index 5e408a195860..ed15f5a0fd11 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -301,7 +301,7 @@ static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, getsmuclockinfo); ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, - (uint32_t *)&input); + (uint32_t *)&input, sizeof(input)); if (ret) return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index c486182ff275..48170bb5112e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1369,24 +1369,24 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n"); orderly_poweroff(true); } else if (client_id == SOC15_IH_CLIENTID_MP1) { - if (src_id == 0xfe) { + if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) { /* ACK SMUToHost interrupt */ data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data); switch (ctxid) { - case 0x3: + case SMU_IH_INTERRUPT_CONTEXT_ID_AC: dev_dbg(adev->dev, "Switched to AC mode!\n"); smu_v13_0_ack_ac_dc_interrupt(smu); adev->pm.ac_power = true; break; - case 0x4: + case SMU_IH_INTERRUPT_CONTEXT_ID_DC: dev_dbg(adev->dev, "Switched to DC mode!\n"); smu_v13_0_ack_ac_dc_interrupt(smu); adev->pm.ac_power = false; break; - case 0x7: + case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING: /* * Increment the throttle interrupt counter */ @@ -1399,7 +1399,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, schedule_work(&smu->throttling_logging_work); break; - case 0x8: + case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL: high = smu->thermal_range.software_shutdown_temp + smu->thermal_range.software_shutdown_temp_offset; high = min_t(typeof(high), @@ -1416,7 +1416,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data); break; - case 0x9: + case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY: high = min_t(typeof(high), SMU_THERMAL_MAXIMUM_ALERT_TEMP, smu->thermal_range.software_shutdown_temp); @@ -1429,6 +1429,10 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data); break; + default: + dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n", + ctxid, client_id); + break; } } } @@ -1473,7 +1477,7 @@ int 
smu_v13_0_register_irq_handler(struct smu_context *smu) return ret; ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, - 0xfe, + SMU_IH_INTERRUPT_ID_TO_DRIVER, irq_src); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 7e1941cf1796..45a84fd5dc04 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -45,6 +45,7 @@ #include <linux/pci.h> #include "amdgpu_ras.h" #include "amdgpu_mca.h" +#include "amdgpu_aca.h" #include "smu_cmn.h" #include "mp/mp_13_0_6_offset.h" #include "mp/mp_13_0_6_sh_mask.h" @@ -171,6 +172,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0), MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0), MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), + MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), }; // clang-format on @@ -1438,7 +1440,10 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, entry->src_data[1]); schedule_work(&smu->throttling_logging_work); } - + break; + default: + dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n", + ctxid, client_id); break; } } @@ -1574,6 +1579,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; struct smu_13_0_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table; + struct smu_13_0_dpm_table *uclk_table = + &dpm_context->dpm_tables.uclk_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; int ret; @@ -1589,17 +1596,27 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, return 0; case AMD_DPM_FORCED_LEVEL_AUTO: - if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) && - (gfx_table->max == pstate_table->gfxclk_pstate.curr.max)) - return 0; + if ((gfx_table->min != pstate_table->gfxclk_pstate.curr.min) || + (gfx_table->max != pstate_table->gfxclk_pstate.curr.max)) { + ret = smu_v13_0_6_set_gfx_soft_freq_limited_range( + smu, gfx_table->min, gfx_table->max); + if (ret) + return ret; - ret = smu_v13_0_6_set_gfx_soft_freq_limited_range( - smu, gfx_table->min, gfx_table->max); - if (ret) - return ret; + pstate_table->gfxclk_pstate.curr.min = gfx_table->min; + pstate_table->gfxclk_pstate.curr.max = gfx_table->max; + } + + if (uclk_table->max != pstate_table->uclk_pstate.curr.max) { + /* Min UCLK is not expected to be changed */ + ret = smu_v13_0_set_soft_freq_limited_range( + smu, SMU_UCLK, 0, uclk_table->max); + if (ret) + return ret; + pstate_table->uclk_pstate.curr.max = uclk_table->max; + } + pstate_table->uclk_pstate.custom.max = 0; - pstate_table->gfxclk_pstate.curr.min = gfx_table->min; - pstate_table->gfxclk_pstate.curr.max = gfx_table->max; return 0; case AMD_DPM_FORCED_LEVEL_MANUAL: return 0; @@ -1622,7 +1639,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, uint32_t max_clk; int ret = 0; - if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK && + clk_type != SMU_UCLK) return -EINVAL; if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) && @@ -1632,18 +1650,31 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { if (min >= max) { dev_err(smu->adev->dev, - "Minimum GFX clk should be less than the maximum allowed clock\n"); + 
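/*
 * Aside on the smu_v13_0_6 hunks around here: soft frequency limiting is
 * extended from GFXCLK to UCLK, but asymmetrically -- only the UCLK max may
 * move, with the min pinned at 0 ("Only max clock limiting is allowed for
 * UCLK"), and the AUTO performance level now restores uclk_table->max too.
 * Condensed shape of the UCLK branch added below:
 *
 *	if (clk_type == SMU_UCLK) {
 *		if (max == pstate_table->uclk_pstate.curr.max)
 *			return 0;
 *		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_UCLK, 0, max);
 *		if (!ret)
 *			pstate_table->uclk_pstate.curr.max = max;
 *	}
 */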
"Minimum clk should be less than the maximum allowed clock\n"); return -EINVAL; } - if ((min == pstate_table->gfxclk_pstate.curr.min) && - (max == pstate_table->gfxclk_pstate.curr.max)) - return 0; + if (clk_type == SMU_GFXCLK) { + if ((min == pstate_table->gfxclk_pstate.curr.min) && + (max == pstate_table->gfxclk_pstate.curr.max)) + return 0; - ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max); - if (!ret) { - pstate_table->gfxclk_pstate.curr.min = min; - pstate_table->gfxclk_pstate.curr.max = max; + ret = smu_v13_0_6_set_gfx_soft_freq_limited_range( + smu, min, max); + if (!ret) { + pstate_table->gfxclk_pstate.curr.min = min; + pstate_table->gfxclk_pstate.curr.max = max; + } + } + + if (clk_type == SMU_UCLK) { + if (max == pstate_table->uclk_pstate.curr.max) + return 0; + /* Only max clock limiting is allowed for UCLK */ + ret = smu_v13_0_set_soft_freq_limited_range( + smu, SMU_UCLK, 0, max); + if (!ret) + pstate_table->uclk_pstate.curr.max = max; } return ret; @@ -1736,6 +1767,40 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, return -EINVAL; } break; + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (size != 2) { + dev_err(smu->adev->dev, + "Input parameter number not correct\n"); + return -EINVAL; + } + + if (!smu_cmn_feature_is_enabled(smu, + SMU_FEATURE_DPM_UCLK_BIT)) { + dev_warn(smu->adev->dev, + "UCLK_LIMITS setting not supported!\n"); + return -EOPNOTSUPP; + } + + if (input[0] == 0) { + dev_info(smu->adev->dev, + "Setting min UCLK level is not supported"); + return -EINVAL; + } else if (input[0] == 1) { + if (input[1] > dpm_context->dpm_tables.uclk_table.max) { + dev_warn( + smu->adev->dev, + "Maximum UCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n", + input[1], + dpm_context->dpm_tables.uclk_table.max); + pstate_table->uclk_pstate.custom.max = + pstate_table->uclk_pstate.curr.max; + return -EINVAL; + } + + pstate_table->uclk_pstate.custom.max = input[1]; + } + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size != 0) { dev_err(smu->adev->dev, @@ -1746,8 +1811,19 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - return smu_v13_0_6_set_soft_freq_limited_range( + ret = smu_v13_0_6_set_soft_freq_limited_range( smu, SMU_GFXCLK, min_clk, max_clk); + + if (ret) + return ret; + + min_clk = dpm_context->dpm_tables.uclk_table.min; + max_clk = dpm_context->dpm_tables.uclk_table.max; + ret = smu_v13_0_6_set_soft_freq_limited_range( + smu, SMU_UCLK, min_clk, max_clk); + if (ret) + return ret; + pstate_table->uclk_pstate.custom.max = 0; } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1767,8 +1843,19 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = pstate_table->gfxclk_pstate.custom.min; max_clk = pstate_table->gfxclk_pstate.custom.max; - return smu_v13_0_6_set_soft_freq_limited_range( + ret = smu_v13_0_6_set_soft_freq_limited_range( smu, SMU_GFXCLK, min_clk, max_clk); + + if (ret) + return ret; + + if (!pstate_table->uclk_pstate.custom.max) + return 0; + + min_clk = pstate_table->uclk_pstate.curr.min; + max_clk = pstate_table->uclk_pstate.custom.max; + return smu_v13_0_6_set_soft_freq_limited_range( + smu, SMU_UCLK, min_clk, max_clk); } break; default: @@ -2376,6 +2463,24 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, return ret; } +static int smu_v13_0_6_send_rma_reason(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret; + + /* NOTE: the 
message is only valid on dGPU with pmfw 85.90.0 and above */ + if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00) + return 0; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL); + if (ret) + dev_err(smu->adev->dev, + "[%s] failed to send BadPageThreshold event to SMU\n", + __func__); + + return ret; +} + static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -2547,18 +2652,22 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) { uint64_t status0; + uint32_t ext_error_code; + uint32_t odecc_err_cnt; status0 = entry->regs[MCA_REG_IDX_STATUS]; + ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status0); + odecc_err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]); if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) { *count = 0; return 0; } - if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0)) - *count = 1; - else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0)) - *count = 1; + if (umc_v12_0_is_deferred_error(adev, status0) || + umc_v12_0_is_uncorrectable_error(adev, status0) || + umc_v12_0_is_correctable_error(adev, status0)) + *count = (ext_error_code == 0) ? odecc_err_cnt : 1; return 0; } @@ -2857,6 +2966,143 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = { .mca_get_valid_mca_count = mca_smu_get_valid_mca_count, }; +static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_v13_0_6_mca_set_debug_mode(smu, enable); +} + +static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count) +{ + uint32_t msg; + int ret; + + if (!count) + return -EINVAL; + + switch (type) { + case ACA_ERROR_TYPE_UE: + msg = SMU_MSG_QueryValidMcaCount; + break; + case ACA_ERROR_TYPE_CE: + msg = SMU_MSG_QueryValidMcaCeCount; + break; + default: + return -EINVAL; + } + + ret = smu_cmn_send_smc_msg(smu, msg, count); + if (ret) { + *count = 0; + return ret; + } + + return 0; +} + +static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, + enum aca_error_type type, u32 *count) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int ret; + + switch (type) { + case ACA_ERROR_TYPE_UE: + case ACA_ERROR_TYPE_CE: + ret = smu_v13_0_6_get_valid_aca_count(smu, type, count); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, + int idx, int offset, u32 *val) +{ + uint32_t msg, param; + + switch (type) { + case ACA_ERROR_TYPE_UE: + msg = SMU_MSG_McaBankDumpDW; + break; + case ACA_ERROR_TYPE_CE: + msg = SMU_MSG_McaBankCeDumpDW; + break; + default: + return -EINVAL; + } + + param = ((idx & 0xffff) << 16) | (offset & 0xfffc); + + return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val); +} + +static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, + int idx, int offset, u32 *val, int count) +{ + int ret, i; + + if (!val) + return -EINVAL; + + for (i = 0; i < count; i++) { + ret = __smu_v13_0_6_aca_bank_dump(smu, type, idx, offset + (i << 2), &val[i]); + if (ret) + return ret; + } + + return 0; +} + +static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type, + int idx, int 
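/*
 * Two details worth flagging in this ACA/RMA plumbing: (1) the firmware
 * gate above, 0x00555a00, reads as the bytes 85/90/0, i.e. the "pmfw
 * 85.90.0" named in the comment, assuming smc_fw_version uses the usual
 * major/minor/patch byte packing; (2) bank registers are fetched from the
 * SMU one dword at a time -- the message parameter packs the bank index
 * into the high half and a dword-aligned byte offset into the low half,
 *
 *	param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
 *
 * and this function then reassembles each 64-bit register from the two
 * dwords at reg_idx * 8:
 *
 *	*val = (u64)data[1] << 32 | data[0];
 */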
reg_idx, u64 *val) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + u32 data[2] = {0, 0}; + int ret; + + if (!val || reg_idx >= ACA_REG_IDX_COUNT) + return -EINVAL; + + ret = smu_v13_0_6_aca_bank_dump(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data)); + if (ret) + return ret; + + *val = (u64)data[1] << 32 | data[0]; + + dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n", + type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); + + return 0; +} + +static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, + enum aca_error_type type, int idx, struct aca_bank *bank) +{ + int i, ret, count; + + count = min_t(int, 16, ARRAY_SIZE(bank->regs)); + for (i = 0; i < count; i++) { + ret = aca_bank_read_reg(adev, type, idx, i, &bank->regs[i]); + if (ret) + return ret; + } + + return 0; +} + +static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { + .max_ue_bank_count = 12, + .max_ce_bank_count = 12, + .set_debug_mode = aca_smu_set_debug_mode, + .get_valid_aca_count = aca_smu_get_valid_aca_count, + .get_valid_aca_bank = aca_smu_get_valid_aca_bank, +}; + static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, enum pp_xgmi_plpd_mode mode) { @@ -2895,13 +3141,6 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, return ret; } -static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu, - void *table) -{ - /* Support ecc info by default */ - return 0; -} - static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -2956,7 +3195,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .i2c_init = smu_v13_0_6_i2c_control_init, .i2c_fini = smu_v13_0_6_i2c_control_fini, .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, - .get_ecc_info = smu_v13_0_6_get_ecc_info, + .send_rma_reason = smu_v13_0_6_send_rma_reason, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) @@ -2969,4 +3208,5 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; smu_v13_0_set_smu_mailbox_registers(smu); amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); + amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 4894f7ee737b..2aa7e9945a0b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -892,7 +892,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu) // TODO: THM related ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, - 0xfe, + SMU_IH_INTERRUPT_ID_TO_DRIVER, irq_src); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 00cd615bbcdc..b8dbd4e25348 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -378,8 +378,15 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, res = __smu_cmn_reg2errno(smu, reg); if (res != 0) __smu_cmn_reg_print_error(smu, reg, index, param, msg); - if (read_arg) + if (read_arg) { smu_cmn_read_arg(smu, read_arg); + dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\ + readval: 0x%08x\n", + smu_get_message_name(smu, msg), index, param, reg, *read_arg); + } else { + dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n", + 
smu_get_message_name(smu, msg), index, param, reg); + } Out: if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) { amdgpu_device_halt(adev); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index cc590e27d88a..81bfce1406e5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -30,6 +30,16 @@ #define FDO_PWM_MODE_STATIC 1 #define FDO_PWM_MODE_STATIC_RPM 5 +#define SMU_IH_INTERRUPT_ID_TO_DRIVER 0xFE +#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO 0x2 +#define SMU_IH_INTERRUPT_CONTEXT_ID_AC 0x3 +#define SMU_IH_INTERRUPT_CONTEXT_ID_DC 0x4 +#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5 +#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6 +#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 +#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 +#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 + extern const int link_speed[]; /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */ diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 8be235144f6d..b5518ff97165 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -604,10 +604,10 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block, * ADV75xx helpers */ -static struct edid *adv7511_get_edid(struct adv7511 *adv7511, - struct drm_connector *connector) +static const struct drm_edid *adv7511_edid_read(struct adv7511 *adv7511, + struct drm_connector *connector) { - struct edid *edid; + const struct drm_edid *drm_edid; /* Reading the EDID only works if the device is powered */ if (!adv7511->powered) { @@ -621,31 +621,44 @@ static struct edid *adv7511_get_edid(struct adv7511 *adv7511, edid_i2c_addr); } - edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511); + drm_edid = drm_edid_read_custom(connector, adv7511_get_edid_block, adv7511); if (!adv7511->powered) __adv7511_power_off(adv7511); - adv7511_set_config_csc(adv7511, connector, adv7511->rgb, - drm_detect_hdmi_monitor(edid)); + if (drm_edid) { + /* + * FIXME: The CEC physical address should be set using + * cec_s_phys_addr(adap, + * connector->display_info.source_physical_address, false) from + * a path that has read the EDID and called + * drm_edid_connector_update(). 
+ */ + const struct edid *edid = drm_edid_raw(drm_edid); - cec_s_phys_addr_from_edid(adv7511->cec_adap, edid); + adv7511_set_config_csc(adv7511, connector, adv7511->rgb, + drm_detect_hdmi_monitor(edid)); - return edid; + cec_s_phys_addr_from_edid(adv7511->cec_adap, edid); + } else { + cec_s_phys_addr_from_edid(adv7511->cec_adap, NULL); + } + + return drm_edid; } static int adv7511_get_modes(struct adv7511 *adv7511, struct drm_connector *connector) { - struct edid *edid; + const struct drm_edid *drm_edid; unsigned int count; - edid = adv7511_get_edid(adv7511, connector); + drm_edid = adv7511_edid_read(adv7511, connector); - drm_connector_update_edid_property(connector, edid); - count = drm_add_edid_modes(connector, edid); + drm_edid_connector_update(connector, drm_edid); + count = drm_edid_connector_add_modes(connector); - kfree(edid); + drm_edid_free(drm_edid); return count; } @@ -953,12 +966,12 @@ static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge return adv7511_detect(adv, NULL); } -static struct edid *adv7511_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct adv7511 *adv = bridge_to_adv7511(bridge); - return adv7511_get_edid(adv, connector); + return adv7511_edid_read(adv, connector); } static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge, @@ -977,7 +990,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = { .mode_valid = adv7511_bridge_mode_valid, .attach = adv7511_bridge_attach, .detect = adv7511_bridge_detect, - .get_edid = adv7511_bridge_get_edid, + .edid_read = adv7511_bridge_edid_read, .hpd_notify = adv7511_bridge_hpd_notify, }; @@ -1277,17 +1290,6 @@ static int adv7511_probe(struct i2c_client *i2c) INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); - if (i2c->irq) { - init_waitqueue_head(&adv7511->wq); - - ret = devm_request_threaded_irq(dev, i2c->irq, NULL, - adv7511_irq_handler, - IRQF_ONESHOT, dev_name(dev), - adv7511); - if (ret) - goto err_unregister_cec; - } - adv7511_power_off(adv7511); i2c_set_clientdata(i2c, adv7511); @@ -1311,6 +1313,17 @@ static int adv7511_probe(struct i2c_client *i2c) adv7511_audio_init(dev, adv7511); + if (i2c->irq) { + init_waitqueue_head(&adv7511->wq); + + ret = devm_request_threaded_irq(dev, i2c->irq, NULL, + adv7511_irq_handler, + IRQF_ONESHOT, dev_name(dev), + adv7511); + if (ret) + goto err_unregister_audio; + } + if (adv7511->info->has_dsi) { ret = adv7533_attach_dsi(adv7511); if (ret) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 29d91493b101..9d96d28d6fe8 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1784,24 +1784,14 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux, return ret; } -static struct edid *anx7625_get_edid(struct anx7625_data *ctx) +static const struct drm_edid *anx7625_edid_read(struct anx7625_data *ctx) { struct device *dev = ctx->dev; struct s_edid_data *p_edid = &ctx->slimport_edid_p; int edid_num; - u8 *edid; - edid = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL); - if (!edid) { - DRM_DEV_ERROR(dev, "Fail to allocate buffer\n"); - return NULL; - } - - if (ctx->slimport_edid_p.edid_block_num > 0) { - memcpy(edid, ctx->slimport_edid_p.edid_raw_data, - FOUR_BLOCK_SIZE); - return (struct edid *)edid; - } + if (ctx->slimport_edid_p.edid_block_num > 0) + goto out; pm_runtime_get_sync(dev); 
_anx7625_hpd_polling(ctx, 5000 * 100); @@ -1810,14 +1800,14 @@ static struct edid *anx7625_get_edid(struct anx7625_data *ctx) if (edid_num < 1) { DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num); - kfree(edid); return NULL; } p_edid->edid_block_num = edid_num; - memcpy(edid, ctx->slimport_edid_p.edid_raw_data, FOUR_BLOCK_SIZE); - return (struct edid *)edid; +out: + return drm_edid_alloc(ctx->slimport_edid_p.edid_raw_data, + FOUR_BLOCK_SIZE); } static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx) @@ -2492,15 +2482,15 @@ anx7625_bridge_detect(struct drm_bridge *bridge) return anx7625_sink_detect(ctx); } -static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n"); - return anx7625_get_edid(ctx); + return anx7625_edid_read(ctx); } static const struct drm_bridge_funcs anx7625_bridge_funcs = { @@ -2515,7 +2505,7 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = { .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .detect = anx7625_bridge_detect, - .get_edid = anx7625_bridge_get_edid, + .edid_read = anx7625_bridge_edid_read, }; static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx, diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index 7d470527455b..e226acc5c15e 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -1505,33 +1505,35 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp) mhdp->link_up = false; } -static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp, - struct drm_connector *connector) +static const struct drm_edid *cdns_mhdp_edid_read(struct cdns_mhdp_device *mhdp, + struct drm_connector *connector) { if (!mhdp->plugged) return NULL; - return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp); + return drm_edid_read_custom(connector, cdns_mhdp_get_edid_block, mhdp); } static int cdns_mhdp_get_modes(struct drm_connector *connector) { struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector); - struct edid *edid; + const struct drm_edid *drm_edid; int num_modes; if (!mhdp->plugged) return 0; - edid = cdns_mhdp_get_edid(mhdp, connector); - if (!edid) { + drm_edid = cdns_mhdp_edid_read(mhdp, connector); + + drm_edid_connector_update(connector, drm_edid); + + if (!drm_edid) { dev_err(mhdp->dev, "Failed to read EDID\n"); return 0; } - drm_connector_update_edid_property(connector, edid); - num_modes = drm_add_edid_modes(connector, edid); - kfree(edid); + num_modes = drm_edid_connector_add_modes(connector); + drm_edid_free(drm_edid); /* * HACK: Warn about unsupported display formats until we deal @@ -2220,12 +2222,12 @@ static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *brid return cdns_mhdp_detect(mhdp); } -static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *cdns_mhdp_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); - return cdns_mhdp_get_edid(mhdp, connector); + return cdns_mhdp_edid_read(mhdp, 
connector); } static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = { @@ -2239,7 +2241,7 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = { .atomic_reset = cdns_mhdp_bridge_atomic_reset, .atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts, .detect = cdns_mhdp_bridge_detect, - .get_edid = cdns_mhdp_bridge_get_edid, + .edid_read = cdns_mhdp_bridge_edid_read, .hpd_enable = cdns_mhdp_bridge_hpd_enable, .hpd_disable = cdns_mhdp_bridge_hpd_disable, }; diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 08bd5695ddae..ab8e00baf3f1 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -81,12 +81,12 @@ display_connector_detect(struct drm_bridge *bridge) } } -static struct edid *display_connector_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct display_connector *conn = to_display_connector(bridge); - return drm_get_edid(connector, conn->bridge.ddc); + return drm_edid_read_ddc(connector, conn->bridge.ddc); } /* @@ -172,7 +172,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge, static const struct drm_bridge_funcs display_connector_bridge_funcs = { .attach = display_connector_attach, .detect = display_connector_detect, - .get_edid = display_connector_get_edid, + .edid_read = display_connector_edid_read, .atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts, .atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index b589136ca6da..27334173e911 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -458,7 +458,7 @@ struct it6505 { /* it6505 driver hold option */ bool enable_drv_hold; - struct edid *cached_edid; + const struct drm_edid *cached_edid; }; struct it6505_step_train_para { @@ -2263,7 +2263,7 @@ static void it6505_plugged_status_to_codec(struct it6505 *it6505) static void it6505_remove_edid(struct it6505 *it6505) { - kfree(it6505->cached_edid); + drm_edid_free(it6505->cached_edid); it6505->cached_edid = NULL; } @@ -3034,15 +3034,16 @@ it6505_bridge_detect(struct drm_bridge *bridge) return it6505_detect(it6505); } -static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *it6505_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct it6505 *it6505 = bridge_to_it6505(bridge); struct device *dev = it6505->dev; if (!it6505->cached_edid) { - it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block, - it6505); + it6505->cached_edid = drm_edid_read_custom(connector, + it6505_get_edid_block, + it6505); if (!it6505->cached_edid) { DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!"); @@ -3050,7 +3051,7 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge, } } - return drm_edid_duplicate(it6505->cached_edid); + return drm_edid_dup(it6505->cached_edid); } static const struct drm_bridge_funcs it6505_bridge_funcs = { @@ -3065,7 +3066,7 @@ static const struct drm_bridge_funcs it6505_bridge_funcs = { .atomic_pre_enable = it6505_bridge_atomic_pre_enable, .atomic_post_disable = it6505_bridge_atomic_post_disable, .detect = it6505_bridge_detect, 
- .get_edid = it6505_bridge_get_edid, + .edid_read = it6505_bridge_edid_read, }; static __maybe_unused int it6505_bridge_resume(struct device *dev) diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index 1cf3fb1f13dc..1c3433b5e366 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -874,33 +874,33 @@ static void it66121_bridge_hpd_disable(struct drm_bridge *bridge) dev_err(ctx->dev, "failed to disable HPD IRQ\n"); } -static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *it66121_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); - struct edid *edid; + const struct drm_edid *drm_edid; int ret; mutex_lock(&ctx->lock); ret = it66121_preamble_ddc(ctx); if (ret) { - edid = NULL; + drm_edid = NULL; goto out_unlock; } ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG, IT66121_DDC_HEADER_EDID); if (ret) { - edid = NULL; + drm_edid = NULL; goto out_unlock; } - edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx); + drm_edid = drm_edid_read_custom(connector, it66121_get_edid_block, ctx); out_unlock: mutex_unlock(&ctx->lock); - return edid; + return drm_edid; } static const struct drm_bridge_funcs it66121_bridge_funcs = { @@ -916,7 +916,7 @@ static const struct drm_bridge_funcs it66121_bridge_funcs = { .mode_set = it66121_bridge_mode_set, .mode_valid = it66121_bridge_mode_valid, .detect = it66121_bridge_detect, - .get_edid = it66121_bridge_get_edid, + .edid_read = it66121_bridge_edid_read, .hpd_enable = it66121_bridge_hpd_enable, .hpd_disable = it66121_bridge_hpd_disable, }; diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index b9205d14d943..a9c7e2b07ea1 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -847,13 +847,13 @@ lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge, lt9611_sleep_setup(lt9611); } -static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *lt9611_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); lt9611_power_on(lt9611); - return drm_do_get_edid(connector, lt9611_get_edid_block, lt9611); + return drm_edid_read_custom(connector, lt9611_get_edid_block, lt9611); } static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge) @@ -893,7 +893,7 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = { .attach = lt9611_bridge_attach, .mode_valid = lt9611_bridge_mode_valid, .detect = lt9611_bridge_detect, - .get_edid = lt9611_bridge_get_edid, + .edid_read = lt9611_bridge_edid_read, .hpd_enable = lt9611_bridge_hpd_enable, .atomic_pre_enable = lt9611_bridge_atomic_pre_enable, diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 4eaf99618749..bcf8bccd86d6 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -495,8 +495,8 @@ static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, siz return 0; }; -static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *lt9611uxc_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector 
*connector) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); int ret; @@ -510,7 +510,7 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, return NULL; } - return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc); + return drm_edid_read_custom(connector, lt9611uxc_get_edid_block, lt9611uxc); } static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = { @@ -518,7 +518,7 @@ static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = { .mode_valid = lt9611uxc_bridge_mode_valid, .mode_set = lt9611uxc_bridge_mode_set, .detect = lt9611uxc_bridge_detect, - .get_edid = lt9611uxc_bridge_get_edid, + .edid_read = lt9611uxc_bridge_edid_read, }; static int lt9611uxc_parse_dt(struct device *dev, diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index e93083bbec9d..4480523244e4 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -91,26 +91,26 @@ static int stdp2690_read_block(void *context, u8 *buf, unsigned int block, size_ return 0; } -static struct edid *ge_b850v3_lvds_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *ge_b850v3_lvds_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct i2c_client *client; client = ge_b850v3_lvds_ptr->stdp2690_i2c; - return drm_do_get_edid(connector, stdp2690_read_block, client); + return drm_edid_read_custom(connector, stdp2690_read_block, client); } static int ge_b850v3_lvds_get_modes(struct drm_connector *connector) { - struct edid *edid; + const struct drm_edid *drm_edid; int num_modes; - edid = ge_b850v3_lvds_get_edid(&ge_b850v3_lvds_ptr->bridge, connector); + drm_edid = ge_b850v3_lvds_edid_read(&ge_b850v3_lvds_ptr->bridge, connector); - drm_connector_update_edid_property(connector, edid); - num_modes = drm_add_edid_modes(connector, edid); - kfree(edid); + drm_edid_connector_update(connector, drm_edid); + num_modes = drm_edid_connector_add_modes(connector); + drm_edid_free(drm_edid); return num_modes; } @@ -226,7 +226,7 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge, static const struct drm_bridge_funcs ge_b850v3_lvds_funcs = { .attach = ge_b850v3_lvds_attach, .detect = ge_b850v3_lvds_bridge_detect, - .get_edid = ge_b850v3_lvds_get_edid, + .edid_read = ge_b850v3_lvds_edid_read, }; static int ge_b850v3_lvds_init(struct device *dev) diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 7c0076e49953..ed93fd4c3265 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -154,10 +154,11 @@ static void ptn3460_disable(struct drm_bridge *bridge) } -static struct edid *ptn3460_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *ptn3460_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); + const struct drm_edid *drm_edid = NULL; bool power_off; u8 *edid; int ret; @@ -175,27 +176,28 @@ static struct edid *ptn3460_get_edid(struct drm_bridge *bridge, EDID_LENGTH); if (ret) { kfree(edid); - edid = NULL; goto out; } + drm_edid = drm_edid_alloc(edid, EDID_LENGTH); + out: if (power_off) ptn3460_disable(&ptn_bridge->bridge); - return (struct edid *)edid; + return drm_edid; } static int ptn3460_connector_get_modes(struct drm_connector *connector) { struct 
ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector); - struct edid *edid; + const struct drm_edid *drm_edid; int num_modes; - edid = ptn3460_get_edid(&ptn_bridge->bridge, connector); - drm_connector_update_edid_property(connector, edid); - num_modes = drm_add_edid_modes(connector, edid); - kfree(edid); + drm_edid = ptn3460_edid_read(&ptn_bridge->bridge, connector); + drm_edid_connector_update(connector, drm_edid); + num_modes = drm_edid_connector_add_modes(connector); + drm_edid_free(drm_edid); return num_modes; } @@ -254,7 +256,7 @@ static const struct drm_bridge_funcs ptn3460_bridge_funcs = { .pre_enable = ptn3460_pre_enable, .disable = ptn3460_disable, .attach = ptn3460_bridge_attach, - .get_edid = ptn3460_get_edid, + .edid_read = ptn3460_edid_read, }; static int ptn3460_probe(struct i2c_client *client) diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 4560ae9cbce1..8f84e98249c7 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -278,39 +278,35 @@ static const struct drm_connector_funcs sii902x_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static struct edid *sii902x_get_edid(struct sii902x *sii902x, - struct drm_connector *connector) +static const struct drm_edid *sii902x_edid_read(struct sii902x *sii902x, + struct drm_connector *connector) { - struct edid *edid; + const struct drm_edid *drm_edid; mutex_lock(&sii902x->mutex); - edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]); - if (edid) { - if (drm_detect_hdmi_monitor(edid)) - sii902x->sink_is_hdmi = true; - else - sii902x->sink_is_hdmi = false; - } + drm_edid = drm_edid_read_ddc(connector, sii902x->i2cmux->adapter[0]); mutex_unlock(&sii902x->mutex); - return edid; + return drm_edid; } static int sii902x_get_modes(struct drm_connector *connector) { struct sii902x *sii902x = connector_to_sii902x(connector); - struct edid *edid; + const struct drm_edid *drm_edid; int num = 0; - edid = sii902x_get_edid(sii902x, connector); - drm_connector_update_edid_property(connector, edid); - if (edid) { - num = drm_add_edid_modes(connector, edid); - kfree(edid); + drm_edid = sii902x_edid_read(sii902x, connector); + drm_edid_connector_update(connector, drm_edid); + if (drm_edid) { + num = drm_edid_connector_add_modes(connector); + drm_edid_free(drm_edid); } + sii902x->sink_is_hdmi = connector->display_info.is_hdmi; + return num; } @@ -465,12 +461,12 @@ static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge return sii902x_detect(sii902x); } -static struct edid *sii902x_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *sii902x_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct sii902x *sii902x = bridge_to_sii902x(bridge); - return sii902x_get_edid(sii902x, connector); + return sii902x_edid_read(sii902x, connector); } static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, @@ -514,7 +510,7 @@ static const struct drm_bridge_funcs sii902x_bridge_funcs = { .disable = sii902x_bridge_disable, .enable = sii902x_bridge_enable, .detect = sii902x_bridge_detect, - .get_edid = sii902x_bridge_get_edid, + .edid_read = sii902x_bridge_edid_read, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 
b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index ca74a20015b3..cceb5aab6c83 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2454,27 +2454,35 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi) return result; } -static struct edid *dw_hdmi_get_edid(struct dw_hdmi *hdmi, - struct drm_connector *connector) +static const struct drm_edid *dw_hdmi_edid_read(struct dw_hdmi *hdmi, + struct drm_connector *connector) { - struct edid *edid; + const struct drm_edid *drm_edid; + const struct edid *edid; if (!hdmi->ddc) return NULL; - edid = drm_get_edid(connector, hdmi->ddc); - if (!edid) { + drm_edid = drm_edid_read_ddc(connector, hdmi->ddc); + if (!drm_edid) { dev_dbg(hdmi->dev, "failed to get edid\n"); return NULL; } + /* + * FIXME: This should use connector->display_info.is_hdmi and + * connector->display_info.has_audio from a path that has read the EDID + * and called drm_edid_connector_update(). + */ + edid = drm_edid_raw(drm_edid); + dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n", edid->width_cm, edid->height_cm); hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid); hdmi->sink_has_audio = drm_detect_monitor_audio(edid); - return edid; + return drm_edid; } /* ----------------------------------------------------------------------------- @@ -2493,17 +2501,16 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector) { struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, connector); - struct edid *edid; + const struct drm_edid *drm_edid; int ret; - edid = dw_hdmi_get_edid(hdmi, connector); - if (!edid) - return 0; + drm_edid = dw_hdmi_edid_read(hdmi, connector); - drm_connector_update_edid_property(connector, edid); - cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); + drm_edid_connector_update(connector, drm_edid); + cec_notifier_set_phys_addr(hdmi->cec_notifier, + connector->display_info.source_physical_address); + ret = drm_edid_connector_add_modes(connector); + drm_edid_free(drm_edid); return ret; } @@ -2980,12 +2987,12 @@ static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge return dw_hdmi_detect(hdmi); } -static struct edid *dw_hdmi_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *dw_hdmi_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct dw_hdmi *hdmi = bridge->driver_private; - return dw_hdmi_get_edid(hdmi, connector); + return dw_hdmi_edid_read(hdmi, connector); } static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { @@ -3002,7 +3009,7 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { .mode_set = dw_hdmi_bridge_mode_set, .mode_valid = dw_hdmi_bridge_mode_valid, .detect = dw_hdmi_bridge_detect, - .get_edid = dw_hdmi_bridge_get_edid, + .edid_read = dw_hdmi_bridge_edid_read, }; /* ----------------------------------------------------------------------------- @@ -3542,6 +3549,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, hdmi->bridge.interlace_allowed = true; hdmi->bridge.ddc = hdmi->ddc; hdmi->bridge.of_node = pdev->dev.of_node; + hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; memset(&pdevinfo, 0, sizeof(pdevinfo)); pdevinfo.parent = dev; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index f10ba91dc252..166f9a3e9622 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ 
-1651,19 +1651,19 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge, drm_mode_copy(&tc->mode, mode); } -static struct edid *tc_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct tc_data *tc = bridge_to_tc(bridge); - return drm_get_edid(connector, &tc->aux.ddc); + return drm_edid_read_ddc(connector, &tc->aux.ddc); } static int tc_connector_get_modes(struct drm_connector *connector) { struct tc_data *tc = connector_to_tc(connector); int num_modes; - struct edid *edid; + const struct drm_edid *drm_edid; int ret; ret = tc_get_display_props(tc); @@ -1678,9 +1678,10 @@ static int tc_connector_get_modes(struct drm_connector *connector) return num_modes; } - edid = tc_get_edid(&tc->bridge, connector); - num_modes = drm_add_edid_modes(connector, edid); - kfree(edid); + drm_edid = tc_edid_read(&tc->bridge, connector); + drm_edid_connector_update(connector, drm_edid); + num_modes = drm_edid_connector_add_modes(connector); + drm_edid_free(drm_edid); return num_modes; } @@ -1849,7 +1850,7 @@ static const struct drm_bridge_funcs tc_edp_bridge_funcs = { .atomic_enable = tc_edp_bridge_atomic_enable, .atomic_disable = tc_edp_bridge_atomic_disable, .detect = tc_bridge_detect, - .get_edid = tc_get_edid, + .edid_read = tc_edid_read, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 62cc3893dca5..61dc6f063fb4 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1207,19 +1207,19 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge) : connector_status_disconnected; } -static struct edid *ti_sn_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); - return drm_get_edid(connector, &pdata->aux.ddc); + return drm_edid_read_ddc(connector, &pdata->aux.ddc); } static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .attach = ti_sn_bridge_attach, .detach = ti_sn_bridge_detach, .mode_valid = ti_sn_bridge_mode_valid, - .get_edid = ti_sn_bridge_get_edid, + .edid_read = ti_sn_bridge_edid_read, .detect = ti_sn_bridge_detect, .atomic_pre_enable = ti_sn_bridge_atomic_pre_enable, .atomic_enable = ti_sn_bridge_atomic_enable, diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 355b794ef2b1..0857773e5c5f 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -119,7 +119,10 @@ msm:apq8016: DRIVER_NAME: msm BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc-usb-host.dtb GPU_VERSION: apq8016 - BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS" + # disabling unused clocks congests with the MDSS runtime PM trying to + # disable those clocks and causes boot to fail. 
+ # Reproducer: DRM_MSM=y, DRM_I2C_ADV7511=m + BM_KERNEL_EXTRA_ARGS: clk_ignore_unused RUNNER_TAG: google-freedreno-db410c script: - ./install/bare-metal/fastboot.sh diff --git a/drivers/gpu/drm/ci/testlist.txt b/drivers/gpu/drm/ci/testlist.txt index eaeb751bb0ad..3377f002f8c5 100644 --- a/drivers/gpu/drm/ci/testlist.txt +++ b/drivers/gpu/drm/ci/testlist.txt @@ -100,7 +100,7 @@ kms_atomic@plane-invalid-params-fence kms_atomic@crtc-invalid-params kms_atomic@crtc-invalid-params-fence kms_atomic@atomic-invalid-params -kms_atomic@atomic_plane_damage +kms_atomic@atomic-plane-damage kms_atomic_interruptible@legacy-setmode kms_atomic_interruptible@atomic-setmode kms_atomic_interruptible@legacy-dpms @@ -321,726 +321,726 @@ kms_bw@linear-tiling-7-displays-3840x2160p kms_bw@linear-tiling-8-displays-1920x1080p kms_bw@linear-tiling-8-displays-2560x1440p kms_bw@linear-tiling-8-displays-3840x2160p -kms_ccs@pipe-A-bad-pixel-format-y_tiled_ccs -kms_ccs@pipe-A-bad-pixel-format-yf_tiled_ccs -kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_mc_ccs -kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs -kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_mc_ccs -kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-A-bad-rotation-90-y_tiled_ccs -kms_ccs@pipe-A-bad-rotation-90-yf_tiled_ccs -kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_mc_ccs -kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs -kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_mc_ccs -kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-A-crc-primary-basic-y_tiled_ccs -kms_ccs@pipe-A-crc-primary-basic-yf_tiled_ccs -kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_ccs -kms_ccs@pipe-A-crc-primary-rotation-180-yf_tiled_ccs -kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_mc_ccs -kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs -kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_mc_ccs -kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-A-random-ccs-data-y_tiled_ccs -kms_ccs@pipe-A-random-ccs-data-yf_tiled_ccs -kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_mc_ccs 
-kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs -kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_mc_ccs -kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_ccs -kms_ccs@pipe-A-missing-ccs-buffer-yf_tiled_ccs -kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_mc_ccs -kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_ccs -kms_ccs@pipe-A-ccs-on-another-bo-yf_tiled_ccs -kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_mc_ccs -kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-A-bad-aux-stride-y_tiled_ccs -kms_ccs@pipe-A-bad-aux-stride-yf_tiled_ccs -kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_mc_ccs -kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_ccs -kms_ccs@pipe-A-crc-sprite-planes-basic-yf_tiled_ccs -kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs -kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs -kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-bad-pixel-format-y_tiled_ccs -kms_ccs@pipe-B-bad-pixel-format-yf_tiled_ccs -kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs -kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs -kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_mc_ccs -kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_mc_ccs -kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-bad-rotation-90-y_tiled_ccs -kms_ccs@pipe-B-bad-rotation-90-yf_tiled_ccs -kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs -kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs -kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_mc_ccs -kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_mc_ccs -kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-crc-primary-basic-y_tiled_ccs -kms_ccs@pipe-B-crc-primary-basic-yf_tiled_ccs -kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs 
-kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_mc_ccs -kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_ccs -kms_ccs@pipe-B-crc-primary-rotation-180-yf_tiled_ccs -kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs -kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs -kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_mc_ccs -kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_mc_ccs -kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-random-ccs-data-y_tiled_ccs -kms_ccs@pipe-B-random-ccs-data-yf_tiled_ccs -kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs -kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs -kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_mc_ccs -kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_mc_ccs -kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_ccs -kms_ccs@pipe-B-missing-ccs-buffer-yf_tiled_ccs -kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs -kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_mc_ccs -kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_ccs -kms_ccs@pipe-B-ccs-on-another-bo-yf_tiled_ccs -kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs -kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_mc_ccs -kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-bad-aux-stride-y_tiled_ccs -kms_ccs@pipe-B-bad-aux-stride-yf_tiled_ccs -kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs -kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_mc_ccs -kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_ccs -kms_ccs@pipe-B-crc-sprite-planes-basic-yf_tiled_ccs -kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs -kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs 
-kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-bad-pixel-format-y_tiled_ccs -kms_ccs@pipe-C-bad-pixel-format-yf_tiled_ccs -kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs -kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_mc_ccs -kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-bad-rotation-90-y_tiled_ccs -kms_ccs@pipe-C-bad-rotation-90-yf_tiled_ccs -kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs -kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_mc_ccs -kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-crc-primary-basic-y_tiled_ccs -kms_ccs@pipe-C-crc-primary-basic-yf_tiled_ccs -kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_ccs -kms_ccs@pipe-C-crc-primary-rotation-180-yf_tiled_ccs -kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs -kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_mc_ccs -kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-random-ccs-data-y_tiled_ccs -kms_ccs@pipe-C-random-ccs-data-yf_tiled_ccs -kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs -kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_mc_ccs -kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_ccs -kms_ccs@pipe-C-missing-ccs-buffer-yf_tiled_ccs -kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_ccs -kms_ccs@pipe-C-ccs-on-another-bo-yf_tiled_ccs 
-kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-bad-aux-stride-y_tiled_ccs -kms_ccs@pipe-C-bad-aux-stride-yf_tiled_ccs -kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_ccs -kms_ccs@pipe-C-crc-sprite-planes-basic-yf_tiled_ccs -kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs -kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs -kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-bad-pixel-format-y_tiled_ccs -kms_ccs@pipe-D-bad-pixel-format-yf_tiled_ccs -kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs -kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_mc_ccs -kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-bad-rotation-90-y_tiled_ccs -kms_ccs@pipe-D-bad-rotation-90-yf_tiled_ccs -kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs -kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_mc_ccs -kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-crc-primary-basic-y_tiled_ccs -kms_ccs@pipe-D-crc-primary-basic-yf_tiled_ccs -kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_ccs -kms_ccs@pipe-D-crc-primary-rotation-180-yf_tiled_ccs -kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs -kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_mc_ccs 
-kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-random-ccs-data-y_tiled_ccs -kms_ccs@pipe-D-random-ccs-data-yf_tiled_ccs -kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs -kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_mc_ccs -kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_ccs -kms_ccs@pipe-D-missing-ccs-buffer-yf_tiled_ccs -kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_ccs -kms_ccs@pipe-D-ccs-on-another-bo-yf_tiled_ccs -kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-bad-aux-stride-y_tiled_ccs -kms_ccs@pipe-D-bad-aux-stride-yf_tiled_ccs -kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_ccs -kms_ccs@pipe-D-crc-sprite-planes-basic-yf_tiled_ccs -kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs -kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs -kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-bad-pixel-format-y_tiled_ccs -kms_ccs@pipe-E-bad-pixel-format-yf_tiled_ccs -kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs -kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_mc_ccs -kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs -kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_mc_ccs -kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs -kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_mc_ccs -kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-bad-rotation-90-y_tiled_ccs -kms_ccs@pipe-E-bad-rotation-90-yf_tiled_ccs -kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs -kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_mc_ccs 
-kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs -kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_mc_ccs -kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs -kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_mc_ccs -kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-crc-primary-basic-y_tiled_ccs -kms_ccs@pipe-E-crc-primary-basic-yf_tiled_ccs -kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs -kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_mc_ccs -kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs -kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_mc_ccs -kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs -kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_mc_ccs -kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_ccs -kms_ccs@pipe-E-crc-primary-rotation-180-yf_tiled_ccs -kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs -kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_mc_ccs -kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs -kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_mc_ccs -kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs -kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_mc_ccs -kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-random-ccs-data-y_tiled_ccs -kms_ccs@pipe-E-random-ccs-data-yf_tiled_ccs -kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs -kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_mc_ccs -kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs -kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_mc_ccs -kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs_cc -kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs -kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_mc_ccs -kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_ccs -kms_ccs@pipe-E-missing-ccs-buffer-yf_tiled_ccs -kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs -kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_mc_ccs -kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs -kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_mc_ccs -kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_ccs -kms_ccs@pipe-E-ccs-on-another-bo-yf_tiled_ccs -kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs -kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_mc_ccs -kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs -kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_mc_ccs -kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-bad-aux-stride-y_tiled_ccs -kms_ccs@pipe-E-bad-aux-stride-yf_tiled_ccs -kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs -kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs_cc -kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_mc_ccs -kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs -kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_mc_ccs -kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs_cc -kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_ccs -kms_ccs@pipe-E-crc-sprite-planes-basic-yf_tiled_ccs -kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs 
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-F-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-F-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-F-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-F-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-F-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-G-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-G-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-G-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-G-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-G-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_ccs
-kms_ccs@pipe-H-bad-pixel-format-yf_tiled_ccs
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_ccs
-kms_ccs@pipe-H-bad-rotation-90-yf_tiled_ccs
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_ccs
-kms_ccs@pipe-H-crc-primary-basic-yf_tiled_ccs
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-yf_tiled_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-y_tiled_ccs
-kms_ccs@pipe-H-random-ccs-data-yf_tiled_ccs
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-yf_tiled_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-yf_tiled_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_ccs
-kms_ccs@pipe-H-bad-aux-stride-yf_tiled_ccs
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-yf_tiled_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
-kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-A-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-A-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-A-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-A-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-A-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-B-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-B-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-B-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-B-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-B-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-C-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-C-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-C-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-C-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-C-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-D-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-D-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-D-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-D-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-D-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-E-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-E-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-E-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-E-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-E-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-F-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-F-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-F-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-F-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-F-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-G-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-G-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-G-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-G-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-G-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-ccs
+kms_ccs@pipe-H-bad-pixel-format-yf-tiled-ccs
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-pixel-format-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-ccs
+kms_ccs@pipe-H-bad-rotation-90-yf-tiled-ccs
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-rotation-90-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-ccs
+kms_ccs@pipe-H-crc-primary-basic-yf-tiled-ccs
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-primary-basic-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-yf-tiled-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-y-tiled-ccs
+kms_ccs@pipe-H-random-ccs-data-yf-tiled-ccs
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-random-ccs-data-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-yf-tiled-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-missing-ccs-buffer-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-yf-tiled-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-ccs-on-another-bo-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-ccs
+kms_ccs@pipe-H-bad-aux-stride-yf-tiled-ccs
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-bad-aux-stride-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-bad-aux-stride-4-tiled-mtl-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-yf-tiled-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y-tiled-gen12-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-dg2-rc-ccs-cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-mc-ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc
kms_cdclk@plane-scaling
kms_cdclk@mode-transition
kms_cdclk@mode-transition-all-outputs
@@ -1061,21 +1061,14 @@ kms_color@deep-color
kms_color@invalid-gamma-lut-sizes
kms_color@invalid-degamma-lut-sizes
kms_color@invalid-ctm-matrix-sizes
-kms_concurrent@pipe-A
-kms_concurrent@pipe-B
-kms_concurrent@pipe-C
-kms_concurrent@pipe-D
-kms_concurrent@pipe-E
-kms_concurrent@pipe-F
-kms_concurrent@pipe-G
-kms_concurrent@pipe-H
+kms_concurrent@multi-plane-atomic-lowres
kms_content_protection@legacy
kms_content_protection@atomic
kms_content_protection@atomic-dpms
-kms_content_protection@LIC
+kms_content_protection@lic
kms_content_protection@type1
-kms_content_protection@mei_interface
-kms_content_protection@content_type_change
+kms_content_protection@mei-interface
+kms_content_protection@content-type-change
kms_content_protection@uevent
kms_content_protection@srm
kms_content_protection@dp-mst-type-0
@@ -1218,8 +1211,8 @@ kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions-varying-size
kms_cursor_legacy@cursorA-vs-flipB-atomic-transitions-varying-size
kms_cursor_legacy@cursorB-vs-flipA-atomic-transitions-varying-size
kms_cursor_legacy@cursorB-vs-flipB-atomic-transitions-varying-size
-kms_dither@FB-8BPC-Vs-Panel-6BPC
-kms_dither@FB-8BPC-Vs-Panel-8BPC
+kms_dither@fb-8bpc-vs-panel-6bpc
+kms_dither@fb-8bpc-vs-panel-8bpc
kms_dp_aux_dev
kms_tiled_display@basic-test-pattern
kms_tiled_display@basic-test-pattern-with-chamelium
@@ -2351,7 +2344,6 @@ kms_frontbuffer_tracking@psrdrrs-shrfb-scaledprimary
kms_frontbuffer_tracking@fbcpsrdrrs-indfb-scaledprimary
kms_frontbuffer_tracking@fbcpsrdrrs-shrfb-scaledprimary
kms_frontbuffer_tracking@fbc-modesetfrombusy
-kms_frontbuffer_tracking@fbc-badstride
kms_frontbuffer_tracking@fbc-stridechange
kms_frontbuffer_tracking@fbc-tiling-linear
kms_frontbuffer_tracking@fbc-tiling-y
@@ -2361,7 +2353,6 @@ kms_frontbuffer_tracking@psr-modesetfrombusy
kms_frontbuffer_tracking@psr-slowdraw
kms_frontbuffer_tracking@psr-suspend
kms_frontbuffer_tracking@fbcpsr-modesetfrombusy
-kms_frontbuffer_tracking@fbcpsr-badstride
kms_frontbuffer_tracking@fbcpsr-stridechange
kms_frontbuffer_tracking@fbcpsr-tiling-linear
kms_frontbuffer_tracking@fbcpsr-tiling-y
@@ -2372,7 +2363,6 @@ kms_frontbuffer_tracking@drrs-modesetfrombusy
kms_frontbuffer_tracking@drrs-slowdraw
kms_frontbuffer_tracking@drrs-suspend
kms_frontbuffer_tracking@fbcdrrs-modesetfrombusy
-kms_frontbuffer_tracking@fbcdrrs-badstride
kms_frontbuffer_tracking@fbcdrrs-stridechange
kms_frontbuffer_tracking@fbcdrrs-tiling-linear
kms_frontbuffer_tracking@fbcdrrs-tiling-y
@@ -2383,7 +2373,6 @@ kms_frontbuffer_tracking@psrdrrs-modesetfrombusy
kms_frontbuffer_tracking@psrdrrs-slowdraw
kms_frontbuffer_tracking@psrdrrs-suspend
kms_frontbuffer_tracking@fbcpsrdrrs-modesetfrombusy
-kms_frontbuffer_tracking@fbcpsrdrrs-badstride
kms_frontbuffer_tracking@fbcpsrdrrs-stridechange
kms_frontbuffer_tracking@fbcpsrdrrs-tiling-linear
kms_frontbuffer_tracking@fbcpsrdrrs-tiling-y
@@ -2456,7 +2445,7 @@ kms_plane@plane-position-hole-dpms
kms_plane@plane-panning-top-left
kms_plane@plane-panning-bottom-right
kms_plane@plane-panning-bottom-right-suspend
-kms_plane@invalid-pixel-format-settings
+kms_plane@planar-pixel-format-settings
kms_plane_alpha_blend@alpha-basic
kms_plane_alpha_blend@alpha-7efc
kms_plane_alpha_blend@coverage-7efc
@@ -2479,24 +2468,24 @@ kms_plane_multiple@tiling-x
kms_plane_multiple@tiling-y
kms_plane_multiple@tiling-yf
kms_plane_multiple@tiling-4
-kms_plane_scaling@plane-upscale-with-pixel-format-20x20
-kms_plane_scaling@plane-upscale-with-pixel-format-factor-0-25
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-5
-kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-75
-kms_plane_scaling@plane-scaler-with-pixel-format-unity-scaling
-kms_plane_scaling@plane-upscale-with-rotation-20x20
-kms_plane_scaling@plane-upscale-with-rotation-factor-0-25
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-25
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-5
-kms_plane_scaling@plane-downscale-with-rotation-factor-0-75
-kms_plane_scaling@plane-scaler-with-rotation-unity-scaling
-kms_plane_scaling@plane-upscale-with-modifiers-20x20
-kms_plane_scaling@plane-upscale-with-modifiers-factor-0-25
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-25
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-5
-kms_plane_scaling@plane-downscale-with-modifiers-factor-0-75
-kms_plane_scaling@plane-scaler-with-modifiers-unity-scaling
+kms_plane_scaling@plane-upscale-20x20-with-pixel-format
+kms_plane_scaling@plane-upscale-factor-0-25-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-25-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-5-with-pixel-format
+kms_plane_scaling@plane-downscale-factor-0-75-with-pixel-format
+kms_plane_scaling@plane-scaler-unity-scaling-with-pixel-format
+kms_plane_scaling@plane-upscale-20x20-with-rotation
+kms_plane_scaling@plane-upscale-factor-0-25-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-25-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-5-with-rotation
+kms_plane_scaling@plane-downscale-factor-0-75-with-rotation
+kms_plane_scaling@plane-scaler-unity-scaling-with-rotation
+kms_plane_scaling@plane-upscale-20x20-with-modifiers
+kms_plane_scaling@plane-upscale-factor-0-25-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-25-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-5-with-modifiers
+kms_plane_scaling@plane-downscale-factor-0-75-with-modifiers
+kms_plane_scaling@plane-scaler-unity-scaling-with-modifiers
kms_plane_scaling@plane-scaler-with-clipping-clamping-pixel-formats
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation
kms_plane_scaling@plane-scaler-with-clipping-clamping-modifiers
@@ -2551,48 +2540,69 @@ kms_properties@invalid-properties-legacy
kms_properties@invalid-properties-atomic
kms_properties@get_properties-sanity-atomic
kms_properties@get_properties-sanity-non-atomic
-kms_psr@basic
-kms_psr@no_drrs
-kms_psr@primary_page_flip
-kms_psr@primary_mmap_gtt
-kms_psr@primary_mmap_cpu
-kms_psr@primary_blt
-kms_psr@primary_render
-kms_psr@sprite_mmap_gtt -kms_psr@cursor_mmap_gtt -kms_psr@sprite_mmap_cpu -kms_psr@cursor_mmap_cpu -kms_psr@sprite_blt -kms_psr@cursor_blt -kms_psr@sprite_render -kms_psr@cursor_render -kms_psr@sprite_plane_move -kms_psr@cursor_plane_move -kms_psr@sprite_plane_onoff -kms_psr@cursor_plane_onoff -kms_psr@dpms -kms_psr@suspend -kms_psr@psr2_basic -kms_psr@psr2_no_drrs -kms_psr@psr2_primary_page_flip -kms_psr@psr2_primary_mmap_gtt -kms_psr@psr2_primary_mmap_cpu -kms_psr@psr2_primary_blt -kms_psr@psr2_primary_render -kms_psr@psr2_sprite_mmap_gtt -kms_psr@psr2_cursor_mmap_gtt -kms_psr@psr2_sprite_mmap_cpu -kms_psr@psr2_cursor_mmap_cpu -kms_psr@psr2_sprite_blt -kms_psr@psr2_cursor_blt -kms_psr@psr2_sprite_render -kms_psr@psr2_cursor_render -kms_psr@psr2_sprite_plane_move -kms_psr@psr2_cursor_plane_move -kms_psr@psr2_sprite_plane_onoff -kms_psr@psr2_cursor_plane_onoff -kms_psr@psr2_dpms -kms_psr@psr2_suspend +kms_psr@pr-basic +kms_psr@pr-no-drrs +kms_psr@pr-primary-page-flip +kms_psr@pr-primary-mmap-gtt +kms_psr@pr-primary-mmap-cpu +kms_psr@pr-primary-blt +kms_psr@pr-primary-render +kms_psr@pr-sprite-mmap-gtt +kms_psr@pr-cursor-mmap-gtt +kms_psr@pr-sprite-mmap-cpu +kms_psr@pr-cursor-mmap-cpu +kms_psr@pr-sprite-blt +kms_psr@pr-cursor-blt +kms_psr@pr-sprite-render +kms_psr@pr-cursor-render +kms_psr@pr-sprite-plane-move +kms_psr@pr-cursor-plane-move +kms_psr@pr-sprite-plane-onoff +kms_psr@pr-cursor-plane-onoff +kms_psr@pr-dpms +kms_psr@pr-suspend +kms_psr@psr-basic +kms_psr@psr-no-drrs +kms_psr@psr-primary-page-flip +kms_psr@psr-primary-mmap-gtt +kms_psr@psr-primary-mmap-cpu +kms_psr@psr-primary-blt +kms_psr@psr-primary-render +kms_psr@psr-sprite-mmap-gtt +kms_psr@psr-cursor-mmap-gtt +kms_psr@psr-sprite-mmap-cpu +kms_psr@psr-cursor-mmap-cpu +kms_psr@psr-sprite-blt +kms_psr@psr-cursor-blt +kms_psr@psr-sprite-render +kms_psr@psr-cursor-render +kms_psr@psr-sprite-plane-move +kms_psr@psr-cursor-plane-move +kms_psr@psr-sprite-plane-onoff +kms_psr@psr-cursor-plane-onoff +kms_psr@psr-dpms +kms_psr@psr-suspend +kms_psr@psr2-basic +kms_psr@psr2-no-drrs +kms_psr@psr2-primary-page-flip +kms_psr@psr2-primary-mmap-gtt +kms_psr@psr2-primary-mmap-cpu +kms_psr@psr2-primary-blt +kms_psr@psr2-primary-render +kms_psr@psr2-sprite-mmap-gtt +kms_psr@psr2-cursor-mmap-gtt +kms_psr@psr2-sprite-mmap-cpu +kms_psr@psr2-cursor-mmap-cpu +kms_psr@psr2-sprite-blt +kms_psr@psr2-cursor-blt +kms_psr@psr2-sprite-render +kms_psr@psr2-cursor-render +kms_psr@psr2-sprite-plane-move +kms_psr@psr2-cursor-plane-move +kms_psr@psr2-sprite-plane-onoff +kms_psr@psr2-cursor-plane-onoff +kms_psr@psr2-dpms +kms_psr@psr2-suspend kms_psr2_sf@primary-plane-update-sf-dmg-area kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb kms_psr2_sf@overlay-plane-update-sf-dmg-area @@ -2643,13 +2653,13 @@ kms_scaling_modes@scaling-mode-full kms_scaling_modes@scaling-mode-center kms_scaling_modes@scaling-mode-full-aspect kms_scaling_modes@scaling-mode-none -kms_selftest@drm_cmdline -kms_selftest@drm_damage -kms_selftest@drm_dp_mst +kms_selftest@drm_cmdline_parser +kms_selftest@drm_damage_helper +kms_selftest@drm_dp_mst_helper kms_selftest@drm_format_helper kms_selftest@drm_format -kms_selftest@framebuffer -kms_selftest@drm_plane +kms_selftest@drm_framebuffer +kms_selftest@drm_plane_helper kms_setmode@basic kms_setmode@basic-clone-single-crtc kms_setmode@invalid-clone-single-crtc @@ -2658,248 +2668,38 @@ kms_setmode@clone-exclusive-crtc kms_setmode@invalid-clone-single-crtc-stealing kms_sysfs_edid_timing kms_tv_load_detect@load-detect 
-kms_universal_plane@universal-plane-pipe-A-functional -kms_universal_plane@universal-plane-pipe-A-sanity -kms_universal_plane@disable-primary-vs-flip-pipe-A -kms_universal_plane@cursor-fb-leak-pipe-A -kms_universal_plane@universal-plane-pageflip-windowed-pipe-A -kms_universal_plane@universal-plane-pipe-B-functional -kms_universal_plane@universal-plane-pipe-B-sanity -kms_universal_plane@disable-primary-vs-flip-pipe-B -kms_universal_plane@cursor-fb-leak-pipe-B -kms_universal_plane@universal-plane-pageflip-windowed-pipe-B -kms_universal_plane@universal-plane-pipe-C-functional -kms_universal_plane@universal-plane-pipe-C-sanity -kms_universal_plane@disable-primary-vs-flip-pipe-C -kms_universal_plane@cursor-fb-leak-pipe-C -kms_universal_plane@universal-plane-pageflip-windowed-pipe-C -kms_universal_plane@universal-plane-pipe-D-functional -kms_universal_plane@universal-plane-pipe-D-sanity -kms_universal_plane@disable-primary-vs-flip-pipe-D -kms_universal_plane@cursor-fb-leak-pipe-D -kms_universal_plane@universal-plane-pageflip-windowed-pipe-D -kms_universal_plane@universal-plane-pipe-E-functional -kms_universal_plane@universal-plane-pipe-E-sanity -kms_universal_plane@disable-primary-vs-flip-pipe-E -kms_universal_plane@cursor-fb-leak-pipe-E -kms_universal_plane@universal-plane-pageflip-windowed-pipe-E -kms_universal_plane@universal-plane-pipe-F-functional -kms_universal_plane@universal-plane-pipe-F-sanity -kms_universal_plane@disable-primary-vs-flip-pipe-F -kms_universal_plane@cursor-fb-leak-pipe-F -kms_universal_plane@universal-plane-pageflip-windowed-pipe-F -kms_universal_plane@universal-plane-pipe-G-functional -kms_universal_plane@universal-plane-pipe-G-sanity -kms_universal_plane@disable-primary-vs-flip-pipe-G -kms_universal_plane@cursor-fb-leak-pipe-G -kms_universal_plane@universal-plane-pageflip-windowed-pipe-G -kms_universal_plane@universal-plane-pipe-H-functional -kms_universal_plane@universal-plane-pipe-H-sanity -kms_universal_plane@disable-primary-vs-flip-pipe-H -kms_universal_plane@cursor-fb-leak-pipe-H -kms_universal_plane@universal-plane-pageflip-windowed-pipe-H +kms_universal_plane@universal-plane-functional +kms_universal_plane@universal-plane-sanity +kms_universal_plane@disable-primary-vs-flip +kms_universal_plane@cursor-fb-leak +kms_universal_plane@universal-plane-pageflip-windowed kms_vblank@invalid kms_vblank@crtc-id -kms_vblank@pipe-A-accuracy-idle -kms_vblank@pipe-A-query-idle -kms_vblank@pipe-A-query-idle-hang -kms_vblank@pipe-A-query-forked -kms_vblank@pipe-A-query-forked-hang -kms_vblank@pipe-A-query-busy -kms_vblank@pipe-A-query-busy-hang -kms_vblank@pipe-A-query-forked-busy -kms_vblank@pipe-A-query-forked-busy-hang -kms_vblank@pipe-A-wait-idle -kms_vblank@pipe-A-wait-idle-hang -kms_vblank@pipe-A-wait-forked -kms_vblank@pipe-A-wait-forked-hang -kms_vblank@pipe-A-wait-busy -kms_vblank@pipe-A-wait-busy-hang -kms_vblank@pipe-A-wait-forked-busy -kms_vblank@pipe-A-wait-forked-busy-hang -kms_vblank@pipe-A-ts-continuation-idle -kms_vblank@pipe-A-ts-continuation-idle-hang -kms_vblank@pipe-A-ts-continuation-dpms-rpm -kms_vblank@pipe-A-ts-continuation-dpms-suspend -kms_vblank@pipe-A-ts-continuation-suspend -kms_vblank@pipe-A-ts-continuation-modeset -kms_vblank@pipe-A-ts-continuation-modeset-hang -kms_vblank@pipe-A-ts-continuation-modeset-rpm -kms_vblank@pipe-B-accuracy-idle -kms_vblank@pipe-B-query-idle -kms_vblank@pipe-B-query-idle-hang -kms_vblank@pipe-B-query-forked -kms_vblank@pipe-B-query-forked-hang -kms_vblank@pipe-B-query-busy -kms_vblank@pipe-B-query-busy-hang 
-kms_vblank@pipe-B-query-forked-busy -kms_vblank@pipe-B-query-forked-busy-hang -kms_vblank@pipe-B-wait-idle -kms_vblank@pipe-B-wait-idle-hang -kms_vblank@pipe-B-wait-forked -kms_vblank@pipe-B-wait-forked-hang -kms_vblank@pipe-B-wait-busy -kms_vblank@pipe-B-wait-busy-hang -kms_vblank@pipe-B-wait-forked-busy -kms_vblank@pipe-B-wait-forked-busy-hang -kms_vblank@pipe-B-ts-continuation-idle -kms_vblank@pipe-B-ts-continuation-idle-hang -kms_vblank@pipe-B-ts-continuation-dpms-rpm -kms_vblank@pipe-B-ts-continuation-dpms-suspend -kms_vblank@pipe-B-ts-continuation-suspend -kms_vblank@pipe-B-ts-continuation-modeset -kms_vblank@pipe-B-ts-continuation-modeset-hang -kms_vblank@pipe-B-ts-continuation-modeset-rpm -kms_vblank@pipe-C-accuracy-idle -kms_vblank@pipe-C-query-idle -kms_vblank@pipe-C-query-idle-hang -kms_vblank@pipe-C-query-forked -kms_vblank@pipe-C-query-forked-hang -kms_vblank@pipe-C-query-busy -kms_vblank@pipe-C-query-busy-hang -kms_vblank@pipe-C-query-forked-busy -kms_vblank@pipe-C-query-forked-busy-hang -kms_vblank@pipe-C-wait-idle -kms_vblank@pipe-C-wait-idle-hang -kms_vblank@pipe-C-wait-forked -kms_vblank@pipe-C-wait-forked-hang -kms_vblank@pipe-C-wait-busy -kms_vblank@pipe-C-wait-busy-hang -kms_vblank@pipe-C-wait-forked-busy -kms_vblank@pipe-C-wait-forked-busy-hang -kms_vblank@pipe-C-ts-continuation-idle -kms_vblank@pipe-C-ts-continuation-idle-hang -kms_vblank@pipe-C-ts-continuation-dpms-rpm -kms_vblank@pipe-C-ts-continuation-dpms-suspend -kms_vblank@pipe-C-ts-continuation-suspend -kms_vblank@pipe-C-ts-continuation-modeset -kms_vblank@pipe-C-ts-continuation-modeset-hang -kms_vblank@pipe-C-ts-continuation-modeset-rpm -kms_vblank@pipe-D-accuracy-idle -kms_vblank@pipe-D-query-idle -kms_vblank@pipe-D-query-idle-hang -kms_vblank@pipe-D-query-forked -kms_vblank@pipe-D-query-forked-hang -kms_vblank@pipe-D-query-busy -kms_vblank@pipe-D-query-busy-hang -kms_vblank@pipe-D-query-forked-busy -kms_vblank@pipe-D-query-forked-busy-hang -kms_vblank@pipe-D-wait-idle -kms_vblank@pipe-D-wait-idle-hang -kms_vblank@pipe-D-wait-forked -kms_vblank@pipe-D-wait-forked-hang -kms_vblank@pipe-D-wait-busy -kms_vblank@pipe-D-wait-busy-hang -kms_vblank@pipe-D-wait-forked-busy -kms_vblank@pipe-D-wait-forked-busy-hang -kms_vblank@pipe-D-ts-continuation-idle -kms_vblank@pipe-D-ts-continuation-idle-hang -kms_vblank@pipe-D-ts-continuation-dpms-rpm -kms_vblank@pipe-D-ts-continuation-dpms-suspend -kms_vblank@pipe-D-ts-continuation-suspend -kms_vblank@pipe-D-ts-continuation-modeset -kms_vblank@pipe-D-ts-continuation-modeset-hang -kms_vblank@pipe-D-ts-continuation-modeset-rpm -kms_vblank@pipe-E-accuracy-idle -kms_vblank@pipe-E-query-idle -kms_vblank@pipe-E-query-idle-hang -kms_vblank@pipe-E-query-forked -kms_vblank@pipe-E-query-forked-hang -kms_vblank@pipe-E-query-busy -kms_vblank@pipe-E-query-busy-hang -kms_vblank@pipe-E-query-forked-busy -kms_vblank@pipe-E-query-forked-busy-hang -kms_vblank@pipe-E-wait-idle -kms_vblank@pipe-E-wait-idle-hang -kms_vblank@pipe-E-wait-forked -kms_vblank@pipe-E-wait-forked-hang -kms_vblank@pipe-E-wait-busy -kms_vblank@pipe-E-wait-busy-hang -kms_vblank@pipe-E-wait-forked-busy -kms_vblank@pipe-E-wait-forked-busy-hang -kms_vblank@pipe-E-ts-continuation-idle -kms_vblank@pipe-E-ts-continuation-idle-hang -kms_vblank@pipe-E-ts-continuation-dpms-rpm -kms_vblank@pipe-E-ts-continuation-dpms-suspend -kms_vblank@pipe-E-ts-continuation-suspend -kms_vblank@pipe-E-ts-continuation-modeset -kms_vblank@pipe-E-ts-continuation-modeset-hang -kms_vblank@pipe-E-ts-continuation-modeset-rpm 
-kms_vblank@pipe-F-accuracy-idle -kms_vblank@pipe-F-query-idle -kms_vblank@pipe-F-query-idle-hang -kms_vblank@pipe-F-query-forked -kms_vblank@pipe-F-query-forked-hang -kms_vblank@pipe-F-query-busy -kms_vblank@pipe-F-query-busy-hang -kms_vblank@pipe-F-query-forked-busy -kms_vblank@pipe-F-query-forked-busy-hang -kms_vblank@pipe-F-wait-idle -kms_vblank@pipe-F-wait-idle-hang -kms_vblank@pipe-F-wait-forked -kms_vblank@pipe-F-wait-forked-hang -kms_vblank@pipe-F-wait-busy -kms_vblank@pipe-F-wait-busy-hang -kms_vblank@pipe-F-wait-forked-busy -kms_vblank@pipe-F-wait-forked-busy-hang -kms_vblank@pipe-F-ts-continuation-idle -kms_vblank@pipe-F-ts-continuation-idle-hang -kms_vblank@pipe-F-ts-continuation-dpms-rpm -kms_vblank@pipe-F-ts-continuation-dpms-suspend -kms_vblank@pipe-F-ts-continuation-suspend -kms_vblank@pipe-F-ts-continuation-modeset -kms_vblank@pipe-F-ts-continuation-modeset-hang -kms_vblank@pipe-F-ts-continuation-modeset-rpm -kms_vblank@pipe-G-accuracy-idle -kms_vblank@pipe-G-query-idle -kms_vblank@pipe-G-query-idle-hang -kms_vblank@pipe-G-query-forked -kms_vblank@pipe-G-query-forked-hang -kms_vblank@pipe-G-query-busy -kms_vblank@pipe-G-query-busy-hang -kms_vblank@pipe-G-query-forked-busy -kms_vblank@pipe-G-query-forked-busy-hang -kms_vblank@pipe-G-wait-idle -kms_vblank@pipe-G-wait-idle-hang -kms_vblank@pipe-G-wait-forked -kms_vblank@pipe-G-wait-forked-hang -kms_vblank@pipe-G-wait-busy -kms_vblank@pipe-G-wait-busy-hang -kms_vblank@pipe-G-wait-forked-busy -kms_vblank@pipe-G-wait-forked-busy-hang -kms_vblank@pipe-G-ts-continuation-idle -kms_vblank@pipe-G-ts-continuation-idle-hang -kms_vblank@pipe-G-ts-continuation-dpms-rpm -kms_vblank@pipe-G-ts-continuation-dpms-suspend -kms_vblank@pipe-G-ts-continuation-suspend -kms_vblank@pipe-G-ts-continuation-modeset -kms_vblank@pipe-G-ts-continuation-modeset-hang -kms_vblank@pipe-G-ts-continuation-modeset-rpm -kms_vblank@pipe-H-accuracy-idle -kms_vblank@pipe-H-query-idle -kms_vblank@pipe-H-query-idle-hang -kms_vblank@pipe-H-query-forked -kms_vblank@pipe-H-query-forked-hang -kms_vblank@pipe-H-query-busy -kms_vblank@pipe-H-query-busy-hang -kms_vblank@pipe-H-query-forked-busy -kms_vblank@pipe-H-query-forked-busy-hang -kms_vblank@pipe-H-wait-idle -kms_vblank@pipe-H-wait-idle-hang -kms_vblank@pipe-H-wait-forked -kms_vblank@pipe-H-wait-forked-hang -kms_vblank@pipe-H-wait-busy -kms_vblank@pipe-H-wait-busy-hang -kms_vblank@pipe-H-wait-forked-busy -kms_vblank@pipe-H-wait-forked-busy-hang -kms_vblank@pipe-H-ts-continuation-idle -kms_vblank@pipe-H-ts-continuation-idle-hang -kms_vblank@pipe-H-ts-continuation-dpms-rpm -kms_vblank@pipe-H-ts-continuation-dpms-suspend -kms_vblank@pipe-H-ts-continuation-suspend -kms_vblank@pipe-H-ts-continuation-modeset -kms_vblank@pipe-H-ts-continuation-modeset-hang -kms_vblank@pipe-H-ts-continuation-modeset-rpm +kms_vblank@accuracy-idle +kms_vblank@query-idle +kms_vblank@query-idle-hang +kms_vblank@query-forked +kms_vblank@query-forked-hang +kms_vblank@query-busy +kms_vblank@query-busy-hang +kms_vblank@query-forked-busy +kms_vblank@query-forked-busy-hang +kms_vblank@wait-idle +kms_vblank@wait-idle-hang +kms_vblank@wait-forked +kms_vblank@wait-forked-hang +kms_vblank@wait-busy +kms_vblank@wait-busy-hang +kms_vblank@wait-forked-busy +kms_vblank@wait-forked-busy-hang +kms_vblank@ts-continuation-idle +kms_vblank@ts-continuation-idle-hang +kms_vblank@ts-continuation-dpms-rpm +kms_vblank@ts-continuation-dpms-suspend +kms_vblank@ts-continuation-suspend +kms_vblank@ts-continuation-modeset +kms_vblank@ts-continuation-modeset-hang 
+kms_vblank@ts-continuation-modeset-rpm kms_vrr@flip-basic kms_vrr@flip-dpms kms_vrr@flip-suspend diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt index 2cd49e8ee47f..88a1fc0a3b0d 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt @@ -1,4 +1,2 @@ kms_3d,Fail kms_addfb_basic@addfb25-bad-modifier,Fail -kms_force_connector_basic@force-edid,Fail -kms_hdmi_inject@inject-4k,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt index 7e4d8744fcc6..f0576aa629e8 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt @@ -15,3 +15,4 @@ kms_plane_alpha_blend@alpha-7efc,Fail kms_plane_alpha_blend@coverage-7efc,Fail kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail kms_rmfb@close-fd,Fail +kms_universal_plane@universal-plane-sanity,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt new file mode 100644 index 000000000000..327039f70252 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt @@ -0,0 +1,2 @@ +# Suspend to RAM seems to be broken on this machine +.*suspend.* diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt index 7e4d8744fcc6..f0576aa629e8 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt @@ -15,3 +15,4 @@ kms_plane_alpha_blend@alpha-7efc,Fail kms_plane_alpha_blend@coverage-7efc,Fail kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail kms_rmfb@close-fd,Fail +kms_universal_plane@universal-plane-sanity,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt new file mode 100644 index 000000000000..327039f70252 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt @@ -0,0 +1,2 @@ +# Suspend to RAM seems to be broken on this machine +.*suspend.* diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index b1ca3a1100da..57a32e962322 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -533,6 +533,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, mutex_lock(&aux->hw_mutex); /* + * If the device attached to the aux bus is powered down then there's + * no reason to attempt a transfer. Error out immediately. + */ + if (aux->powered_down) { + ret = -EBUSY; + goto unlock; + } + + /* * The specification doesn't give any recommendation on how often to * retry native transactions. We used to retry 7 times like for * aux i2c transactions but real world devices this wasn't @@ -600,6 +609,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset) EXPORT_SYMBOL(drm_dp_dpcd_probe); /** + * drm_dp_dpcd_set_powered() - Set whether the DP device is powered + * @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here + * and the function will be a no-op. 
+ * @powered: true if powered; false if not + * + * If the endpoint device on the DP AUX bus is known to be powered down + * then this function can be called to make future transfers fail immediately + * instead of needing to time out. + * + * If this function is never called then a device defaults to being powered. + */ +void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered) +{ + if (!aux) + return; + + mutex_lock(&aux->hw_mutex); + aux->powered_down = !powered; + mutex_unlock(&aux->hw_mutex); +} +EXPORT_SYMBOL(drm_dp_dpcd_set_powered); + +/** * drm_dp_dpcd_read() - read a series of bytes from the DPCD * @aux: DisplayPort AUX channel (SST or MST) * @offset: address of the (first) register to read @@ -1858,6 +1890,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, struct drm_dp_aux_msg msg; int err = 0; + if (aux->powered_down) + return -EBUSY; + dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES); memset(&msg, 0, sizeof(msg)); @@ -2897,26 +2932,120 @@ static const char *dp_content_type_get_name(enum dp_content_type content_type) } } -void drm_dp_vsc_sdp_log(const char *level, struct device *dev, - const struct drm_dp_vsc_sdp *vsc) +void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc) { -#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__) - DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC", + drm_printf(p, "DP SDP: VSC, revision %u, length %u\n", vsc->revision, vsc->length); - DP_SDP_LOG(" pixelformat: %s\n", + drm_printf(p, " pixelformat: %s\n", dp_pixelformat_get_name(vsc->pixelformat)); - DP_SDP_LOG(" colorimetry: %s\n", + drm_printf(p, " colorimetry: %s\n", dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry)); - DP_SDP_LOG(" bpc: %u\n", vsc->bpc); - DP_SDP_LOG(" dynamic range: %s\n", + drm_printf(p, " bpc: %u\n", vsc->bpc); + drm_printf(p, " dynamic range: %s\n", dp_dynamic_range_get_name(vsc->dynamic_range)); - DP_SDP_LOG(" content type: %s\n", + drm_printf(p, " content type: %s\n", dp_content_type_get_name(vsc->content_type)); -#undef DP_SDP_LOG } EXPORT_SYMBOL(drm_dp_vsc_sdp_log); /** + * drm_dp_vsc_sdp_supported() - check if vsc sdp is supported + * @aux: DisplayPort AUX channel + * @dpcd: DisplayPort configuration data + * + * Returns true if vsc sdp is supported, else returns false + */ +bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 rx_feature; + + if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13) + return false; + + if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) { + drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n"); + return false; + } + + return (rx_feature & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED); +} +EXPORT_SYMBOL(drm_dp_vsc_sdp_supported); + +/** + * drm_dp_vsc_sdp_pack() - pack a given vsc sdp into generic dp_sdp + * @vsc: vsc sdp initialized according to its purpose as defined in + * table 2-118 - table 2-120 in DP 1.4a specification + * @sdp: valid handle to the generic dp_sdp which will be packed + * + * Returns length of sdp on success and error code on failure + */ +ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, + struct dp_sdp *sdp) +{ + size_t length = sizeof(struct dp_sdp); + + memset(sdp, 0, sizeof(struct dp_sdp)); + + /* + * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 + * VSC SDP Header Bytes + */ + sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ + sdp->sdp_header.HB1 = 
vsc->sdp_type; /* Secondary-data Packet Type */ + sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ + sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ + + if (vsc->revision == 0x6) { + sdp->db[0] = 1; + sdp->db[3] = 1; + } + + /* + * Revisions 0x5 and 0x7 support Pixel Encoding/Colorimetry + * Format as per the DP 1.4a spec and DP 2.0, respectively. + */ + if (!(vsc->revision == 0x5 || vsc->revision == 0x7)) + goto out; + + /* VSC SDP Payload for DB16 through DB18 */ + /* Pixel Encoding and Colorimetry Formats */ + sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ + sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ + + switch (vsc->bpc) { + case 6: + /* 6bpc: 0x0 */ + break; + case 8: + sdp->db[17] = 0x1; /* DB17[3:0] */ + break; + case 10: + sdp->db[17] = 0x2; + break; + case 12: + sdp->db[17] = 0x3; + break; + case 16: + sdp->db[17] = 0x4; + break; + default: + WARN(1, "Missing case %d\n", vsc->bpc); + return -EINVAL; + } + + /* Dynamic Range and Component Bit Depth */ + if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) + sdp->db[17] |= 0x80; /* DB17[7] */ + + /* Content Type */ + sdp->db[18] = vsc->content_type & 0x7; + +out: + return length; +} +EXPORT_SYMBOL(drm_dp_vsc_sdp_pack); + +/** * drm_dp_get_pcon_max_frl_bw() - maximum frl supported by PCON * @dpcd: DisplayPort configuration data * @port_cap: port capabilities diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index f7c6b60629c2..03d528209426 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -1306,7 +1306,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, } out: if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { - struct drm_printer p = drm_debug_printer(DBG_PREFIX); + struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP, + DBG_PREFIX); drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); } @@ -1593,10 +1594,11 @@ topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type) } static void -__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, +__dump_topology_ref_history(struct drm_device *drm, + struct drm_dp_mst_topology_ref_history *history, void *ptr, const char *type_str) { - struct drm_printer p = drm_debug_printer(DBG_PREFIX); + struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX); char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); int i; @@ -1638,15 +1640,15 @@ out: static __always_inline void drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) { - __dump_topology_ref_history(&mstb->topology_ref_history, mstb, - "MSTB"); + __dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history, + mstb, "MSTB"); } static __always_inline void drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) { - __dump_topology_ref_history(&port->topology_ref_history, port, - "Port"); + __dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history, + port, "Port"); } static __always_inline void @@ -2824,7 +2826,9 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); if (ret) { if (drm_debug_enabled(DRM_UT_DP)) { - struct drm_printer p = drm_debug_printer(DBG_PREFIX); + struct drm_printer p = drm_dbg_printer(mgr->dev, + DRM_UT_DP, + DBG_PREFIX); drm_printf(&p, "sideband msg failed to send\n"); drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); @@ -2869,7 +2873,8 @@ static void drm_dp_queue_down_tx(struct
drm_dp_mst_topology_mgr *mgr, list_add_tail(&txmsg->next, &mgr->tx_msg_downq); if (drm_debug_enabled(DRM_UT_DP)) { - struct drm_printer p = drm_debug_printer(DBG_PREFIX); + struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP, + DBG_PREFIX); drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index a3065d4aa3d6..521a71c61b16 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1216,9 +1216,6 @@ EXPORT_SYMBOL_GPL(drm_bridge_get_modes); * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get * the EDID and return it. Otherwise return NULL. * - * If &drm_bridge_funcs.edid_read is not set, fall back to using - * &drm_bridge_funcs.get_edid and wrapping it in struct drm_edid. - * * RETURNS: * The retrieved EDID on success, or NULL otherwise. */ @@ -1228,22 +1225,6 @@ const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge, if (!(bridge->ops & DRM_BRIDGE_OP_EDID)) return NULL; - /* Transitional: Fall back to ->get_edid. */ - if (!bridge->funcs->edid_read) { - const struct drm_edid *drm_edid; - struct edid *edid; - - edid = bridge->funcs->get_edid(bridge, connector); - if (!edid) - return NULL; - - drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH); - - kfree(edid); - - return drm_edid; - } - return bridge->funcs->edid_read(bridge, connector); } EXPORT_SYMBOL_GPL(drm_bridge_edid_read); diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index f57e6d74fb0e..c4222b886db7 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -538,7 +538,13 @@ static int __alloc_range(struct drm_buddy *mm, list_add(&block->left->tmp_link, dfs); } while (1); + if (total_allocated < size) { + err = -ENOSPC; + goto err_free; + } + list_splice_tail(&allocated, blocks); + return 0; err_undo: diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index cb90e70d85e8..82c665d3e74b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -61,13 +61,13 @@ * to one or more &drm_encoder, which are then each connected to one * &drm_connector. * - * To create a CRTC, a KMS drivers allocates and zeroes an instances of + * To create a CRTC, a KMS driver allocates and zeroes an instance of * &struct drm_crtc (possibly as part of a larger structure) and registers it * with a call to drm_crtc_init_with_planes(). * - * The CRTC is also the entry point for legacy modeset operations, see - * &drm_crtc_funcs.set_config, legacy plane operations, see - * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy + * The CRTC is also the entry point for legacy modeset operations (see + * &drm_crtc_funcs.set_config), legacy plane operations (see + * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2), and other legacy * operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these * features are controlled through &drm_property and * &drm_mode_config_funcs.atomic_check. 
@@ -107,18 +107,6 @@ int drm_crtc_force_disable(struct drm_crtc *crtc) return drm_mode_set_config_internal(&set); } -static unsigned int drm_num_crtcs(struct drm_device *dev) -{ - unsigned int num = 0; - struct drm_crtc *tmp; - - drm_for_each_crtc(tmp, dev) { - num++; - } - - return num; -} - int drm_crtc_register_all(struct drm_device *dev) { struct drm_crtc *crtc; @@ -278,8 +266,7 @@ static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc * if (name) { crtc->name = kvasprintf(GFP_KERNEL, name, ap); } else { - crtc->name = kasprintf(GFP_KERNEL, "crtc-%d", - drm_num_crtcs(dev)); + crtc->name = kasprintf(GFP_KERNEL, "crtc-%d", config->num_crtc); } if (!crtc->name) { drm_mode_object_unregister(dev, &crtc->base); @@ -904,6 +891,7 @@ out: connector_set = NULL; fb = NULL; mode = NULL; + num_connectors = 0; DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 60fcb80bce61..d1c7e8298702 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -20,162 +20,28 @@ static char edid_firmware[PATH_MAX]; module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644); -MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob " - "from built-in data or /lib/firmware instead. "); - -#define GENERIC_EDIDS 6 -static const char * const generic_edid_name[GENERIC_EDIDS] = { - "edid/800x600.bin", - "edid/1024x768.bin", - "edid/1280x1024.bin", - "edid/1600x1200.bin", - "edid/1680x1050.bin", - "edid/1920x1080.bin", -}; - -static const u8 generic_edid[GENERIC_EDIDS][128] = { - { - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78, - 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, - 0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f, - 0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80, - 0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e, - 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, - 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, - 0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, - 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53, - 0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2, - }, - { - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78, - 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, - 0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19, - 0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90, - 0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18, - 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, - 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, - 0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, - 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58, - 0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55, - }, - { - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78, - 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, - 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a, - 0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70, - 0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e, - 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, - 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, - 0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, - 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53, - 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0, - }, - { - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78, - 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, - 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f, - 0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0, - 0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e, - 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, - 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, - 0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, - 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55, - 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d, - }, - { - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78, - 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, - 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39, - 0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0, - 0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e, - 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, - 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, - 0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, - 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57, - 0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26, - }, - { - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78, - 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, - 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, - 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, - 0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e, - 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, - 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, - 0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, - 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46, - 0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05, - }, -}; +MODULE_PARM_DESC(edid_firmware, + "Do not probe monitor, use specified EDID blob from /lib/firmware instead."); static const struct drm_edid *edid_load(struct drm_connector *connector, const char *name) { const struct firmware *fw = NULL; - const u8 *fwdata; const struct drm_edid *drm_edid; - int fwsize, builtin; - - builtin = match_string(generic_edid_name, GENERIC_EDIDS, name); - if (builtin >= 0) { - fwdata = generic_edid[builtin]; - fwsize = sizeof(generic_edid[builtin]); - } else { - int err; - - err = request_firmware(&fw, name, connector->dev->dev); - if (err) { - drm_err(connector->dev, - 
"[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n", - connector->base.id, connector->name, - name, err); - return ERR_PTR(err); - } - - fwdata = fw->data; - fwsize = fw->size; + int err; + + err = request_firmware(&fw, name, connector->dev->dev); + if (err) { + drm_err(connector->dev, + "[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n", + connector->base.id, connector->name, + name, err); + return ERR_PTR(err); } - drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded %s firmware EDID \"%s\"\n", - connector->base.id, connector->name, - builtin >= 0 ? "built-in" : "external", name); + drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded external firmware EDID \"%s\"\n", + connector->base.id, connector->name, name); - drm_edid = drm_edid_alloc(fwdata, fwsize); + drm_edid = drm_edid_alloc(fw->data, fw->size); if (!drm_edid_valid(drm_edid)) { drm_err(connector->dev, "Invalid firmware EDID \"%s\"\n", name); drm_edid_free(drm_edid); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 8c87287c3e16..638ffa4444f5 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -913,7 +913,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) DRM_GEM_OBJECT_PURGEABLE; } - if (obj->handle_count > 1) { + if (drm_gem_object_is_shared_for_memory_stats(obj)) { status.shared += obj->size; } else { status.private += obj->size; diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 8525ef851540..48fd2d67f352 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -544,7 +544,7 @@ void drm_mode_config_cleanup(struct drm_device *dev) */ WARN_ON(!list_empty(&dev->mode_config.fb_list)); list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { - struct drm_printer p = drm_debug_printer("[leaked fb]"); + struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]"); drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); drm_framebuffer_print_info(&p, 1, fb); diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index f858dfedf2cf..2c582020cb42 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -193,13 +193,22 @@ int drm_mode_config_helper_suspend(struct drm_device *dev) if (!dev) return 0; + /* + * Don't disable polling if it was never initialized + */ + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_disable(dev); - drm_kms_helper_poll_disable(dev); drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); - drm_kms_helper_poll_enable(dev); + /* + * Don't enable polling if it was never initialized + */ + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); + return PTR_ERR(state); } @@ -239,7 +248,11 @@ int drm_mode_config_helper_resume(struct drm_device *dev) dev->mode_config.suspend_state = NULL; drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); - drm_kms_helper_poll_enable(dev); + /* + * Don't enable polling if it is not initialized + */ + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); return ret; } diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 918065982db4..7694b85e75e3 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -91,7 +91,7 @@ static noinline depot_stack_handle_t __drm_stack_depot_save(void) 
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot) { - struct drm_printer p = drm_debug_printer("drm_modeset_lock"); + struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_KMS, "drm_modeset_lock"); unsigned long *entries; unsigned int nr_entries; char *buf; diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 834a5e28abbe..7352bde299d5 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -820,7 +820,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, if (max_segment == 0) max_segment = UINT_MAX; err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, + (unsigned long)nr_pages << PAGE_SHIFT, max_segment, GFP_KERNEL); if (err) { kfree(sg); diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c index 5b93c11895bb..699b7dbffd7b 100644 --- a/drivers/gpu/drm/drm_print.c +++ b/drivers/gpu/drm/drm_print.c @@ -182,16 +182,35 @@ void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf) } EXPORT_SYMBOL(__drm_printfn_info); -void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf) +void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf) { - /* pr_debug callsite decorations are unhelpful here */ - printk(KERN_DEBUG "%s %pV", p->prefix, vaf); + const struct drm_device *drm = p->arg; + const struct device *dev = drm ? drm->dev : NULL; + enum drm_debug_category category = p->category; + const char *prefix = p->prefix ?: ""; + const char *prefix_pad = p->prefix ? " " : ""; + + if (!__drm_debug_enabled(category)) + return; + + /* Note: __builtin_return_address(0) is useless here. */ + if (dev) + dev_printk(KERN_DEBUG, dev, "[" DRM_NAME "]%s%s %pV", + prefix_pad, prefix, vaf); + else + printk(KERN_DEBUG "[" DRM_NAME "]%s%s %pV", + prefix_pad, prefix, vaf); } -EXPORT_SYMBOL(__drm_printfn_debug); +EXPORT_SYMBOL(__drm_printfn_dbg); void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf) { - pr_err("*ERROR* %s %pV", p->prefix, vaf); + struct drm_device *drm = p->arg; + + if (p->prefix) + drm_err(drm, "%s %pV", p->prefix, vaf); + else + drm_err(drm, "%pV", vaf); } EXPORT_SYMBOL(__drm_printfn_err); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d1e1ade66f81..19ecb749704b 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -293,14 +293,17 @@ static void reschedule_output_poll_work(struct drm_device *dev) * Drivers can call this helper from their device resume implementation. It is * not an error to call this even when output polling isn't enabled. * + * If device polling was never initialized before, this call will trigger a + * warning and return. + * * Note that calls to enable and disable polling must be strictly ordered, which * is automatically the case when they're only called from suspend/resume * callbacks. */ void drm_kms_helper_poll_enable(struct drm_device *dev) { - if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll || - dev->mode_config.poll_running) + if (drm_WARN_ON_ONCE(dev, !dev->mode_config.poll_enabled) || + !drm_kms_helper_poll || dev->mode_config.poll_running) return; if (drm_kms_helper_enable_hpd(dev) || @@ -619,8 +622,12 @@ retry: 0); } - /* Re-enable polling in case the global poll config changed. */ - drm_kms_helper_poll_enable(dev); + /* + * Re-enable polling in case the global poll config changed but polling + * is still initialized.
+ */ + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); if (connector->status == connector_status_disconnected) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", @@ -871,12 +878,18 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); * not an error to call this even when output polling isn't enabled or already * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable(). * + * If, however, the polling was never initialized, this call will trigger a + * warning and return. + * * Note that calls to enable and disable polling must be strictly ordered, which * is automatically the case when they're only called from suspend/resume * callbacks. */ void drm_kms_helper_poll_disable(struct drm_device *dev) { + if (drm_WARN_ON(dev, !dev->mode_config.poll_enabled)) + return; + if (dev->mode_config.poll_running) drm_kms_helper_disable_hpd(dev); diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 84101baeecc6..a0e94217b511 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -441,6 +441,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private, u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT); int ret; + if (flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) + return -EINVAL; + if (!syncobj) return -ENOENT; @@ -1040,8 +1043,11 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, uint64_t *points; uint32_t signaled_count, i; - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) + if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) { + might_sleep(); lockdep_assert_none_held_once(); + } points = kmalloc_array(count, sizeof(*points), GFP_KERNEL); if (points == NULL) @@ -1109,7 +1115,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, * fallthrough and try a 0 timeout wait! */ - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { + if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) { for (i = 0; i < count; ++i) drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); } @@ -1416,10 +1423,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj, /* This happens inside the syncobj lock */ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1)); + if (!fence) + return; + ret = dma_fence_chain_find_seqno(&fence, entry->point); - if (ret != 0 || !fence) { + if (ret != 0) { + /* The given seqno has not been submitted yet. */ dma_fence_put(fence); return; + } else if (!fence) { + /* If dma_fence_chain_find_seqno returns 0 but sets the fence + * to NULL, it implies that the given seqno is signaled and a + * later seqno has already been submitted. Assign a stub fence + * so that the eventfd still gets signaled below. + */ + fence = dma_fence_get_stub(); } list_del_init(&entry->node); diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index b5d6e3352071..3089029abba4 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT Note that this driver only supports newer devices from Broadwell on. For further information and setup guide, you can visit: - https://github.com/intel/gvt-linux/wiki. + https://github.com/intel/gvt-linux/wiki. If in doubt, say "N".
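The syncobj_eventfd_entry_func() change above leans on the three-way contract of dma_fence_chain_find_seqno(): nonzero return means the point was never submitted, a zero return with a NULL fence means the point is already signaled, and a zero return with a non-NULL fence is something to wait on. A minimal sketch of that contract follows; resolve_point_fence() is a hypothetical helper name, not kernel API.

#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>

/*
 * Sketch only: illustrates the dma_fence_chain_find_seqno() contract that
 * the eventfd fix above depends on. Takes a reference to the chain head
 * fence and returns a fence to wait on, or NULL if the point was never
 * submitted.
 */
static struct dma_fence *resolve_point_fence(struct dma_fence *fence, u64 point)
{
	int ret = dma_fence_chain_find_seqno(&fence, point);

	if (ret != 0) {
		/* The requested seqno has not been submitted yet. */
		dma_fence_put(fence);
		return NULL;
	}

	if (!fence) {
		/*
		 * ret == 0 with a NULL fence: the seqno is already signaled
		 * and a later seqno exists on the chain; hand back a stub
		 * fence so the caller's wait/signal path stays uniform.
		 */
		return dma_fence_get_stub();
	}

	/* A real fence covering this point; it may still be unsignaled. */
	return fence;
}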
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 91f2bc405cba..0279c8aabdd1 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -1060,3 +1060,33 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, plane_config->fb = intel_fb; } + +bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc, + const struct intel_initial_plane_config *plane_config) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + u32 base; + + if (!plane_state->uapi.visible) + return false; + + base = intel_plane_ggtt_offset(plane_state); + + /* + * We may have moved the surface to a different + * part of ggtt, make the plane aware of that. + */ + if (plane_config->base == base) + return false; + + if (DISPLAY_VER(dev_priv) >= 4) + intel_de_write(dev_priv, DSPSURF(i9xx_plane), base); + else + intel_de_write(dev_priv, DSPADDR(i9xx_plane), base); + + return true; +} diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h index b3d724a144cb..0ca12d1e6839 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.h +++ b/drivers/gpu/drm/i915/display/i9xx_plane.h @@ -26,6 +26,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe); void i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config); +bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc, + const struct intel_initial_plane_config *plane_config); #else static inline unsigned int i965_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, @@ -46,6 +48,11 @@ static inline void i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { } +static inline bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc, + const struct intel_initial_plane_config *plane_config) +{ + return false; +} #endif #endif diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 06c2455bdd78..76d77d5a0409 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -217,6 +217,9 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, int width, height; unsigned int rel_data_rate; + if (plane->id == PLANE_CURSOR) + return 0; + if (!plane_state->uapi.visible) return 0; @@ -244,9 +247,6 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, rel_data_rate = width * height * fb->format->cpp[color_plane]; - if (plane->id == PLANE_CURSOR) - return rel_data_rate; - return intel_adjusted_rate(&plane_state->uapi.src, &plane_state->uapi.dst, rel_data_rate); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 3f3cd944a1c5..1946d7fb3c2e 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -1465,7 +1465,7 @@ static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int if (controller == 1 && INTEL_PCH_TYPE(i915) >= PCH_ICP && - INTEL_PCH_TYPE(i915) < PCH_MTP) + INTEL_PCH_TYPE(i915) <= PCH_ADP) return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT; return true; 
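The i9xx_fixup_initial_plane_config() helper above only rewrites DSPSURF/DSPADDR when the inherited BIOS framebuffer ends up at a GGTT offset different from the one read out of the hardware. A hypothetical caller sketch under that assumption; fixup_initial_plane_configs() is an invented name and the readout/re-pinning flow around it is assumed, not taken from this diff.

#include "i9xx_plane.h"
#include "intel_display.h"
#include "intel_display_types.h"

/*
 * Hypothetical sketch: walk the CRTCs, read out the BIOS plane setup, and
 * after the inherited framebuffer has been re-pinned, let the plane code
 * fix up the surface register. The real call site lives in the initial
 * plane readout path.
 */
static void fixup_initial_plane_configs(struct drm_i915_private *i915)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_initial_plane_config plane_config = {};

		i9xx_get_initial_plane_config(crtc, &plane_config);

		/* ... framebuffer takeover / re-pinning happens here ... */

		if (i9xx_fixup_initial_plane_config(crtc, &plane_config))
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] initial plane surface address fixed up\n",
				    crtc->base.base.id, crtc->base.name);
	}
}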
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index aa169b0055e9..5f04e495fd27 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2204,8 +2204,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) if (IS_DGFX(i915)) return vbt_pin; - if (INTEL_PCH_TYPE(i915) >= PCH_LNL || HAS_PCH_MTP(i915) || - IS_ALDERLAKE_P(i915)) { + if (INTEL_PCH_TYPE(i915) >= PCH_MTL || IS_ALDERLAKE_P(i915)) { ddc_pin_map = adlp_ddc_pin_map; n_entries = ARRAY_SIZE(adlp_ddc_pin_map); } else if (IS_ALDERLAKE_S(i915)) { @@ -3074,7 +3073,7 @@ err_unmap_oprom: */ void intel_bios_init(struct drm_i915_private *i915) { - const struct vbt_header *vbt = i915->display.opregion.vbt; + const struct vbt_header *vbt; struct vbt_header *oprom_vbt = NULL; const struct bdb_header *bdb; @@ -3089,6 +3088,8 @@ void intel_bios_init(struct drm_i915_private *i915) init_vbt_defaults(i915); + vbt = intel_opregion_get_vbt(i915, NULL); + /* * If the OpRegion does not have VBT, look in SPI flash through MMIO or * PCI mapping @@ -3306,7 +3307,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) * additional data. Trust that if the VBT was written into * the OpRegion then they have validated the LVDS's existence. */ - if (i915->display.opregion.vbt) + if (intel_opregion_get_vbt(i915, NULL)) return true; } @@ -3657,3 +3658,30 @@ void intel_bios_for_each_encoder(struct drm_i915_private *i915, list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) func(i915, devdata); } + +static int intel_bios_vbt_show(struct seq_file *m, void *unused) +{ + struct drm_i915_private *i915 = m->private; + const void *vbt; + size_t vbt_size; + + /* + * FIXME: VBT might originate from other places than opregion, and then + * this would be incorrect. 
+ */ + vbt = intel_opregion_get_vbt(i915, &vbt_size); + if (vbt) + seq_write(m, vbt, vbt_size); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt); + +void intel_bios_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_vbt", 0444, minor->debugfs_root, + i915, &intel_bios_vbt_fops); +} diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 49e24b7cf675..41bfb009d4b0 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -246,13 +246,10 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size); bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); -bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int dsc_max_bpc); -bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port); -bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port); const struct intel_bios_encoder_data * intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port); @@ -283,4 +280,6 @@ void intel_bios_for_each_encoder(struct drm_i915_private *i915, void (*func)(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata)); +void intel_bios_debugfs_register(struct drm_i915_private *i915); + #endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index c985ebb6831a..26200ee3e23f 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1227,183 +1227,182 @@ struct intel_cdclk_vals { u32 cdclk; u16 refclk; u16 waveform; - u8 divider; /* CD2X divider * 2 */ u8 ratio; }; static const struct intel_cdclk_vals bxt_cdclk_table[] = { - { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 }, - { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 }, - { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 }, - { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 }, - { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 }, + { .refclk = 19200, .cdclk = 144000, .ratio = 60 }, + { .refclk = 19200, .cdclk = 288000, .ratio = 60 }, + { .refclk = 19200, .cdclk = 384000, .ratio = 60 }, + { .refclk = 19200, .cdclk = 576000, .ratio = 60 }, + { .refclk = 19200, .cdclk = 624000, .ratio = 65 }, {} }; static const struct intel_cdclk_vals glk_cdclk_table[] = { - { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 }, - { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 }, - { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 }, + { .refclk = 19200, .cdclk = 79200, .ratio = 33 }, + { .refclk = 19200, .cdclk = 158400, .ratio = 33 }, + { .refclk = 19200, .cdclk = 316800, .ratio = 33 }, {} }; static const struct intel_cdclk_vals icl_cdclk_table[] = { - { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 }, - { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, - { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, - { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 }, 
-	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
-	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
-	{ .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
-	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
-	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
-	{ .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
-	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
-	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
-	{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
-	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
-	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
-	{ .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
-	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
-	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+	{ .refclk = 19200, .cdclk = 172800, .ratio = 18 },
+	{ .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+	{ .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+	{ .refclk = 19200, .cdclk = 326400, .ratio = 68 },
+	{ .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+	{ .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+	{ .refclk = 24000, .cdclk = 180000, .ratio = 15 },
+	{ .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+	{ .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+	{ .refclk = 24000, .cdclk = 324000, .ratio = 54 },
+	{ .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+	{ .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+	{ .refclk = 38400, .cdclk = 172800, .ratio = 9 },
+	{ .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+	{ .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+	{ .refclk = 38400, .cdclk = 326400, .ratio = 34 },
+	{ .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+	{ .refclk = 38400, .cdclk = 652800, .ratio = 34 },
 	{}
 };
 
 static const struct intel_cdclk_vals rkl_cdclk_table[] = {
-	{ .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 },
-	{ .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 },
-	{ .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 },
-	{ .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
-	{ .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
-	{ .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },
-
-	{ .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 },
-	{ .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 },
-	{ .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 },
-	{ .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
-	{ .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 },
-	{ .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },
-
-	{ .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
-	{ .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
-	{ .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
-	{ .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
-	{ .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
-	{ .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
+	{ .refclk = 19200, .cdclk = 172800, .ratio = 36 },
+	{ .refclk = 19200, .cdclk = 192000, .ratio = 40 },
+	{ .refclk = 19200, .cdclk = 307200, .ratio = 64 },
+	{ .refclk = 19200, .cdclk = 326400, .ratio = 136 },
+	{ .refclk = 19200, .cdclk = 556800, .ratio = 116 },
+	{ .refclk = 19200, .cdclk = 652800, .ratio = 136 },
+
+	{ .refclk = 24000, .cdclk = 180000, .ratio = 30 },
+	{ .refclk = 24000, .cdclk = 192000, .ratio = 32 },
+	{ .refclk = 24000, .cdclk = 312000, .ratio = 52 },
+	{ .refclk = 24000, .cdclk = 324000, .ratio = 108 },
+	{ .refclk = 24000, .cdclk = 552000, .ratio = 92 },
+	{ .refclk = 24000, .cdclk = 648000, .ratio = 108 },
+
+	{ .refclk = 38400, .cdclk = 172800, .ratio = 18 },
+	{ .refclk = 38400, .cdclk = 192000, .ratio = 20 },
+	{ .refclk = 38400, .cdclk = 307200, .ratio = 32 },
+	{ .refclk = 38400, .cdclk = 326400, .ratio = 68 },
+	{ .refclk = 38400, .cdclk = 556800, .ratio = 58 },
+	{ .refclk = 38400, .cdclk = 652800, .ratio = 68 },
 	{}
 };
 
 static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = {
-	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
-	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
-	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+	{ .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+	{ .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+	{ .refclk = 19200, .cdclk = 652800, .ratio = 68 },
 
-	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
-	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
-	{ .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+	{ .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+	{ .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+	{ .refclk = 24400, .cdclk = 648000, .ratio = 54 },
 
-	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
-	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
-	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+	{ .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+	{ .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+	{ .refclk = 38400, .cdclk = 652800, .ratio = 34 },
 	{}
 };
 
 static const struct intel_cdclk_vals adlp_cdclk_table[] = {
-	{ .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
-	{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
-	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
-	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
-	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
-	{ .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
-	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
-	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
-	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
-	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
-	{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
-	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
-	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
-	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
-	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+	{ .refclk = 19200, .cdclk = 172800, .ratio = 27 },
+	{ .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+	{ .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+	{ .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+	{ .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+	{ .refclk = 24000, .cdclk = 176000, .ratio = 22 },
+	{ .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+	{ .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+	{ .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+	{ .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+	{ .refclk = 38400, .cdclk = 179200, .ratio = 14 },
+	{ .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+	{ .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+	{ .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+	{ .refclk = 38400, .cdclk = 652800, .ratio = 34 },
 	{}
 };
 
 static const struct intel_cdclk_vals rplu_cdclk_table[] = {
-	{ .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
-	{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
-	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
-	{ .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 },
-	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
-	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
-
-	{ .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
-	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
-	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
-	{ .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 },
-	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
-	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
-
-	{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
-	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
-	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
-	{ .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 },
-	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
-	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+	{ .refclk = 19200, .cdclk = 172800, .ratio = 27 },
+	{ .refclk = 19200, .cdclk = 192000, .ratio = 20 },
+	{ .refclk = 19200, .cdclk = 307200, .ratio = 32 },
+	{ .refclk = 19200, .cdclk = 480000, .ratio = 50 },
+	{ .refclk = 19200, .cdclk = 556800, .ratio = 58 },
+	{ .refclk = 19200, .cdclk = 652800, .ratio = 68 },
+
+	{ .refclk = 24000, .cdclk = 176000, .ratio = 22 },
+	{ .refclk = 24000, .cdclk = 192000, .ratio = 16 },
+	{ .refclk = 24000, .cdclk = 312000, .ratio = 26 },
+	{ .refclk = 24000, .cdclk = 480000, .ratio = 40 },
+	{ .refclk = 24000, .cdclk = 552000, .ratio = 46 },
+	{ .refclk = 24000, .cdclk = 648000, .ratio = 54 },
+
+	{ .refclk = 38400, .cdclk = 179200, .ratio = 14 },
+	{ .refclk = 38400, .cdclk = 192000, .ratio = 10 },
+	{ .refclk = 38400, .cdclk = 307200, .ratio = 16 },
+	{ .refclk = 38400, .cdclk = 480000, .ratio = 25 },
+	{ .refclk = 38400, .cdclk = 556800, .ratio = 29 },
+	{ .refclk = 38400, .cdclk = 652800, .ratio = 34 },
 	{}
 };
 
 static const struct intel_cdclk_vals dg2_cdclk_table[] = {
-	{ .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
-	{ .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
-	{ .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 },
-	{ .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a },
-	{ .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa },
-	{ .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a },
-	{ .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 },
-	{ .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 },
-	{ .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee },
-	{ .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de },
-	{ .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
-	{ .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
-	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
+	{ .refclk = 38400, .cdclk = 163200, .ratio = 34, .waveform = 0x8888 },
+	{ .refclk = 38400, .cdclk = 204000, .ratio = 34, .waveform = 0x9248 },
+	{ .refclk = 38400, .cdclk = 244800, .ratio = 34, .waveform = 0xa4a4 },
+	{ .refclk = 38400, .cdclk = 285600, .ratio = 34, .waveform = 0xa54a },
+	{ .refclk = 38400, .cdclk = 326400, .ratio = 34, .waveform = 0xaaaa },
+	{ .refclk = 38400, .cdclk = 367200, .ratio = 34, .waveform = 0xad5a },
+	{ .refclk = 38400, .cdclk = 408000, .ratio = 34, .waveform = 0xb6b6 },
+	{ .refclk = 38400, .cdclk = 448800, .ratio = 34, .waveform = 0xdbb6 },
+	{ .refclk = 38400, .cdclk = 489600, .ratio = 34, .waveform = 0xeeee },
+	{ .refclk = 38400, .cdclk = 530400, .ratio = 34, .waveform = 0xf7de },
+	{ .refclk = 38400, .cdclk = 571200, .ratio = 34, .waveform = 0xfefe },
+	{ .refclk = 38400, .cdclk = 612000, .ratio = 34, .waveform = 0xfffe },
+	{ .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
 	{}
 };
 
 static const struct intel_cdclk_vals mtl_cdclk_table[] = {
-	{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
-	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
-	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0x0000 },
-	{ .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0x0000 },
-	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0x0000 },
-	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0x0000 },
+	{ .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
+	{ .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
+	{ .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0x0000 },
+	{ .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0x0000 },
+	{ .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0x0000 },
+	{ .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0x0000 },
 	{}
 };
 
 static const struct intel_cdclk_vals lnl_cdclk_table[] = {
-	{ .refclk = 38400, .cdclk = 153600, .divider = 2, .ratio = 16, .waveform = 0xaaaa },
-	{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
-	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
-	{ .refclk = 38400, .cdclk = 211200, .divider = 2, .ratio = 16, .waveform = 0xdbb6 },
-	{ .refclk = 38400, .cdclk = 230400, .divider = 2, .ratio = 16, .waveform = 0xeeee },
-	{ .refclk = 38400, .cdclk = 249600, .divider = 2, .ratio = 16, .waveform = 0xf7de },
-	{ .refclk = 38400, .cdclk = 268800, .divider = 2, .ratio = 16, .waveform = 0xfefe },
-	{ .refclk = 38400, .cdclk = 288000, .divider = 2, .ratio = 16, .waveform = 0xfffe },
-	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0xffff },
-	{ .refclk = 38400, .cdclk = 330000, .divider = 2, .ratio = 25, .waveform = 0xdbb6 },
-	{ .refclk = 38400, .cdclk = 360000, .divider = 2, .ratio = 25, .waveform = 0xeeee },
-	{ .refclk = 38400, .cdclk = 390000, .divider = 2, .ratio = 25, .waveform = 0xf7de },
-	{ .refclk = 38400, .cdclk = 420000, .divider = 2, .ratio = 25, .waveform = 0xfefe },
-	{ .refclk = 38400, .cdclk = 450000, .divider = 2, .ratio = 25, .waveform = 0xfffe },
-	{ .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0xffff },
-	{ .refclk = 38400, .cdclk = 487200, .divider = 2, .ratio = 29, .waveform = 0xfefe },
-	{ .refclk = 38400, .cdclk = 522000, .divider = 2, .ratio = 29, .waveform = 0xfffe },
-	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0xffff },
-	{ .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
-	{ .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
-	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
+	{ .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
+	{ .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
+	{ .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
+	{ .refclk = 38400, .cdclk = 211200, .ratio = 16, .waveform = 0xdbb6 },
+	{ .refclk = 38400, .cdclk = 230400, .ratio = 16, .waveform = 0xeeee },
+	{ .refclk = 38400, .cdclk = 249600, .ratio = 16, .waveform = 0xf7de },
+	{ .refclk = 38400, .cdclk = 268800, .ratio = 16, .waveform = 0xfefe },
+	{ .refclk = 38400, .cdclk = 288000, .ratio = 16, .waveform = 0xfffe },
+	{ .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0xffff },
+	{ .refclk = 38400, .cdclk = 330000, .ratio = 25, .waveform = 0xdbb6 },
+	{ .refclk = 38400, .cdclk = 360000, .ratio = 25, .waveform = 0xeeee },
+	{ .refclk = 38400, .cdclk = 390000, .ratio = 25, .waveform = 0xf7de },
+	{ .refclk = 38400, .cdclk = 420000, .ratio = 25, .waveform = 0xfefe },
+	{ .refclk = 38400, .cdclk = 450000, .ratio = 25, .waveform = 0xfffe },
+	{ .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff },
+	{ .refclk = 38400, .cdclk = 487200, .ratio = 29, .waveform = 0xfefe },
+	{ .refclk = 38400, .cdclk = 522000, .ratio = 29, .waveform = 0xfffe },
+	{ .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff },
+	{ .refclk = 38400, .cdclk = 571200, .ratio = 34, .waveform = 0xfefe },
+	{ .refclk = 38400, .cdclk = 612000, .ratio = 34, .waveform = 0xfffe },
+	{ .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
 	{}
 };
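The hunks above drop the .divider field from every cdclk table entry. The field is redundant: the PLL runs at vco = ratio * refclk, and the CD2X divider is simply vco / cdclk, so the driver can derive it on demand (the real computation lives in bxt_cdclk_cd2x_div_sel() in this file). A minimal sketch of the derivation, illustrative only:

	/* Hypothetical helper: recover the dropped .divider value. */
	static int cdclk_cd2x_divider(int refclk, int ratio, int cdclk)
	{
		/* vco = ratio * refclk; cdclk = vco / divider */
		return DIV_ROUND_CLOSEST(refclk * ratio, cdclk);
	}

For the RKL entry { .refclk = 19200, .cdclk = 326400, .ratio = 136 }: vco = 19200 * 136 = 2611200 kHz, and 2611200 / 326400 = 8, matching the removed .divider = 8.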
@@ -1901,15 +1900,47 @@ static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
 		dev_priv->display.cdclk.hw.vco > 0;
 }
 
+static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
+			 const struct intel_cdclk_config *cdclk_config,
+			 enum pipe pipe)
+{
+	int cdclk = cdclk_config->cdclk;
+	int vco = cdclk_config->vco;
+	int unsquashed_cdclk;
+	u16 waveform;
+	u32 val;
+
+	waveform = cdclk_squash_waveform(i915, cdclk);
+
+	unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
+					     cdclk_squash_divider(waveform));
+
+	val = bxt_cdclk_cd2x_div_sel(i915, unsquashed_cdclk, vco) |
+		bxt_cdclk_cd2x_pipe(i915, pipe);
+
+	/*
+	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
+	 * enable otherwise.
+	 */
+	if ((IS_GEMINILAKE(i915) || IS_BROXTON(i915)) &&
+	    cdclk >= 500000)
+		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+
+	if (DISPLAY_VER(i915) >= 20)
+		val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
+	else
+		val |= skl_cdclk_decimal(cdclk);
+
+	return val;
+}
+
 static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
 			   const struct intel_cdclk_config *cdclk_config,
 			   enum pipe pipe)
 {
 	int cdclk = cdclk_config->cdclk;
 	int vco = cdclk_config->vco;
-	int unsquashed_cdclk;
 	u16 waveform;
-	u32 val;
 
 	if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
 	    !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1926,29 +1957,10 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
 
 	waveform = cdclk_squash_waveform(dev_priv, cdclk);
 
-	unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
-					     cdclk_squash_divider(waveform));
-
 	if (HAS_CDCLK_SQUASH(dev_priv))
 		dg2_cdclk_squash_program(dev_priv, waveform);
 
-	val = bxt_cdclk_cd2x_div_sel(dev_priv, unsquashed_cdclk, vco) |
-		bxt_cdclk_cd2x_pipe(dev_priv, pipe);
-
-	/*
-	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
-	 * enable otherwise.
-	 */
-	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
-	    cdclk >= 500000)
-		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
-	if (DISPLAY_VER(dev_priv) >= 20)
-		val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
-	else
-		val |= skl_cdclk_decimal(cdclk);
-
-	intel_de_write(dev_priv, CDCLK_CTL, val);
+	intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
 
 	if (pipe != INVALID_PIPE)
 		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
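This refactor extracts the CDCLK_CTL value computation out of _bxt_set_cdclk() into bxt_cdclk_ctl(), so a single function builds the complete register value from an intel_cdclk_config. The payoff is in bxt_sanitize_cdclk() below: instead of reassembling the expected value piecemeal (decimal field, CD2X divider, SSA precharge), it can recompute it with the same helper and compare. A condensed sketch of the resulting compare, names as in the diff:

	/* Sketch: sanitize by recomputing the full expected value. */
	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
	expected = bxt_cdclk_ctl(dev_priv, &dev_priv->display.cdclk.hw,
				 INVALID_PIPE);

	/* The pipe select field is a don't-care; mask it on both sides. */
	cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
	expected &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);

	if (cdctl != expected)
		goto sanitize;	/* BIOS left stale bits behind; reprogram */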
@@ -2039,7 +2051,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
 	u32 cdctl, expected;
-	int cdclk, clock, vco;
+	int cdclk, vco;
 
 	intel_update_cdclk(dev_priv);
 	intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
@@ -2048,20 +2060,6 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	    dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
 		goto sanitize;
 
-	/* DPLL okay; verify the cdclock
-	 *
-	 * Some BIOS versions leave an incorrect decimal frequency value and
-	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
-	 * so sanitize this register.
-	 */
-	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
-	/*
-	 * Let's ignore the pipe field, since BIOS could have configured the
-	 * dividers both synching to an active pipe, or asynchronously
-	 * (PIPE_NONE).
-	 */
-	cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
-
 	/* Make sure this is a legal cdclk value for the platform */
 	cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
 	if (cdclk != dev_priv->display.cdclk.hw.cdclk)
@@ -2072,24 +2070,21 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	if (vco != dev_priv->display.cdclk.hw.vco)
 		goto sanitize;
 
-	expected = skl_cdclk_decimal(cdclk);
-
-	/* Figure out what CD2X divider we should be using for this cdclk */
-	if (HAS_CDCLK_SQUASH(dev_priv))
-		clock = dev_priv->display.cdclk.hw.vco / 2;
-	else
-		clock = dev_priv->display.cdclk.hw.cdclk;
-
-	expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
-					   dev_priv->display.cdclk.hw.vco);
+	/*
+	 * Some BIOS versions leave an incorrect decimal frequency value and
+	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+	 * so sanitize this register.
+	 */
+	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
+	expected = bxt_cdclk_ctl(dev_priv, &dev_priv->display.cdclk.hw, INVALID_PIPE);
 
 	/*
-	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
-	 * enable otherwise.
+	 * Let's ignore the pipe field, since BIOS could have configured the
+	 * dividers both synching to an active pipe, or asynchronously
+	 * (PIPE_NONE).
 	 */
-	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
-	    dev_priv->display.cdclk.hw.cdclk >= 500000)
-		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+	cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
+	expected &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
 
 	if (cdctl == expected)
 		/* All well; nothing to sanitize */
@@ -3467,15 +3462,15 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
 {
 	u32 freq;
 
-	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
-		freq = dg1_rawclk(dev_priv);
-	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
 		/*
 		 * MTL always uses a 38.4 MHz rawclk.  The bspec tells us
 		 * "RAWCLK_FREQ defaults to the values for 38.4 and does
 		 * not need to be programmed."
 		 */
 		freq = 38400;
+	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+		freq = dg1_rawclk(dev_priv);
 	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
 		freq = cnp_rawclk(dev_priv);
 	else if (HAS_PCH_SPLIT(dev_priv))
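The intel_read_rawclk() change is an ordering fix, not just a cleanup: INTEL_PCH_TYPE() comparisons form a newest-to-oldest cascade, and a Meteor Lake PCH also satisfies >= PCH_DG1, so with the DG1 test first MTL never reached its own branch. Sketch of the pitfall (illustrative; the pre-patch code spelled the check PCH_MTP, which is immaterial to the ordering problem):

	/* Broken: PCH_MTL >= PCH_DG1, so MTL takes the DG1 branch. */
	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
		freq = dg1_rawclk(i915);
	else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)	/* dead code for MTL */
		freq = 38400;

	/* Fixed: test the newest platform first, as in the hunk above. */
	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
		freq = 38400;
	else if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
		freq = dg1_rawclk(i915);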
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index abaacea5c2cc..b9733a73e21d 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -42,6 +42,7 @@
 #include "intel_ddi.h"
 #include "intel_ddi_buf_trans.h"
 #include "intel_de.h"
+#include "intel_display_driver.h"
 #include "intel_display_types.h"
 #include "intel_fdi.h"
 #include "intel_fdi_regs.h"
@@ -846,6 +847,9 @@ intel_crt_detect(struct drm_connector *connector,
 	if (!intel_display_device_enabled(dev_priv))
 		return connector_status_disconnected;
 
+	if (!intel_display_driver_check_access(dev_priv))
+		return connector->status;
+
 	if (dev_priv->display.params.load_detect_test) {
 		wakeref = intel_display_power_get(dev_priv,
 						  intel_encoder->power_domain);
@@ -1069,6 +1073,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	} else {
 		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 	}
+	intel_connector->base.polled = intel_connector->polled;
 
 	if (HAS_DDI(dev_priv)) {
 		assert_port_valid(dev_priv, PORT_E);
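Two small intel_crt.c changes: intel_crt_detect() now returns the last known connector status instead of touching hardware when intel_display_driver_check_access() says the display driver cannot service the request, and intel_crt_init() seeds the drm-core drm_connector.polled field from the driver-side value so the core's poll logic sees the same capabilities. A hedged sketch of the guarded ->detect() shape (my_detect is a made-up callback name):

	static enum drm_connector_status
	my_detect(struct drm_connector *connector, bool force)
	{
		struct drm_i915_private *i915 = to_i915(connector->dev);

		if (!intel_display_driver_check_access(i915))
			return connector->status;	/* last known state */

		/* ... the real, register-touching probe goes here ... */
		return connector_status_unknown;
	}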
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 8a84a31c7b48..25593f6aae7d 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -461,70 +461,6 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			    1000 * adjusted_mode->crtc_htotal);
 }
 
-static int intel_mode_vblank_start(const struct drm_display_mode *mode)
-{
-	int vblank_start = mode->crtc_vblank_start;
-
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		vblank_start = DIV_ROUND_UP(vblank_start, 2);
-
-	return vblank_start;
-}
-
-static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
-					      struct intel_crtc *crtc,
-					      int *min, int *max, int *vblank_start)
-{
-	const struct intel_crtc_state *old_crtc_state =
-		intel_atomic_get_old_crtc_state(state, crtc);
-	const struct intel_crtc_state *new_crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct intel_crtc_state *crtc_state;
-	const struct drm_display_mode *adjusted_mode;
-
-	/*
-	 * During fastsets/etc. the transcoder is still
-	 * running with the old timings at this point.
-	 *
-	 * TODO: maybe just use the active timings here?
-	 */
-	if (intel_crtc_needs_modeset(new_crtc_state))
-		crtc_state = new_crtc_state;
-	else
-		crtc_state = old_crtc_state;
-
-	adjusted_mode = &crtc_state->hw.adjusted_mode;
-
-	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
-		/* timing changes should happen with VRR disabled */
-		drm_WARN_ON(state->base.dev, intel_crtc_needs_modeset(new_crtc_state) ||
-			    new_crtc_state->update_m_n || new_crtc_state->update_lrr);
-
-		if (intel_vrr_is_push_sent(crtc_state))
-			*vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
-		else
-			*vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
-	} else {
-		*vblank_start = intel_mode_vblank_start(adjusted_mode);
-	}
-
-	/* FIXME needs to be calibrated sensibly */
-	*min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
-							VBLANK_EVASION_TIME_US);
-	*max = *vblank_start - 1;
-
-	/*
-	 * M/N and TRANS_VTOTAL are double buffered on the transcoder's
-	 * undelayed vblank, so with seamless M/N and LRR we must evade
-	 * both vblanks.
-	 *
-	 * DSB execution waits for the transcoder's undelayed vblank,
-	 * hence we must kick off the commit before that.
-	 */
-	if (new_crtc_state->dsb || new_crtc_state->update_m_n || new_crtc_state->update_lrr)
-		*min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
-}
-
 /**
  * intel_pipe_update_start() - start update of a set of display registers
  * @state: the atomic state
@@ -542,14 +478,12 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
 			     struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
 	struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	long timeout = msecs_to_jiffies_timeout(1);
-	int scanline, min, max, vblank_start;
-	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
-	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-		intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
-	DEFINE_WAIT(wait);
+	struct intel_vblank_evade_ctx evade;
+	int scanline;
 
 	intel_psr_lock(new_crtc_state);
 
@@ -566,9 +500,7 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
 	if (intel_crtc_needs_vblank_work(new_crtc_state))
 		intel_crtc_vblank_work_init(new_crtc_state);
 
-	intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
-	if (min <= 0 || max <= 0)
-		goto irq_disable;
+	intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);
 
 	if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
 		goto irq_disable;
@@ -582,58 +514,14 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
 
 	local_irq_disable();
 
-	crtc->debug.min_vbl = min;
-	crtc->debug.max_vbl = max;
+	crtc->debug.min_vbl = evade.min;
+	crtc->debug.max_vbl = evade.max;
 	trace_intel_pipe_update_start(crtc);
 
-	for (;;) {
-		/*
-		 * prepare_to_wait() has a memory barrier, which guarantees
-		 * other CPUs can see the task state update by the time we
-		 * read the scanline.
-		 */
-		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
-
-		scanline = intel_get_crtc_scanline(crtc);
-		if (scanline < min || scanline > max)
-			break;
-
-		if (!timeout) {
-			drm_err(&dev_priv->drm,
-				"Potential atomic update failure on pipe %c\n",
-				pipe_name(crtc->pipe));
-			break;
-		}
-
-		local_irq_enable();
-
-		timeout = schedule_timeout(timeout);
-
-		local_irq_disable();
-	}
-
-	finish_wait(wq, &wait);
+	scanline = intel_vblank_evade(&evade);
 
 	drm_crtc_vblank_put(&crtc->base);
 
-	/*
-	 * On VLV/CHV DSI the scanline counter would appear to
-	 * increment approx. 1/3 of a scanline before start of vblank.
-	 * The registers still get latched at start of vblank however.
-	 * This means we must not write any registers on the first
-	 * line of vblank (since not the whole line is actually in
-	 * vblank). And unfortunately we can't use the interrupt to
-	 * wait here since it will fire too soon. We could use the
-	 * frame start interrupt instead since it will fire after the
-	 * critical scanline, but that would require more changes
-	 * in the interrupt code. So for now we'll just do the nasty
-	 * thing and poll for the bad scanline to pass us by.
-	 *
-	 * FIXME figure out if BXT+ DSI suffers from this as well
-	 */
-	while (need_vlv_dsi_wa && scanline == vblank_start)
-		scanline = intel_get_crtc_scanline(crtc);
-
 	crtc->debug.scanline_start = scanline;
 	crtc->debug.start_vbl_time = ktime_get();
 	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
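The large intel_crtc.c removal is code motion: the evasion-window computation (intel_crtc_vblank_evade_scanlines()) and the scanline wait loop move behind an intel_vblank_evade_ctx and the intel_vblank_evade_init()/intel_vblank_evade() pair, presumably alongside the other vblank helpers (the VLV/CHV DSI scanline workaround travels with them). Callers reduce to this shape, condensed from the diff with error handling and the vblank reference elided:

	struct intel_vblank_evade_ctx evade;
	int scanline;

	intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);

	local_irq_disable();
	/* returns a scanline outside the critical [min, max] window */
	scanline = intel_vblank_evade(&evade);
	/* ... write the double-buffered registers here ... */
	local_irq_enable();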
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 49fd100ec98a..4bcf446c75f4 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -55,10 +55,9 @@ static void
 intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
 		      const struct drm_dp_vsc_sdp *vsc)
 {
-	if (!drm_debug_enabled(DRM_UT_KMS))
-		return;
+	struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
 
-	drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
+	drm_dp_vsc_sdp_log(&p, vsc);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 926e2de00eb5..f8b33999d43f 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -22,6 +22,7 @@
 #include "intel_frontbuffer.h"
 #include "intel_psr.h"
 #include "intel_psr_regs.h"
+#include "intel_vblank.h"
 #include "skl_watermark.h"
 
 #include "gem/i915_gem_object.h"
@@ -47,12 +48,23 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 	return base + plane_state->view.color_plane[0].offset;
 }
 
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state,
+				 const struct intel_plane_state *plane_state,
+				 bool early_tpt)
 {
 	int x = plane_state->uapi.dst.x1;
 	int y = plane_state->uapi.dst.y1;
 	u32 pos = 0;
 
+	/*
+	 * Formula from Bspec:
+	 * MAX(-1 * <Cursor vertical size from CUR_CTL base on cursor mode
+	 * select setting> + 1, CUR_POS Y Position - Update region Y position
+	 */
+	if (early_tpt)
+		y = max(-1 * drm_rect_height(&plane_state->uapi.dst) + 1,
+			y - crtc_state->psr2_su_area.y1);
+
 	if (x < 0) {
 		pos |= CURSOR_POS_X_SIGN;
 		x = -x;
@@ -274,7 +286,7 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
 		size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
 
 		base = intel_cursor_base(plane_state);
-		pos = intel_cursor_position(plane_state);
+		pos = intel_cursor_position(crtc_state, plane_state, false);
 	}
 
 	/* On these chipsets we can only modify the base/size/stride
@@ -503,17 +515,24 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
 					     const struct intel_crtc_state *crtc_state,
 					     const struct intel_plane_state *plane_state)
 {
-	struct drm_i915_private *i915 = to_i915(plane->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
 
 	if (!crtc_state->enable_psr2_sel_fetch)
 		return;
 
-	if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
-		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+	if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) {
+		if (crtc_state->enable_psr2_su_region_et) {
+			u32 val = intel_cursor_position(crtc_state, plane_state,
+							true);
+			intel_de_write_fw(dev_priv, CURPOS_ERLY_TPT(pipe), val);
+		}
+
+		intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
 				  plane_state->ctl);
-	else
+	} else {
 		i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+	}
 }
 
 /* TODO: split into noarm+arm pair */
@@ -536,7 +555,7 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
 			fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
 
 		base = intel_cursor_base(plane_state);
-		pos = intel_cursor_position(plane_state);
+		pos = intel_cursor_position(crtc_state, plane_state, false);
 	}
 
 	/*
@@ -647,12 +666,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 {
 	struct intel_plane *plane = to_intel_plane(_plane);
 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
 	struct intel_plane_state *old_plane_state =
 		to_intel_plane_state(plane->base.state);
 	struct intel_plane_state *new_plane_state;
 	struct intel_crtc_state *crtc_state =
 		to_intel_crtc_state(crtc->base.state);
 	struct intel_crtc_state *new_crtc_state;
+	struct intel_vblank_evade_ctx evade;
 	int ret;
 
 	/*
@@ -745,13 +766,25 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	 */
 	crtc_state->active_planes = new_crtc_state->active_planes;
 
-	/*
-	 * Technically we should do a vblank evasion here to make
-	 * sure all the cursor registers update on the same frame.
-	 * For now just make sure the register writes happen as
-	 * quickly as possible to minimize the race window.
-	 */
-	local_irq_disable();
+	intel_vblank_evade_init(crtc_state, crtc_state, &evade);
+
+	intel_psr_lock(crtc_state);
+
+	if (!drm_WARN_ON(&i915->drm, drm_crtc_vblank_get(&crtc->base))) {
+		/*
+		 * TODO: maybe check if we're still in PSR
+		 * and skip the vblank evasion entirely?
+		 */
+		intel_psr_wait_for_idle_locked(crtc_state);
+
+		local_irq_disable();
+
+		intel_vblank_evade(&evade);
+
+		drm_crtc_vblank_put(&crtc->base);
+	} else {
+		local_irq_disable();
+	}
 
 	if (new_plane_state->uapi.visible) {
 		intel_plane_update_noarm(plane, crtc_state, new_plane_state);
@@ -762,6 +795,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 
 	local_irq_enable();
 
+	intel_psr_unlock(crtc_state);
+
 	intel_plane_unpin_fb(old_plane_state);
 
 out_free:
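In intel_cursor.c, intel_cursor_position() grows an early_tpt mode for PSR2 early transport: the Y coordinate written to CURPOS_ERLY_TPT is expressed relative to the selective-update region and clamped by the Bspec formula quoted in the hunk. A worked example with assumed numbers:

	/* Assumed values, for illustration only. */
	int y = 200;		/* plane_state->uapi.dst.y1 */
	int su_y1 = 150;	/* crtc_state->psr2_su_area.y1 */
	int height = 64;	/* drm_rect_height(&plane_state->uapi.dst) */

	y = max(-height + 1, y - su_y1);	/* max(-63, 50) == 50 */

A cursor entirely above the update region instead hits the -height + 1 clamp, keeping the programmed value in range. The legacy cursor ioctl path also gains real vblank evasion plus PSR lock/unlock around the register writes, replacing the old best-effort "write as fast as possible" approach.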
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 6b25e195232f..288a00e083c8 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -78,7 +78,7 @@ static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
 
 	for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
 		intel_de_rmw(i915,
-			     XELPDP_PORT_MSGBUS_TIMER(encoder->port, lane),
+			     XELPDP_PORT_MSGBUS_TIMER(i915, encoder->port, lane),
 			     XELPDP_PORT_MSGBUS_TIMER_VAL_MASK,
 			     XELPDP_PORT_MSGBUS_TIMER_VAL);
 }
@@ -117,7 +117,7 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
 static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
 					    enum port port, int lane)
 {
-	intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+	intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
 		     0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
 }
 
@@ -125,10 +125,10 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
 {
 	enum phy phy = intel_port_to_phy(i915, port);
 
-	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
 		       XELPDP_PORT_M2P_TRANSACTION_RESET);
 
-	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
 				    XELPDP_PORT_M2P_TRANSACTION_RESET,
 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
 		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
@@ -144,7 +144,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
 	enum phy phy = intel_port_to_phy(i915, port);
 
 	if (__intel_de_wait_for_register(i915,
-					 XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+					 XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
 					 XELPDP_PORT_P2M_RESPONSE_READY,
 					 XELPDP_PORT_P2M_RESPONSE_READY,
 					 XELPDP_MSGBUS_TIMEOUT_FAST_US,
@@ -152,7 +152,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
 		drm_dbg_kms(&i915->drm,
 			    "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
 			    phy_name(phy), *val);
 
-		if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(port, lane)) &
+		if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(i915, port, lane)) &
 		      XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT))
 			drm_dbg_kms(&i915->drm,
 				    "PHY %c Hardware did not detect a timeout\n",
@@ -186,7 +186,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
 	int ack;
 	u32 val;
 
-	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
 				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
 		drm_dbg_kms(&i915->drm,
@@ -195,7 +195,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
 		return -ETIMEDOUT;
 	}
 
-	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
 		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
 		       XELPDP_PORT_M2P_COMMAND_READ |
 		       XELPDP_PORT_M2P_ADDRESS(addr));
@@ -253,7 +253,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
 	int ack;
 	u32 val;
 
-	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
 				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
 		drm_dbg_kms(&i915->drm,
@@ -262,14 +262,14 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
 		return -ETIMEDOUT;
 	}
 
-	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
 		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
 		       (committed ?
 			XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
 			XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
 		       XELPDP_PORT_M2P_DATA(data) |
 		       XELPDP_PORT_M2P_ADDRESS(addr));
 
-	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
 				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
 		drm_dbg_kms(&i915->drm,
@@ -282,7 +282,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
 		ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
 		if (ack < 0)
 			return ack;
-	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
+	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
 		    XELPDP_PORT_P2M_ERROR_SET)) {
 		drm_dbg_kms(&i915->drm,
 			    "PHY %c Error occurred during write command.\n", phy_name(phy));
@@ -2096,13 +2096,54 @@ int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
 	return intel_c20pll_calc_state(crtc_state, encoder);
 }
 
-static bool intel_c20_use_mplla(u32 clock)
+static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state)
 {
-	/* 10G and 20G rates use MPLLA */
-	if (clock == 1000000 || clock == 2000000)
-		return true;
+	return state->tx[0] & C20_PHY_USE_MPLLB;
+}
 
-	return false;
+static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
+					const struct intel_c20pll_state *pll_state)
+{
+	unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
+	unsigned int multiplier, refclk = 38400;
+	unsigned int tx_clk_div;
+	unsigned int ref_clk_mpllb_div;
+	unsigned int fb_clk_div4_en;
+	unsigned int ref, vco;
+	unsigned int tx_rate_mult;
+	unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
+
+	if (intel_c20phy_use_mpllb(pll_state)) {
+		tx_rate_mult = 1;
+		frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
+		frac_quot = pll_state->mpllb[8];
+		frac_rem = pll_state->mpllb[9];
+		frac_den = pll_state->mpllb[7];
+		multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
+		tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
+		ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
+		fb_clk_div4_en = 0;
+	} else {
+		tx_rate_mult = 2;
+		frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
+		frac_quot = pll_state->mplla[8];
+		frac_rem = pll_state->mplla[9];
+		frac_den = pll_state->mplla[7];
+		multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
+		tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
+		ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
+		fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
+	}
+
+	if (frac_en)
+		frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
+	else
+		frac = 0;
+
+	ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
+	vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
+
+	return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
 }
 
 static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
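The relocated intel_c20pll_calc_port_clock() lets readout recover the link clock purely from the C20 PHY SRAM state, which is what enables the new pll_state->clock assignment in intel_c20pll_readout_hw_state() below (and the clock cross-check added to the state verifier further down). Reading the integer math rather than Bspec, the computation amounts to:

	/*
	 * ref   = refclk * 2^(1 + fb_clk_div4_en) / 2^ref_clk_mpllb_div
	 * vco   = ref * (multiplier / 4 + frac / 2^17) / 10
	 * clock = vco * 2^tx_rate_mult / 2^tx_clk_div / 2^tx_rate
	 *
	 * i.e. multiplier is in quarter units and frac in 1/2^17 units;
	 * (multiplier << (17 - 2)) + frac, then >> 17, implements the sum
	 * in one fixed-point expression.
	 */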
@@ -2138,7 +2179,7 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
 						       PHY_C20_A_CMN_CNTX_CFG(i));
 	}
 
-	if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
+	if (intel_c20phy_use_mpllb(pll_state)) {
 		/* MPLLB configuration */
 		for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
 			if (cntx)
@@ -2160,6 +2201,8 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
 		}
 	}
 
+	pll_state->clock = intel_c20pll_calc_port_clock(encoder, pll_state);
+
 	intel_cx0_phy_transaction_end(encoder, wakeref);
 }
 
@@ -2174,12 +2217,12 @@ void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
 	drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n",
 		    hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]);
 
-	if (intel_c20_use_mplla(hw_state->clock)) {
-		for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
-			drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
-	} else {
+	if (intel_c20phy_use_mpllb(hw_state)) {
 		for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++)
 			drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]);
+	} else {
+		for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
+			drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
 	}
 }
 
@@ -2326,27 +2369,27 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
 	}
 
 	/* 3.3 mpllb or mplla configuration */
-	if (intel_c20_use_mplla(clock)) {
-		for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
+	if (intel_c20phy_use_mpllb(pll_state)) {
+		for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
 			if (cntx)
 				intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
-						     PHY_C20_A_MPLLA_CNTX_CFG(i),
-						     pll_state->mplla[i]);
+						     PHY_C20_A_MPLLB_CNTX_CFG(i),
+						     pll_state->mpllb[i]);
 			else
 				intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
-						     PHY_C20_B_MPLLA_CNTX_CFG(i),
-						     pll_state->mplla[i]);
+						     PHY_C20_B_MPLLB_CNTX_CFG(i),
+						     pll_state->mpllb[i]);
 		}
 	} else {
-		for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
+		for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
 			if (cntx)
 				intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
-						     PHY_C20_A_MPLLB_CNTX_CFG(i),
-						     pll_state->mpllb[i]);
+						     PHY_C20_A_MPLLA_CNTX_CFG(i),
+						     pll_state->mplla[i]);
 			else
 				intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
-						     PHY_C20_B_MPLLB_CNTX_CFG(i),
-						     pll_state->mpllb[i]);
+						     PHY_C20_B_MPLLA_CNTX_CFG(i),
+						     pll_state->mplla[i]);
 		}
 	}
 
@@ -2408,51 +2451,6 @@ static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
 	return tmpclk;
 }
 
-static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
-					const struct intel_c20pll_state *pll_state)
-{
-	unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
-	unsigned int multiplier, refclk = 38400;
-	unsigned int tx_clk_div;
-	unsigned int ref_clk_mpllb_div;
-	unsigned int fb_clk_div4_en;
-	unsigned int ref, vco;
-	unsigned int tx_rate_mult;
-	unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
-
-	if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
-		tx_rate_mult = 1;
-		frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
-		frac_quot = pll_state->mpllb[8];
-		frac_rem = pll_state->mpllb[9];
-		frac_den = pll_state->mpllb[7];
-		multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
-		tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
-		ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
-		fb_clk_div4_en = 0;
-	} else {
-		tx_rate_mult = 2;
-		frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
-		frac_quot = pll_state->mplla[8];
-		frac_rem = pll_state->mplla[9];
-		frac_den = pll_state->mplla[7];
-		multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
-		tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
-		ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
-		fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
-	}
-
-	if (frac_en)
-		frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
-	else
-		frac = 0;
-
-	ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
-	vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
-
-	return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
-}
-
 static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
 					 const struct intel_crtc_state *crtc_state,
 					 bool lane_reversal)
@@ -2460,7 +2458,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	u32 val = 0;
 
-	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL,
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+		     XELPDP_PORT_REVERSAL,
 		     lane_reversal ? XELPDP_PORT_REVERSAL : 0);
 
 	if (lane_reversal)
@@ -2481,7 +2480,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
 	else
 		val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
 
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 		     XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
 		     XELPDP_DDI_CLOCK_SELECT_MASK |
 		     XELPDP_SSC_ENABLE_PLLA | XELPDP_SSC_ENABLE_PLLB,
 		     val);
@@ -2514,15 +2513,16 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
 						u8 lane_mask, u8 state)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
+	i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
 	int lane;
 
-	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+	intel_de_rmw(i915, buf_ctl2_reg,
 		     intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK),
 		     intel_cx0_get_powerdown_state(lane_mask, state));
 
 	/* Wait for pending transactions.*/
 	for_each_cx0_lane_in_mask(lane_mask, lane)
-		if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+		if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
 					    XELPDP_PORT_M2P_TRANSACTION_PENDING,
 					    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
 			drm_dbg_kms(&i915->drm,
@@ -2531,12 +2531,12 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
 			intel_cx0_bus_reset(i915, port, lane);
 		}
 
-	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+	intel_de_rmw(i915, buf_ctl2_reg,
 		     intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES),
 		     intel_cx0_get_powerdown_update(lane_mask));
 
 	/* Update Timeout Value */
-	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+	if (__intel_de_wait_for_register(i915, buf_ctl2_reg,
 					 intel_cx0_get_powerdown_update(lane_mask), 0,
 					 XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
@@ -2545,10 +2545,10 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
 
 static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
 {
-	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
 		     XELPDP_POWER_STATE_READY_MASK,
 		     XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
-	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(i915, port),
 		     XELPDP_POWER_STATE_ACTIVE_MASK |
 		     XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
 		     XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
@@ -2593,27 +2593,27 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
 			  XELPDP_LANE_PHY_CURRENT_STATUS(1))
 			 : XELPDP_LANE_PHY_CURRENT_STATUS(0);
 
-	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port),
+	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port),
 					 XELPDP_PORT_BUF_SOC_PHY_READY,
 					 XELPDP_PORT_BUF_SOC_PHY_READY,
 					 XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
 			 phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
 
-	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
 		     lane_pipe_reset);
 
-	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port),
 					 lane_phy_current_status, lane_phy_current_status,
 					 XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
 			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
 
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
 		     intel_cx0_get_pclk_refclk_request(owned_lane_mask),
 		     intel_cx0_get_pclk_refclk_request(lane_mask));
 
-	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port),
+	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
 					 intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
 					 intel_cx0_get_pclk_refclk_ack(lane_mask),
 					 XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
@@ -2624,9 +2624,10 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
 					  CX0_P2_STATE_RESET);
 	intel_cx0_setup_powerdown(i915, port);
 
-	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset, 0);
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
 
-	if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port), lane_phy_current_status,
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+				    lane_phy_current_status,
 				    XELPDP_PORT_RESET_END_TIMEOUT))
 		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
 			 phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
@@ -2761,12 +2762,12 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
 	 * 9. Set PORT_CLOCK_CTL register PCLK PLL Request
 	 * LN<Lane for maxPCLK> to "1" to enable PLL.
 	 */
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 		     intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES),
 		     intel_cx0_get_pclk_pll_request(maxpclk_lane));
 
 	/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
-	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 					 intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
 					 intel_cx0_get_pclk_pll_ack(maxpclk_lane),
 					 XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
@@ -2786,7 +2787,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	u32 clock;
-	u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+	u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
 
 	clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
 
@@ -2839,11 +2840,11 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
 	 */
 	val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock));
 	val |= XELPDP_FORWARD_CLOCK_UNGATE;
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 		     XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE,
 		     val);
 
 	/* 2. Read back PORT_CLOCK_CTL REGISTER */
-	val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+	val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
 
 	/*
 	 * 3. Follow the Display Voltage Frequency Switching - Sequence
@@ -2854,10 +2855,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
 	 * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL.
 	 */
 	val |= XELPDP_TBT_CLOCK_REQUEST;
-	intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), val);
+	intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
 
 	/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
-	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 					 XELPDP_TBT_CLOCK_ACK,
 					 XELPDP_TBT_CLOCK_ACK,
 					 100, 0, NULL))
@@ -2909,7 +2910,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
 	 * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
 	 * to "0" to disable PLL.
 	 */
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 		     intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) |
 		     intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0);
 
@@ -2919,7 +2920,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
 	/*
 	 * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
 	 */
-	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 					 intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
 					 intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
 					 XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
@@ -2932,9 +2933,9 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
 	 */
 
 	/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 		     XELPDP_DDI_CLOCK_SELECT_MASK, 0);
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 		     XELPDP_FORWARD_CLOCK_UNGATE, 0);
 
 	intel_cx0_phy_transaction_end(encoder, wakeref);
@@ -2953,11 +2954,11 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
 	/*
 	 * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL.
 	 */
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 		     XELPDP_TBT_CLOCK_REQUEST, 0);
 
 	/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
-	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 					 XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
 		drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
 			 encoder->base.base.id, encoder->base.name, phy_name(phy));
@@ -2970,7 +2971,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
 	/*
 	 * 5. Program PORT CLOCK CTRL register to disable and gate clocks
 	 */
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
 		     XELPDP_DDI_CLOCK_SELECT_MASK |
 		     XELPDP_FORWARD_CLOCK_UNGATE, 0);
 
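Two behavioral points in the intel_cx0_phy.c hunks: MPLLA vs MPLLB selection is now read from the C20_PHY_USE_MPLLB bit in tx[0] via intel_c20phy_use_mpllb() instead of being guessed from the link clock, and because readout now fills pll_state->clock, intel_c20pll_state_verify() below can compare the SW and HW clocks directly. The removed heuristic only recognized the 10G/20G rates:

	/* Old (removed): guess the PLL bank from the rate. */
	use_mplla = (clock == 1000000 || clock == 2000000);

	/* New (as in the diff): use the selection bit the HW itself uses. */
	use_mpllb = pll_state->tx[0] & C20_PHY_USE_MPLLB;

intel_c20_pll_program() now keys the same decision off the state bit, keeping programming, readout, dumping, and verification consistent with one another.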
@@ -2997,7 +2998,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
 	 * TODO: Determine the PLL type from the SW state, once MTL PLL
 	 * handling is done via the standard shared DPLL framework.
 	 */
-	u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+	u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
 	u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
 
 	if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
@@ -3016,6 +3017,9 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
 	const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
 	int i;
 
+	if (intel_crtc_needs_fastset(state))
+		return;
+
 	for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
 		u8 expected = mpllb_sw_state->pll[i];
 
@@ -3067,10 +3071,15 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
 {
 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 	const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
-	bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
-	bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
+	bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
+	bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
 	int i;
 
+	I915_STATE_WARN(i915, mpll_hw_state->clock != mpll_sw_state->clock,
+			"[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)",
+			crtc->base.base.id, crtc->base.name,
+			mpll_sw_state->clock, mpll_hw_state->clock);
+
 	I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
 			"[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
 			crtc->base.base.id, crtc->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index adf8f4ce0d49..bdd0c8c4ef97 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -7,16 +7,39 @@
 #define __INTEL_CX0_PHY_REGS_H__
 
 #include "i915_reg_defs.h"
+#include "intel_display_limits.h"
+
+/*
+ * Wrapper macro to convert from port number to the index used in some of the
+ * registers. For Display version 20 and above it converts the port number to a
+ * single range, starting with the TC offsets. When used together with
+ * _PICK_EVEN_2RANGES(idx, PORT_TC1, ...), this single range will be the second
+ * range. Example:
+ *
+ *   PORT_TC1 -> PORT_TC1
+ *   PORT_TC2 -> PORT_TC2
+ *   PORT_TC3 -> PORT_TC3
+ *   PORT_TC4 -> PORT_TC4
+ *   PORT_A  -> PORT_TC4 + 1
+ *   PORT_B  -> PORT_TC4 + 2
+ *   ...
+ */
+#define __xe2lpd_port_idx(port) \
+	(port >= PORT_TC1 ? port : PORT_TC4 + 1 + port - PORT_A)
 
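__xe2lpd_port_idx() folds the two register ranges into one for display version 20+: TC ports keep their value, non-TC ports are appended after PORT_TC4, per the table in the comment. Combined with _PICK_EVEN_2RANGES(idx, PORT_TC1, ...), every Xe2 port then lands in the second (USBC-offset) range. Illustrative expansion, assuming only the enum port ordering the macro itself implies (PORT_A < PORT_TC1 <= PORT_TC4):

	__xe2lpd_port_idx(PORT_TC2);	/* -> PORT_TC2 (unchanged) */
	__xe2lpd_port_idx(PORT_A);	/* -> PORT_TC4 + 1 */
	__xe2lpd_port_idx(PORT_B);	/* -> PORT_TC4 + 2 */

The per-register wrappers below select between the remapped and the legacy index on DISPLAY_VER(i915) >= 20, which is why every XELPDP_* accessor in the .c files above gained an i915 argument.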
 #define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A		0x64040
 #define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B		0x64140
 #define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1		0x16F240
 #define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2		0x16F440
-#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)		_MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_M2P_MSGBUS_CTL(idx, lane)		_MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
 								 _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
 								 _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
 								 _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
 								 _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4)
+#define XELPDP_PORT_M2P_MSGBUS_CTL(i915__, port, lane) \
+	(DISPLAY_VER(i915__) >= 20 ? \
+	 _XELPDP_PORT_M2P_MSGBUS_CTL(__xe2lpd_port_idx(port), lane) : \
+	 _XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))
 #define   XELPDP_PORT_M2P_TRANSACTION_PENDING		REG_BIT(31)
 #define   XELPDP_PORT_M2P_COMMAND_TYPE_MASK		REG_GENMASK(30, 27)
 #define   XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
@@ -27,11 +50,16 @@
 #define   XELPDP_PORT_M2P_TRANSACTION_RESET		REG_BIT(15)
 #define   XELPDP_PORT_M2P_ADDRESS_MASK			REG_GENMASK(11, 0)
 #define   XELPDP_PORT_M2P_ADDRESS(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
-#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)	_MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+
+#define _XELPDP_PORT_P2M_MSGBUS_STATUS(idx, lane)	_MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
 								 _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
 								 _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
 								 _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
 								 _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4 + 8)
+#define XELPDP_PORT_P2M_MSGBUS_STATUS(i915__, port, lane) \
+	(DISPLAY_VER(i915__) >= 20 ? \
+	 _XELPDP_PORT_P2M_MSGBUS_STATUS(__xe2lpd_port_idx(port), lane) : \
+	 _XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane))
 #define   XELPDP_PORT_P2M_RESPONSE_READY		REG_BIT(31)
 #define   XELPDP_PORT_P2M_COMMAND_TYPE_MASK		REG_GENMASK(30, 27)
 #define   XELPDP_PORT_P2M_COMMAND_READ_ACK		0x4
@@ -54,11 +82,15 @@
 #define _XELPDP_PORT_BUF_CTL1_LN0_B			0x64104
 #define _XELPDP_PORT_BUF_CTL1_LN0_USBC1			0x16F200
 #define _XELPDP_PORT_BUF_CTL1_LN0_USBC2			0x16F400
-#define XELPDP_PORT_BUF_CTL1(port)			_MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL1(idx)			_MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_A, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_B, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_USBC2))
+#define XELPDP_PORT_BUF_CTL1(i915__, port) \
+	(DISPLAY_VER(i915__) >= 20 ? \
+	 _XELPDP_PORT_BUF_CTL1(__xe2lpd_port_idx(port)) : \
+	 _XELPDP_PORT_BUF_CTL1(port))
 #define   XELPDP_PORT_BUF_D2D_LINK_ENABLE		REG_BIT(29)
 #define   XELPDP_PORT_BUF_D2D_LINK_STATE		REG_BIT(28)
 #define   XELPDP_PORT_BUF_SOC_PHY_READY			REG_BIT(24)
@@ -75,12 +107,15 @@
 #define   XELPDP_PORT_WIDTH_MASK			REG_GENMASK(3, 1)
 #define   XELPDP_PORT_WIDTH(val)			REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
 
-#define XELPDP_PORT_BUF_CTL2(port)			_MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL2(idx)			_MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_A, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_B, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 4)
-
+#define XELPDP_PORT_BUF_CTL2(i915__, port) \
+	(DISPLAY_VER(i915__) >= 20 ? \
+	 _XELPDP_PORT_BUF_CTL2(__xe2lpd_port_idx(port)) : \
+	 _XELPDP_PORT_BUF_CTL2(port))
 #define   XELPDP_LANE_PIPE_RESET(lane)			_PICK(lane, REG_BIT(31), REG_BIT(30))
 #define   XELPDP_LANE_PHY_CURRENT_STATUS(lane)		_PICK(lane, REG_BIT(29), REG_BIT(28))
 #define   XELPDP_LANE_POWERDOWN_UPDATE(lane)		_PICK(lane, REG_BIT(25), REG_BIT(24))
@@ -95,11 +130,15 @@
 #define   XELPDP_POWER_STATE_READY_MASK			REG_GENMASK(7, 4)
 #define   XELPDP_POWER_STATE_READY(val)			REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)
 
-#define XELPDP_PORT_BUF_CTL3(port)			_MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_BUF_CTL3(idx)			_MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_A, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_B, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
 								 _XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 8)
+#define XELPDP_PORT_BUF_CTL3(i915__, port) \
+	(DISPLAY_VER(i915__) >= 20 ? \
+	 _XELPDP_PORT_BUF_CTL3(__xe2lpd_port_idx(port)) : \
+	 _XELPDP_PORT_BUF_CTL3(port))
 #define   XELPDP_PLL_LANE_STAGGERING_DELAY_MASK		REG_GENMASK(15, 8)
 #define   XELPDP_PLL_LANE_STAGGERING_DELAY(val)		REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
 #define   XELPDP_POWER_STATE_ACTIVE_MASK		REG_GENMASK(3, 0)
@@ -114,11 +153,15 @@
 #define _XELPDP_PORT_MSGBUS_TIMER_LN0_B			0x641d8
 #define _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC1		0x16f258
 #define _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC2		0x16f458
-#define XELPDP_PORT_MSGBUS_TIMER(port, lane)		_MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_MSGBUS_TIMER(port, lane)		_MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
 								 _XELPDP_PORT_MSGBUS_TIMER_LN0_A, \
 								 _XELPDP_PORT_MSGBUS_TIMER_LN0_B, \
 								 _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC1, \
 								 _XELPDP_PORT_MSGBUS_TIMER_LN0_USBC2) + (lane) * 4)
+#define XELPDP_PORT_MSGBUS_TIMER(i915__, port, lane) \
+	(DISPLAY_VER(i915__) >= 20 ? \
+	 _XELPDP_PORT_MSGBUS_TIMER(__xe2lpd_port_idx(port), lane) : \
+	 _XELPDP_PORT_MSGBUS_TIMER(port, lane))
 #define   XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT		REG_BIT(31)
 #define   XELPDP_PORT_MSGBUS_TIMER_VAL_MASK		REG_GENMASK(23, 0)
 #define   XELPDP_PORT_MSGBUS_TIMER_VAL			REG_FIELD_PREP(XELPDP_PORT_MSGBUS_TIMER_VAL_MASK, 0xa000)
@@ -127,11 +170,15 @@
 #define _XELPDP_PORT_CLOCK_CTL_B			0x641E0
 #define _XELPDP_PORT_CLOCK_CTL_USBC1			0x16F260
 #define _XELPDP_PORT_CLOCK_CTL_USBC2			0x16F460
-#define XELPDP_PORT_CLOCK_CTL(port)			_MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+#define _XELPDP_PORT_CLOCK_CTL(idx)			_MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
 								 _XELPDP_PORT_CLOCK_CTL_A, \
 								 _XELPDP_PORT_CLOCK_CTL_B, \
 								 _XELPDP_PORT_CLOCK_CTL_USBC1, \
 								 _XELPDP_PORT_CLOCK_CTL_USBC2))
+#define XELPDP_PORT_CLOCK_CTL(i915__, port) \
+	(DISPLAY_VER(i915__) >= 20 ? \
+	 _XELPDP_PORT_CLOCK_CTL(__xe2lpd_port_idx(port)) : \
+	 _XELPDP_PORT_CLOCK_CTL(port))
 #define   XELPDP_LANE_PCLK_PLL_REQUEST(lane)		REG_BIT(31 - ((lane) * 4))
 #define   XELPDP_LANE_PCLK_PLL_ACK(lane)		REG_BIT(30 - ((lane) * 4))
 #define   XELPDP_LANE_PCLK_REFCLK_REQUEST(lane)		REG_BIT(29 - ((lane) * 4))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 12a29363e5df..bea441590204 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -178,7 +178,7 @@ static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port)
 	int ret;
 
 	/* FIXME: find out why Bspec's 100us timeout is too short */
-	ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) &
+	ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port)) &
 			   XELPDP_PORT_BUF_PHY_IDLE), 10000);
 	if (ret)
 		drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n",
@@ -226,7 +226,9 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
 	}
 
 	if (DISPLAY_VER(dev_priv) >= 14)
-		ret = _wait_for(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_PHY_IDLE),
+		ret = _wait_for(!(intel_de_read(dev_priv,
+						XELPDP_PORT_BUF_CTL1(dev_priv, port)) &
+				  XELPDP_PORT_BUF_PHY_IDLE),
 				timeout_us, 10, 10);
 	else
 		ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE),
@@ -2429,13 +2431,22 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
+	i915_reg_t reg;
+	u32 set_bits, wait_bits;
 
-	intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), 0,
-		     XELPDP_PORT_BUF_D2D_LINK_ENABLE);
+	if (DISPLAY_VER(dev_priv) >= 20) {
+		reg = DDI_BUF_CTL(port);
+		set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+		wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
+	} else {
+		reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+		set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
+		wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
+	}
 
-	if (wait_for_us((intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
-			 XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) {
-		drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for PORT_BUF_CTL %c\n",
+	intel_de_rmw(dev_priv, reg, 0, set_bits);
+	if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
+		drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
 			port_name(port));
 	}
 }
XE2LPD_DDI_BUF_D2D_LINK_ENABLE; + wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE; + } else { + reg = XELPDP_PORT_BUF_CTL1(dev_priv, port); + set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE; + wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE; + } - if (wait_for_us((intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & - XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) { - drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for PORT_BUF_CTL %c\n", + intel_de_rmw(dev_priv, reg, 0, set_bits); + if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) { + drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n", port_name(port)); } } @@ -2448,7 +2459,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder, enum port port = encoder->port; u32 val; - val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); + val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port)); val &= ~XELPDP_PORT_WIDTH_MASK; val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count)); @@ -2461,7 +2472,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder, if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL) val |= XELPDP_PORT_REVERSAL; - intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); + intel_de_write(i915, XELPDP_PORT_BUF_CTL1(i915, port), val); } static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder) @@ -2472,7 +2483,7 @@ static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder) val = intel_tc_port_in_tbt_alt_mode(dig_port) ? XELPDP_PORT_BUF_IO_SELECT_TBT : 0; - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port), XELPDP_PORT_BUF_IO_SELECT_TBT, val); } @@ -2898,13 +2909,22 @@ mtl_ddi_disable_d2d_link(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; + i915_reg_t reg; + u32 clr_bits, wait_bits; - intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), - XELPDP_PORT_BUF_D2D_LINK_ENABLE, 0); + if (DISPLAY_VER(dev_priv) >= 20) { + reg = DDI_BUF_CTL(port); + clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE; + wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE; + } else { + reg = XELPDP_PORT_BUF_CTL1(dev_priv, port); + clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE; + wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE; + } - if (wait_for_us(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & - XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) - drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for PORT_BUF_CTL %c\n", + intel_de_rmw(dev_priv, reg, clr_bits, 0); + if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100)) + drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n", port_name(port)); } @@ -3038,7 +3058,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, /* De-select Thunderbolt */ if (DISPLAY_VER(dev_priv) >= 14) - intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(encoder->port), + intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port), XELPDP_PORT_BUF_IO_SELECT_TBT, 0); } @@ -3319,10 +3339,13 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL) port_buf |= XELPDP_PORT_REVERSAL; - intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), + intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port), XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf); buf_ctl |= DDI_PORT_WIDTH(lane_count); + + if (DISPLAY_VER(dev_priv) >= 20) + buf_ctl |= 
XE2LPD_DDI_BUF_D2D_LINK_ENABLE; } else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) { drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port)); buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; @@ -3543,6 +3566,9 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp, /* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */ intel_dp->DP |= DDI_BUF_CTL_ENABLE; + if (DISPLAY_VER(dev_priv) >= 20) + intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE; + intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); @@ -3941,11 +3967,11 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, if (DISPLAY_VER(dev_priv) >= 8) bdw_get_trans_port_sync_config(pipe_config); + intel_psr_get_config(encoder, pipe_config); + intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA); intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC); - intel_psr_get_config(encoder, pipe_config); - intel_audio_codec_get_config(encoder, pipe_config); } @@ -5117,6 +5143,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, encoder->suspend_complete = intel_ddi_tc_encoder_suspend_complete; encoder->shutdown_complete = intel_ddi_tc_encoder_shutdown_complete; + dig_port->lock = intel_tc_port_lock; + dig_port->unlock = intel_tc_port_unlock; + if (intel_tc_port_init(dig_port, is_legacy) < 0) goto err; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b10aad15a63d..7db0655d8c9e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -104,6 +104,7 @@ #include "intel_pmdemand.h" #include "intel_pps.h" #include "intel_psr.h" +#include "intel_psr_regs.h" #include "intel_sdvo.h" #include "intel_snps_phy.h" #include "intel_tc.h" @@ -2706,6 +2707,15 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) */ intel_de_write(dev_priv, PIPESRC(pipe), PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); + + if (!crtc_state->enable_psr2_su_region_et) + return; + + width = drm_rect_width(&crtc_state->psr2_su_area); + height = drm_rect_height(&crtc_state->psr2_su_area); + + intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe), + PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); } static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) @@ -4764,7 +4774,11 @@ static bool intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, const struct drm_dp_vsc_sdp *b) { - return memcmp(a, b, sizeof(*a)) == 0; + return a->pixelformat == b->pixelformat && + a->colorimetry == b->colorimetry && + a->bpc == b->bpc && + a->dynamic_range == b->dynamic_range && + a->content_type == b->content_type; } static bool @@ -4799,28 +4813,27 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, } static void -pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, +pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915, bool fastset, const char *name, const struct drm_dp_vsc_sdp *a, const struct drm_dp_vsc_sdp *b) { + struct drm_printer p; + if (fastset) { - if (!drm_debug_enabled(DRM_UT_KMS)) - return; + p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL); - drm_dbg_kms(&dev_priv->drm, - "fastset requirement not met in %s dp sdp\n", name); - drm_dbg_kms(&dev_priv->drm, "expected:\n"); - drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); - drm_dbg_kms(&dev_priv->drm, "found:\n"); - drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, 
b); + drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name); } else { - drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name); - drm_err(&dev_priv->drm, "expected:\n"); - drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a); - drm_err(&dev_priv->drm, "found:\n"); - drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b); + p = drm_err_printer(&i915->drm, NULL); + + drm_printf(&p, "mismatch in %s dp sdp\n", name); } + + drm_printf(&p, "expected:\n"); + drm_dp_vsc_sdp_log(&p, a); + drm_printf(&p, "found:\n"); + drm_dp_vsc_sdp_log(&p, b); } /* Returns the length up to and including the last differing byte */ @@ -5045,8 +5058,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } while (0) #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ - if (!current_config->has_psr && !pipe_config->has_psr && \ - !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \ + if (!intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \ &pipe_config->infoframes.name)) { \ pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \ ¤t_config->infoframes.name, \ @@ -5199,13 +5211,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_CSC(csc); PIPE_CONF_CHECK_CSC(output_csc); - - if (current_config->active_planes) { - PIPE_CONF_CHECK_BOOL(has_psr); - PIPE_CONF_CHECK_BOOL(has_psr2); - PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); - PIPE_CONF_CHECK_I(dc3co_exitline); - } } PIPE_CONF_CHECK_BOOL(double_wide); @@ -6307,6 +6312,9 @@ int intel_atomic_check(struct drm_device *dev, int ret, i; bool any_ms = false; + if (!intel_display_driver_check_access(dev_priv)) + return -ENODEV; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { /* @@ -7068,6 +7076,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) drm_atomic_helper_wait_for_dependencies(&state->base); drm_dp_mst_atomic_wait_for_dependencies(&state->base); + intel_atomic_global_state_wait_for_dependencies(state); /* * During full modesets we write a lot of registers, wait @@ -7244,6 +7253,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_pmdemand_post_plane_update(state); drm_atomic_helper_commit_hw_done(&state->base); + intel_atomic_global_state_commit_done(state); if (state->modeset) { /* As one of the primary mmio accessors, KMS has a high @@ -7294,6 +7304,38 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state) plane->frontbuffer_bit); } +static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock) +{ + int ret; + + ret = drm_atomic_helper_setup_commit(&state->base, nonblock); + if (ret) + return ret; + + ret = intel_atomic_global_state_setup_commit(state); + if (ret) + return ret; + + return 0; +} + +static int intel_atomic_swap_state(struct intel_atomic_state *state) +{ + int ret; + + ret = drm_atomic_helper_swap_state(&state->base, true); + if (ret) + return ret; + + intel_atomic_swap_global_state(state); + + intel_shared_dpll_swap_state(state); + + intel_atomic_track_fbs(state); + + return 0; +} + int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, bool nonblock) { @@ -7339,11 +7381,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, return ret; } - ret = drm_atomic_helper_setup_commit(&state->base, nonblock); + ret = intel_atomic_setup_commit(state, nonblock); if (!ret) - ret = drm_atomic_helper_swap_state(&state->base, true); - if (!ret) - intel_atomic_swap_global_state(state); + ret 
= intel_atomic_swap_state(state); if (ret) { struct intel_crtc_state *new_crtc_state; @@ -7357,8 +7397,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; } - intel_shared_dpll_swap_state(state); - intel_atomic_track_fbs(state); drm_atomic_state_get(&state->base); INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); @@ -7811,6 +7849,7 @@ static const struct intel_display_funcs skl_display_funcs = { .crtc_disable = hsw_crtc_disable, .commit_modeset_enables = skl_commit_modeset_enables, .get_initial_plane_config = skl_get_initial_plane_config, + .fixup_initial_plane_config = skl_fixup_initial_plane_config, }; static const struct intel_display_funcs ddi_display_funcs = { @@ -7819,6 +7858,7 @@ static const struct intel_display_funcs ddi_display_funcs = { .crtc_disable = hsw_crtc_disable, .commit_modeset_enables = intel_commit_modeset_enables, .get_initial_plane_config = i9xx_get_initial_plane_config, + .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, }; static const struct intel_display_funcs pch_split_display_funcs = { @@ -7827,6 +7867,7 @@ static const struct intel_display_funcs pch_split_display_funcs = { .crtc_disable = ilk_crtc_disable, .commit_modeset_enables = intel_commit_modeset_enables, .get_initial_plane_config = i9xx_get_initial_plane_config, + .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, }; static const struct intel_display_funcs vlv_display_funcs = { @@ -7835,6 +7876,7 @@ static const struct intel_display_funcs vlv_display_funcs = { .crtc_disable = i9xx_crtc_disable, .commit_modeset_enables = intel_commit_modeset_enables, .get_initial_plane_config = i9xx_get_initial_plane_config, + .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, }; static const struct intel_display_funcs i9xx_display_funcs = { @@ -7843,6 +7885,7 @@ static const struct intel_display_funcs i9xx_display_funcs = { .crtc_disable = i9xx_crtc_disable, .commit_modeset_enables = intel_commit_modeset_enables, .get_initial_plane_config = i9xx_get_initial_plane_config, + .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, }; /** diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 47297ed85822..fdeaac994e17 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -28,6 +28,8 @@ #include "intel_opregion.h" #include "intel_wm_types.h" +struct task_struct; + struct drm_i915_private; struct drm_property; struct drm_property_blob; @@ -47,6 +49,7 @@ struct intel_fbdev; struct intel_fdi_funcs; struct intel_hotplug_funcs; struct intel_initial_plane_config; +struct intel_opregion; struct intel_overlay; /* Amount of SAGV/QGV points, BSpec precisely defines this */ @@ -64,6 +67,8 @@ struct intel_display_funcs { struct intel_crtc_state *); void (*get_initial_plane_config)(struct intel_crtc *, struct intel_initial_plane_config *); + bool (*fixup_initial_plane_config)(struct intel_crtc *crtc, + const struct intel_initial_plane_config *plane_config); void (*crtc_enable)(struct intel_atomic_state *state, struct intel_crtc *crtc); void (*crtc_disable)(struct intel_atomic_state *state, @@ -172,6 +177,12 @@ struct intel_hotplug { struct work_struct poll_init_work; bool poll_enabled; + /* + * Queuing of hotplug_work, reenable_work and poll_init_work is + * enabled. Protected by drm_i915_private::irq_lock. 
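+ * Editor's illustration (not part of this patch): queuing paths are
+ * expected to test this flag with the lock held, e.g.
+ *
+ *	spin_lock_irq(&i915->irq_lock);
+ *	if (i915->display.hotplug.detection_work_enabled)
+ *		queue_delayed_work(i915->unordered_wq,
+ *				   &i915->display.hotplug.hotplug_work, 0);
+ *	spin_unlock_irq(&i915->irq_lock);
+ *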
+ */ + bool detection_work_enabled; + unsigned int hpd_storm_threshold; /* Whether or not to count short HPD IRQs in HPD storms */ u8 hpd_short_storm_enabled; @@ -299,6 +310,11 @@ struct intel_display { } funcs; struct { + bool any_task_allowed; + struct task_struct *allowed_task; + } access; + + struct { /* backlight registers and fields in struct intel_panel */ struct mutex lock; } backlight; @@ -513,7 +529,7 @@ struct intel_display { struct intel_fbc *fbc[I915_MAX_FBCS]; struct intel_frontbuffer_tracking fb_tracking; struct intel_hotplug hotplug; - struct intel_opregion opregion; + struct intel_opregion *opregion; struct intel_overlay *overlay; struct intel_display_params params; struct intel_vbt_data vbt; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index d951edb36687..6f2d13c8ccf7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -86,28 +86,6 @@ static int i915_sr_status(struct seq_file *m, void *unused) return 0; } -static int i915_opregion(struct seq_file *m, void *unused) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_opregion *opregion = &i915->display.opregion; - - if (opregion->header) - seq_write(m, opregion->header, OPREGION_SIZE); - - return 0; -} - -static int i915_vbt(struct seq_file *m, void *unused) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_opregion *opregion = &i915->display.opregion; - - if (opregion->vbt) - seq_write(m, opregion->vbt, opregion->vbt_size); - - return 0; -} - static int i915_gem_framebuffer_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1066,8 +1044,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, {"i915_sr_status", i915_sr_status, 0}, - {"i915_opregion", i915_opregion, 0}, - {"i915_vbt", i915_vbt, 0}, {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, {"i915_display_info", i915_display_info, 0}, @@ -1105,10 +1081,12 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); + intel_bios_debugfs_register(i915); intel_cdclk_debugfs_register(i915); intel_dmc_debugfs_register(i915); intel_fbc_debugfs_register(i915); intel_hpd_debugfs_register(i915); + intel_opregion_debugfs_register(i915); intel_psr_debugfs_register(i915); intel_wm_debugfs_register(i915); intel_display_debugfs_params(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c index b7e68eb62452..f35718748555 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c @@ -3,6 +3,7 @@ * Copyright © 2023 Intel Corporation */ +#include <linux/debugfs.h> #include <linux/kernel.h> #include <drm/drm_drv.h> diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index 0b522c6a8d6f..c02d79b50006 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -1012,7 +1012,7 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9 
goto display_fused_off; } - if (IS_GRAPHICS_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) { + if (IS_DISPLAY_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) { u32 fuse_strap = intel_de_read(i915, FUSE_STRAP); u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP); diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 9df9097a0255..4f7ba7eb03d2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -45,6 +45,7 @@ #include "intel_hdcp.h" #include "intel_hotplug.h" #include "intel_hti.h" +#include "intel_modeset_lock.h" #include "intel_modeset_setup.h" #include "intel_opregion.h" #include "intel_overlay.h" @@ -276,12 +277,144 @@ cleanup_bios: return ret; } +static void set_display_access(struct drm_i915_private *i915, + bool any_task_allowed, + struct task_struct *allowed_task) +{ + struct drm_modeset_acquire_ctx ctx; + int err; + + intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) { + err = drm_modeset_lock_all_ctx(&i915->drm, &ctx); + if (err) + continue; + + i915->display.access.any_task_allowed = any_task_allowed; + i915->display.access.allowed_task = allowed_task; + } + + drm_WARN_ON(&i915->drm, err); +} + +/** + * intel_display_driver_enable_user_access - Enable display HW access for all threads + * @i915: i915 device instance + * + * Enable the display HW access for all threads. Examples for such accesses + * are modeset commits and connector probing. + * + * This function should be called during driver loading and system resume once + * all the HW initialization steps are done. + */ +void intel_display_driver_enable_user_access(struct drm_i915_private *i915) +{ + set_display_access(i915, true, NULL); + + intel_hpd_enable_detection_work(i915); +} + +/** + * intel_display_driver_disable_user_access - Disable display HW access for user threads + * @i915: i915 device instance + * + * Disable the display HW access for user threads. Examples for such accesses + * are modeset commits and connector probing. For the current thread the + * access is still enabled, which should only perform HW init/deinit + * programming (as the initial modeset during driver loading or the disabling + * modeset during driver unloading and system suspend/shutdown). This function + * should be followed by calling either intel_display_driver_enable_user_access() + * after completing the HW init programming or + * intel_display_driver_suspend_access() after completing the HW deinit + * programming. + * + * This function should be called during driver loading/unloading and system + * suspend/shutdown before starting the HW init/deinit programming. + */ +void intel_display_driver_disable_user_access(struct drm_i915_private *i915) +{ + intel_hpd_disable_detection_work(i915); + + set_display_access(i915, false, current); +} + +/** + * intel_display_driver_suspend_access - Suspend display HW access for all threads + * @i915: i915 device instance + * + * Disable the display HW access for all threads. Examples for such accesses + * are modeset commits and connector probing. This call should be either + * followed by calling intel_display_driver_resume_access(), or the driver + * should be unloaded/shutdown. + * + * This function should be called during driver unloading and system + * suspend/shutdown after completing the HW deinit programming. 
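+ *
+ * Illustrative suspend-path ordering (editor's sketch assembled from the
+ * descriptions above, not text from the patch):
+ *
+ *	intel_display_driver_disable_user_access(i915);
+ *	... HW deinit programming (the disabling modeset) ...
+ *	intel_display_driver_suspend_access(i915);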
+ */ +void intel_display_driver_suspend_access(struct drm_i915_private *i915) +{ + set_display_access(i915, false, NULL); +} + +/** + * intel_display_driver_resume_access - Resume display HW access for the resume thread + * @i915: i915 device instance + * + * Enable the display HW access for the current resume thread, keeping the + * access disabled for all other (user) threads. Examples for such accesses + * are modeset commits and connector probing. The resume thread should only + * perform HW init programming (as the restoring modeset). This function + * should be followed by calling intel_display_driver_enable_user_access(), + * after completing the HW init programming steps. + * + * This function should be called during system resume before starting the HW + * init steps. + */ +void intel_display_driver_resume_access(struct drm_i915_private *i915) +{ + set_display_access(i915, false, current); +} + +/** + * intel_display_driver_check_access - Check if the current thread has display HW access + * @i915: i915 device instance + * + * Check whether the current thread has display HW access, print a debug + * message if it doesn't. Such accesses are modeset commits and connector + * probing. If the function returns %false any HW access should be prevented. + * + * Returns %true if the current thread has display HW access, %false + * otherwise. + */ +bool intel_display_driver_check_access(struct drm_i915_private *i915) +{ + char comm[TASK_COMM_LEN]; + char current_task[TASK_COMM_LEN + 16]; + char allowed_task[TASK_COMM_LEN + 16] = "none"; + + if (i915->display.access.any_task_allowed || + i915->display.access.allowed_task == current) + return true; + + snprintf(current_task, sizeof(current_task), "%s[%d]", + get_task_comm(comm, current), + task_pid_vnr(current)); + + if (i915->display.access.allowed_task) + snprintf(allowed_task, sizeof(allowed_task), "%s[%d]", + get_task_comm(comm, i915->display.access.allowed_task), + task_pid_vnr(i915->display.access.allowed_task)); + + drm_dbg_kms(&i915->drm, + "Reject display access from task %s (allowed to %s)\n", + current_task, allowed_task); + + return false; +} + /* part #2: call after irq install, but before gem init */ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) { struct drm_device *dev = &i915->drm; enum pipe pipe; - struct intel_crtc *crtc; int ret; if (!HAS_DISPLAY(i915)) @@ -315,8 +448,6 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_display_driver_init_hw(i915); intel_dpll_update_ref_clks(i915); - intel_hdcp_component_init(i915); - if (i915->display.cdclk.max_cdclk_freq == 0) intel_update_max_cdclk(i915); @@ -326,16 +457,14 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_vga_disable(i915); intel_setup_outputs(i915); + intel_display_driver_disable_user_access(i915); + drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx); intel_acpi_assign_connector_fwnodes(i915); drm_modeset_unlock_all(dev); - for_each_intel_crtc(dev, crtc) { - if (!to_intel_crtc_state(crtc->base.state)->uapi.active) - continue; - intel_crtc_initial_plane_config(crtc); - } + intel_initial_plane_config(i915); /* * Make sure hardware watermarks really match the state we read out. @@ -357,6 +486,13 @@ int intel_display_driver_probe(struct drm_i915_private *i915) return 0; /* + * This will bind stuff into ggtt, so it needs to be done after + * the BIOS fb takeover and whatever else magic ggtt reservations + * happen during gem/ggtt init.
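+ * (Editor's note: this is why intel_hdcp_component_init() moves here to
+ * intel_display_driver_probe() from probe_nogem() in the hunk above,
+ * which runs before gem/ggtt init.)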
+ */ + intel_hdcp_component_init(i915); + + /* * Force all active planes to recompute their states. So that on * mode_setcrtc after probe, all the intel_plane_state variables * are already calculated and there is no assert_plane warnings @@ -374,7 +510,6 @@ int intel_display_driver_probe(struct drm_i915_private *i915) /* Only enable hotplug handling once the fbdev is fully set up. */ intel_hpd_init(i915); - intel_hpd_poll_disable(i915); skl_watermark_ipc_init(i915); @@ -383,7 +518,8 @@ int intel_display_driver_probe(struct drm_i915_private *i915) void intel_display_driver_register(struct drm_i915_private *i915) { - struct drm_printer p = drm_debug_printer("i915 display info:"); + struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, + "i915 display info:"); if (!HAS_DISPLAY(i915)) return; @@ -394,6 +530,8 @@ void intel_display_driver_register(struct drm_i915_private *i915) intel_audio_init(i915); + intel_display_driver_enable_user_access(i915); + intel_display_debugfs_register(i915); /* @@ -412,6 +550,7 @@ void intel_display_driver_register(struct drm_i915_private *i915) * fbdev->async_cookie. */ drm_kms_helper_poll_init(&i915->drm); + intel_hpd_poll_disable(i915); intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p); @@ -440,6 +579,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return; + intel_display_driver_suspend_access(i915); + /* * Due to the hpd irq storm handling the hotplug work can re-arm the * poll handlers. Hence disable polling after hpd handling is shut down. @@ -486,14 +627,17 @@ void intel_display_driver_unregister(struct drm_i915_private *i915) return; intel_fbdev_unregister(i915); - intel_audio_deinit(i915); - /* * After flushing the fbdev (incl. a late async config which * will have delayed queuing of a hotplug event), then flush * the hotplug events. 
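 *
 * (Editor's note: consequently, user access is disabled and audio is
 * deinitialized only below, after drm_kms_helper_poll_fini() has run.)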
*/ drm_kms_helper_poll_fini(&i915->drm); + + intel_display_driver_disable_user_access(i915); + + intel_audio_deinit(i915); + drm_atomic_helper_shutdown(&i915->drm); acpi_video_unregister(); diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.h b/drivers/gpu/drm/i915/display/intel_display_driver.h index c276a58ee329..42cc4af6d3fd 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.h +++ b/drivers/gpu/drm/i915/display/intel_display_driver.h @@ -32,5 +32,11 @@ int __intel_display_driver_resume(struct drm_i915_private *i915, struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx); +void intel_display_driver_enable_user_access(struct drm_i915_private *i915); +void intel_display_driver_disable_user_access(struct drm_i915_private *i915); +void intel_display_driver_suspend_access(struct drm_i915_private *i915); +void intel_display_driver_resume_access(struct drm_i915_private *i915); +bool intel_display_driver_check_access(struct drm_i915_private *i915); + #endif /* __INTEL_DISPLAY_DRIVER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index a7d8f3fc98de..f846c5b108b5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -266,12 +266,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, intel_uncore_posting_read(&dev_priv->uncore, reg); } -static bool i915_has_asle(struct drm_i915_private *dev_priv) +static bool i915_has_asle(struct drm_i915_private *i915) { - if (!dev_priv->display.opregion.asle) + if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915)) return false; - return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); + return intel_opregion_asle_present(i915); } /** @@ -986,7 +986,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i * their flags both in the PICA and SDE IIR. */ if (*pch_iir & SDE_PICAINTERRUPT) { - drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP); + drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL); pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0); *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR); @@ -1587,7 +1587,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915) struct intel_uncore *uncore = &i915->uncore; u32 display_mask, extra_mask; - if (GRAPHICS_VER(i915) >= 7) { + if (DISPLAY_VER(i915) >= 7) { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 3fdd8a517983..01eb6e4e6049 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -780,6 +780,8 @@ struct intel_plane_state { struct intel_initial_plane_config { struct intel_framebuffer *fb; + struct intel_memory_region *mem; + resource_size_t phys_base; struct i915_vma *vma; unsigned int tiling; int size; @@ -1213,12 +1215,12 @@ struct intel_crtc_state { bool has_psr; bool has_psr2; bool enable_psr2_sel_fetch; + bool enable_psr2_su_region_et; bool req_psr2_sdp_prior_scanline; bool has_panel_replay; bool wm_level_disabled; u32 dc3co_exitline; u16 su_y_granularity; - struct drm_dp_vsc_sdp psr_vsc; /* * Frequence the dpll for the port should run at. 
Differs from the @@ -1402,6 +1404,8 @@ struct intel_crtc_state { u32 psr2_man_track_ctl; + struct drm_rect psr2_su_area; + /* Variable Refresh Rate state */ struct { bool enable, in_range; @@ -1682,13 +1686,14 @@ struct intel_psr { /* Mutex for PSR state of the transcoder */ struct mutex lock; -#define I915_PSR_DEBUG_MODE_MASK 0x0f -#define I915_PSR_DEBUG_DEFAULT 0x00 -#define I915_PSR_DEBUG_DISABLE 0x01 -#define I915_PSR_DEBUG_ENABLE 0x02 -#define I915_PSR_DEBUG_FORCE_PSR1 0x03 -#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4 -#define I915_PSR_DEBUG_IRQ 0x10 +#define I915_PSR_DEBUG_MODE_MASK 0x0f +#define I915_PSR_DEBUG_DEFAULT 0x00 +#define I915_PSR_DEBUG_DISABLE 0x01 +#define I915_PSR_DEBUG_ENABLE 0x02 +#define I915_PSR_DEBUG_FORCE_PSR1 0x03 +#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4 +#define I915_PSR_DEBUG_IRQ 0x10 +#define I915_PSR_DEBUG_SU_REGION_ET_DISABLE 0x20 u32 debug; bool sink_support; @@ -1702,14 +1707,20 @@ struct intel_psr { unsigned int busy_frontbuffer_bits; bool sink_psr2_support; bool link_standby; - bool colorimetry_support; bool psr2_enabled; bool psr2_sel_fetch_enabled; bool psr2_sel_fetch_cff_enabled; bool req_psr2_sdp_prior_scanline; u8 sink_sync_latency; - u8 io_wake_lines; - u8 fast_wake_lines; + + struct { + u8 io_wake_lines; + u8 fast_wake_lines; + + /* LNL and beyond */ + u8 check_entry_lines; + } alpm_parameters; + ktime_t last_entry_attempt; ktime_t last_exit; bool sink_not_reliable; @@ -1833,6 +1844,8 @@ struct intel_dp { /* When we last wrote the OUI for eDP */ unsigned long last_oui_write; + + bool colorimetry_support; }; enum lspcon_vendor { @@ -1890,6 +1903,9 @@ struct intel_digital_port { u32 (*infoframes_enabled)(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); bool (*connected)(struct intel_encoder *encoder); + + void (*lock)(struct intel_digital_port *dig_port); + void (*unlock)(struct intel_digital_port *dig_port); }; struct intel_dp_mst_encoder { diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index b70502586ab9..835781624482 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -1158,7 +1158,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) str_yes_no(intel_dmc_has_payload(i915))); seq_printf(m, "path: %s\n", dmc ? 
dmc->fw_path : "N/A"); seq_printf(m, "Pipe A fw needed: %s\n", - str_yes_no(GRAPHICS_VER(i915) >= 12)); + str_yes_no(DISPLAY_VER(i915) >= 12)); seq_printf(m, "Pipe A fw loaded: %s\n", str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA))); seq_printf(m, "Pipe B fw needed: %s\n", diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index f5ef95da5534..e2d991edfd89 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -56,6 +56,7 @@ #include "intel_cx0_phy.h" #include "intel_ddi.h" #include "intel_de.h" +#include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" @@ -2355,6 +2356,9 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits->min_rate = intel_dp_common_rate(intel_dp, 0); limits->max_rate = intel_dp_max_link_rate(intel_dp); + /* FIXME 128b/132b SST support missing */ + limits->max_rate = min(limits->max_rate, 810000); + limits->min_lane_count = 1; limits->max_lane_count = intel_dp_max_lane_count(intel_dp); @@ -2616,58 +2620,38 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; + struct drm_dp_vsc_sdp *vsc; - /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ - if (crtc_state->has_psr) + if ((!intel_dp->colorimetry_support || + !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) && + !crtc_state->has_psr) return; - if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) - return; + vsc = &crtc_state->infoframes.vsc; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); vsc->sdp_type = DP_SDP_VSC; - intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, - &crtc_state->infoframes.vsc); -} -void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, - struct drm_dp_vsc_sdp *vsc) -{ - vsc->sdp_type = DP_SDP_VSC; - - if (crtc_state->has_psr2) { - if (intel_dp->psr.colorimetry_support && - intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { - /* [PSR2, +Colorimetry] */ - intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, - vsc); - } else { - /* - * [PSR2, -Colorimetry] - * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 - * 3D stereo + PSR/PSR2 + Y-coordinate. - */ - vsc->revision = 0x4; - vsc->length = 0xe; - } + /* Needs colorimetry */ + if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { + intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, + vsc); + } else if (crtc_state->has_psr2) { + /* + * [PSR2 without colorimetry] + * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 + * 3D stereo + PSR/PSR2 + Y-coordinate. + */ + vsc->revision = 0x4; + vsc->length = 0xe; } else if (crtc_state->has_panel_replay) { - if (intel_dp->psr.colorimetry_support && - intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { - /* [Panel Replay with colorimetry info] */ - intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, - vsc); - } else { - /* - * [Panel Replay without colorimetry info] - * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 - * VSC SDP supporting 3D stereo + Panel Replay. - */ - vsc->revision = 0x6; - vsc->length = 0x10; - } + /* + * [Panel Replay without colorimetry info] + * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 + * VSC SDP supporting 3D stereo + Panel Replay. 
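+ *
+ * (Editor's note: revision and length become SDP header bytes HB2 and
+ * HB3 when packed, now done by the generic drm_dp_vsc_sdp_pack() helper
+ * called from intel_write_dp_sdp() below.)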
+ */ + vsc->revision = 0x6; + vsc->length = 0x10; } else { /* * [PSR1] @@ -3345,13 +3329,6 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, fastset = false; } - if (CAN_PSR(intel_dp)) { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n", - encoder->base.base.id, encoder->base.name); - crtc_state->uapi.mode_changed = true; - fastset = false; - } - return fastset; } @@ -4110,73 +4087,6 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, return false; } -static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, - struct dp_sdp *sdp, size_t size) -{ - size_t length = sizeof(struct dp_sdp); - - if (size < length) - return -ENOSPC; - - memset(sdp, 0, size); - - /* - * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 - * VSC SDP Header Bytes - */ - sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ - sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ - sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ - sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ - - if (vsc->revision == 0x6) { - sdp->db[0] = 1; - sdp->db[3] = 1; - } - - /* - * Revision 0x5 and revision 0x7 supports Pixel Encoding/Colorimetry - * Format as per DP 1.4a spec and DP 2.0 respectively. - */ - if (!(vsc->revision == 0x5 || vsc->revision == 0x7)) - goto out; - - /* VSC SDP Payload for DB16 through DB18 */ - /* Pixel Encoding and Colorimetry Formats */ - sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ - sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ - - switch (vsc->bpc) { - case 6: - /* 6bpc: 0x0 */ - break; - case 8: - sdp->db[17] = 0x1; /* DB17[3:0] */ - break; - case 10: - sdp->db[17] = 0x2; - break; - case 12: - sdp->db[17] = 0x3; - break; - case 16: - sdp->db[17] = 0x4; - break; - default: - MISSING_CASE(vsc->bpc); - break; - } - /* Dynamic Range and Component Bit Depth */ - if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) - sdp->db[17] |= 0x80; /* DB17[7] */ - - /* Content Type */ - sdp->db[18] = vsc->content_type & 0x7; - -out: - return length; -} - static ssize_t intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915, const struct hdmi_drm_infoframe *drm_infoframe, @@ -4269,8 +4179,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder, switch (type) { case DP_SDP_VSC: - len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, - sizeof(sdp)); + len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv, @@ -4288,24 +4197,6 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder, dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); } -void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_dp_vsc_sdp *vsc) -{ - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct dp_sdp sdp = {}; - ssize_t len; - - len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); - - if (drm_WARN_ON(&dev_priv->drm, len < 0)) - return; - - dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, - &sdp, len); -} - void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, @@ -4332,9 +4223,7 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder, if (!enable) return; - /* When PSR is enabled, VSC SDP is handled by 
PSR routine */ - if (!crtc_state->has_psr) - intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); + intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); } @@ -4465,10 +4354,6 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, struct dp_sdp sdp = {}; int ret; - /* When PSR is enabled, VSC SDP is handled by PSR routine */ - if (crtc_state->has_psr) - return; - if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(type)) == 0) return; @@ -4679,31 +4564,36 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum pipe pipe = crtc->pipe; u32 pattern_val; switch (data->phy_pattern) { - case DP_PHY_TEST_PATTERN_NONE: + case DP_LINK_QUAL_PATTERN_DISABLE: drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); + if (DISPLAY_VER(dev_priv) >= 10) + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_LINK_TRAIN_NORMAL); break; - case DP_PHY_TEST_PATTERN_D10_2: + case DP_LINK_QUAL_PATTERN_D10_2: drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); break; - case DP_PHY_TEST_PATTERN_ERROR_COUNT: + case DP_LINK_QUAL_PATTERN_ERROR_RATE: drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_SCRAMBLED_0); break; - case DP_PHY_TEST_PATTERN_PRBS7: + case DP_LINK_QUAL_PATTERN_PRBS7: drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); break; - case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM: /* * FIXME: Ideally pattern should come from DPCD 0x250. As * current firmware of DPR-100 could not set it, so hardcoding @@ -4721,7 +4611,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_CUSTOM80); break; - case DP_PHY_TEST_PATTERN_CP2520: + case DP_LINK_QUAL_PATTERN_CP2520_PAT_1: /* * FIXME: Ideally pattern should come from DPCD 0x24A. 
As * current firmware of DPR-100 could not set it, so hardcoding @@ -4733,8 +4623,19 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | pattern_val); break; + case DP_LINK_QUAL_PATTERN_CP2520_PAT_3: + if (DISPLAY_VER(dev_priv) < 10) { + drm_warn(&dev_priv->drm, "Platform does not support TPS4\n"); + break; + } + drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n"); + intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4); + break; default: - WARN(1, "Invalid Phy Test Pattern\n"); + drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n"); } } @@ -5453,8 +5354,24 @@ edp_detect(struct intel_dp *intel_dp) return connector_status_connected; } +void intel_digital_port_lock(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (dig_port->lock) + dig_port->lock(dig_port); +} + +void intel_digital_port_unlock(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (dig_port->unlock) + dig_port->unlock(dig_port); +} + /* - * intel_digital_port_connected - is the specified port connected? + * intel_digital_port_connected_locked - is the specified port connected? * @encoder: intel_encoder * * In cases where there's a connector physically connected but it can't be used @@ -5462,21 +5379,44 @@ edp_detect(struct intel_dp *intel_dp) * pretty much treat the port as disconnected. This is relevant for type-C * (starting on ICL) where there's ownership involved. * + * The caller must hold the lock acquired by calling intel_digital_port_lock() + * when calling this function. + * * Return %true if port is connected, %false otherwise. 
*/ -bool intel_digital_port_connected(struct intel_encoder *encoder) +bool intel_digital_port_connected_locked(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port); bool is_connected = false; intel_wakeref_t wakeref; - with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) - is_connected = dig_port->connected(encoder); + with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) { + unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4); + + do { + is_connected = dig_port->connected(encoder); + if (is_connected || is_glitch_free) + break; + usleep_range(10, 30); + } while (time_before(jiffies, wait_expires)); + } return is_connected; } +bool intel_digital_port_connected(struct intel_encoder *encoder) +{ + bool ret; + + intel_digital_port_lock(encoder); + ret = intel_digital_port_connected_locked(encoder); + intel_digital_port_unlock(encoder); + + return ret; +} + static const struct drm_edid * intel_dp_get_edid(struct intel_dp *intel_dp) { @@ -5670,6 +5610,9 @@ intel_dp_detect(struct drm_connector *connector, if (!intel_display_device_enabled(dev_priv)) return connector_status_disconnected; + if (!intel_display_driver_check_access(dev_priv)) + return connector->status; + /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); @@ -5770,6 +5713,10 @@ intel_dp_force(struct drm_connector *connector) drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + + if (!intel_display_driver_check_access(dev_priv)) + return; + intel_dp_unset_edid(intel_dp); if (connector->status != connector_status_connected) @@ -6054,7 +6001,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector, spin_unlock_irq(&i915->irq_lock); if (need_work) - queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0); + intel_hpd_schedule_detection(i915); } static const struct drm_connector_funcs intel_dp_connector_funcs = { @@ -6497,6 +6444,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, connector->interlace_allowed = true; intel_connector->polled = DRM_CONNECTOR_POLL_HPD; + intel_connector->base.polled = intel_connector->polled; intel_connector_attach_encoder(intel_connector, intel_encoder); @@ -6527,6 +6475,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, "HDCP init failed, skipping.\n"); } + intel_dp->colorimetry_support = + intel_dp_get_colorimetry_status(intel_dp); + intel_dp->frl.is_trained = false; intel_dp->frl.trained_rate_gbps = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 05db46b111f2..530cc97bc42f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -109,20 +109,16 @@ int intel_dp_max_data_rate(int max_link_rate, int max_lanes); bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, - struct drm_dp_vsc_sdp *vsc); -void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_dp_vsc_sdp *vsc); void intel_dp_set_infoframes(struct 
intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_read_dp_sdp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, unsigned int type); +void intel_digital_port_lock(struct intel_encoder *encoder); +void intel_digital_port_unlock(struct intel_encoder *encoder); bool intel_digital_port_connected(struct intel_encoder *encoder); +bool intel_digital_port_connected_locked(struct intel_encoder *encoder); int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector, u8 dsc_max_bpc); u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 2e2af71bcd5a..4f4a0e3b3114 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -9,6 +9,7 @@ #include "intel_bios.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_dp_aux_regs.h" #include "intel_pps.h" @@ -228,9 +229,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, u32 aux_send_ctl_flags) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); - bool is_tc_port = intel_phy_is_tc(i915, phy); i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; enum intel_display_power_domain aux_domain; @@ -245,18 +245,16 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); - if (is_tc_port) { - intel_tc_port_lock(dig_port); - /* - * Abort transfers on a disconnected port as required by - * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX - * timeouts that would otherwise happen. - * TODO: abort the transfer on non-TC ports as well. - */ - if (!intel_tc_port_connected_locked(&dig_port->base)) { - ret = -ENXIO; - goto out_unlock; - } + intel_digital_port_lock(encoder); + /* + * Abort transfers on a disconnected port as required by + * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX + * timeouts that would otherwise happen. 
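+ * (Editor's note: eDP is exempted in the check below, presumably because
+ * a built-in panel is always connected.)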
+ */ + if (!intel_dp_is_edp(intel_dp) && + !intel_digital_port_connected_locked(&dig_port->base)) { + ret = -ENXIO; + goto out_unlock; } aux_domain = intel_aux_power_domain(dig_port); @@ -423,8 +421,7 @@ out: intel_pps_unlock(intel_dp, pps_wakeref); intel_display_power_put_async(i915, aux_domain, aux_wakeref); out_unlock: - if (is_tc_port) - intel_tc_port_unlock(dig_port); + intel_digital_port_unlock(encoder); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 8a9432335030..5fa25a5a36b5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -37,6 +37,7 @@ #include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" +#include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_hdcp.h" @@ -1410,6 +1411,9 @@ intel_dp_mst_detect(struct drm_connector *connector, if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; + if (!intel_display_driver_check_access(i915)) + return connector->status; + return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr, intel_connector->port); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index ef57dad1a9cb..e7e0a4cf9f93 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -1263,11 +1263,11 @@ static const struct dpll_info hsw_plls[] = { { .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, }, { .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, }, { .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810, - .flags = INTEL_DPLL_ALWAYS_ON, }, + .always_on = true, }, { .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350, - .flags = INTEL_DPLL_ALWAYS_ON, }, + .always_on = true, }, { .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700, - .flags = INTEL_DPLL_ALWAYS_ON, }, + .always_on = true, }, {} }; @@ -1945,7 +1945,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = { static const struct dpll_info skl_plls[] = { { .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0, - .flags = INTEL_DPLL_ALWAYS_ON, }, + .always_on = true, }, { .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, }, { .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, }, { .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, }, @@ -3308,6 +3308,8 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state, struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; struct skl_wrpll_params pll_params = {}; @@ -3326,7 +3328,11 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state, return ret; /* this is mainly for the fastset check */ - icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY); + if (old_crtc_state->shared_dpll && + old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL) + icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT); + else + icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY); crtc_state->port_clock = 
icl_ddi_mg_pll_get_freq(i915, NULL, &port_dpll->hw_state); @@ -4023,7 +4029,8 @@ static const struct intel_shared_dpll_funcs mg_pll_funcs = { static const struct dpll_info icl_plls[] = { { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, - { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, }, + { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, + .is_alt_port_dpll = true, }, { .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, }, { .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, }, { .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, }, @@ -4068,7 +4075,8 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = { static const struct dpll_info tgl_plls[] = { { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, - { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, }, + { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, + .is_alt_port_dpll = true, }, { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, }, { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, }, { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, }, @@ -4141,7 +4149,8 @@ static const struct intel_dpll_mgr adls_pll_mgr = { static const struct dpll_info adlp_plls[] = { { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, - { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, }, + { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, + .is_alt_port_dpll = true, }, { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, }, { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, }, { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, }, @@ -4465,31 +4474,29 @@ verify_single_dpll_state(struct drm_i915_private *i915, struct intel_crtc *crtc, const struct intel_crtc_state *new_crtc_state) { - struct intel_dpll_hw_state dpll_hw_state; + struct intel_dpll_hw_state dpll_hw_state = {}; u8 pipe_mask; bool active; - memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); - - drm_dbg_kms(&i915->drm, "%s\n", pll->info->name); - active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state); - if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { + if (!pll->info->always_on) { I915_STATE_WARN(i915, !pll->on && pll->active_mask, - "pll in active use but not on in sw tracking\n"); + "%s: pll in active use but not on in sw tracking\n", + pll->info->name); I915_STATE_WARN(i915, pll->on && !pll->active_mask, - "pll is on but not used by any active pipe\n"); + "%s: pll is on but not used by any active pipe\n", + pll->info->name); I915_STATE_WARN(i915, pll->on != active, - "pll on state mismatch (expected %i, found %i)\n", - pll->on, active); + "%s: pll on state mismatch (expected %i, found %i)\n", + pll->info->name, pll->on, active); } if (!crtc) { I915_STATE_WARN(i915, pll->active_mask & ~pll->state.pipe_mask, - "more active pll users than references: 0x%x vs 0x%x\n", - pll->active_mask, pll->state.pipe_mask); + "%s: more active pll users than references: 0x%x vs 0x%x\n", + pll->info->name, pll->active_mask, pll->state.pipe_mask); return; } @@ -4498,21 +4505,29 @@ 
verify_single_dpll_state(struct drm_i915_private *i915, if (new_crtc_state->hw.active) I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask), - "pll active mismatch (expected pipe %c in active mask 0x%x)\n", - pipe_name(crtc->pipe), pll->active_mask); + "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); else I915_STATE_WARN(i915, pll->active_mask & pipe_mask, - "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", - pipe_name(crtc->pipe), pll->active_mask); + "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask), - "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", - pipe_mask, pll->state.pipe_mask); + "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", + pll->info->name, pipe_mask, pll->state.pipe_mask); I915_STATE_WARN(i915, pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, sizeof(dpll_hw_state)), - "pll hw state mismatch\n"); + "%s: pll hw state mismatch\n", + pll->info->name); +} + +static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll, + const struct intel_shared_dpll *new_pll) +{ + return old_pll && new_pll && old_pll != new_pll && + (old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll); } void intel_shared_dpll_state_verify(struct intel_atomic_state *state, @@ -4534,11 +4549,15 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state, struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; I915_STATE_WARN(i915, pll->active_mask & pipe_mask, - "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", - pipe_name(crtc->pipe), pll->active_mask); - I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask, - "pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n", - pipe_name(crtc->pipe), pll->state.pipe_mask); + "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); + + /* TC ports have both MG/TC and TBT PLL referenced simultaneously */ + I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll, + new_crtc_state->shared_dpll) && + pll->state.pipe_mask & pipe_mask, + "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n", + pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask); } } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 2e7ea0d8d3ff..616afe861b46 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -276,15 +276,21 @@ struct dpll_info { */ enum intel_display_power_domain power_domain; -#define INTEL_DPLL_ALWAYS_ON (1 << 0) /** - * @flags: + * @always_on: * - * INTEL_DPLL_ALWAYS_ON - * Inform the state checker that the DPLL is kept enabled even if - * not in use by any CRTC. + * Inform the state checker that the DPLL is kept enabled even if + * not in use by any CRTC. */ - u32 flags; + bool always_on; + + /** + * @is_alt_port_dpll: + * + * Inform the state checker that the DPLL can be used as a fallback + * (for TC->TBT fallback). 
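The has_alt_port_dpll() helper above exists because, as the comment notes, a type-C port legitimately holds references on two PLLs at once while switching between its MG/TC and TBT clocks, so the "enabled crtcs mismatch" check must be suppressed for exactly that transition. The predicate reduces to: two distinct PLLs, at least one marked is_alt_port_dpll. A standalone rendering of that truth table:

    #include <stdio.h>
    #include <stdbool.h>

    struct dpll_info { bool is_alt_port_dpll; };
    struct dpll { const struct dpll_info *info; };

    static bool has_alt_port_dpll(const struct dpll *old, const struct dpll *new)
    {
        return old && new && old != new &&
               (old->info->is_alt_port_dpll || new->info->is_alt_port_dpll);
    }

    int main(void)
    {
        const struct dpll_info tbt = { .is_alt_port_dpll = true };
        const struct dpll_info mg  = { .is_alt_port_dpll = false };
        struct dpll a = { &tbt }, b = { &mg };

        /* TBT -> MG transition: both PLLs referenced, check suppressed. */
        printf("%d\n", has_alt_port_dpll(&a, &b));  /* 1 */
        /* Same PLL before and after: normal checking applies. */
        printf("%d\n", has_alt_port_dpll(&b, &b));  /* 0 */
        return 0;
    }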
+ */ + bool is_alt_port_dpll; }; /** diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 482c28b5c2de..a6c7122fd671 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -453,6 +453,10 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state, if (!HAS_DSB(i915)) return NULL; + /* TODO: DSB is broken in Xe KMD, so disabling it until fixed */ + if (!IS_ENABLED(I915)) + return NULL; + dsb = kzalloc(sizeof(*dsb), GFP_KERNEL); if (!dsb) goto out; diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 9111e9d46486..8ca9ae4798a8 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -35,6 +35,7 @@ #include "i915_reg.h" #include "intel_connector.h" #include "intel_de.h" +#include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_dvo.h" #include "intel_dvo_dev.h" @@ -328,6 +329,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force) if (!intel_display_device_enabled(i915)) return connector_status_disconnected; + if (!intel_display_driver_check_access(i915)) + return connector->base.status; + return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } @@ -536,6 +540,7 @@ void intel_dvo_init(struct drm_i915_private *i915) if (intel_dvo->dev.type == INTEL_DVO_CHIP_TMDS) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + connector->base.polled = connector->polled; drm_connector_init_with_ddc(&i915->drm, &connector->base, &intel_dvo_connector_funcs, diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index f17a1afb4929..b453fcbd67da 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1087,18 +1087,7 @@ static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state) static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->hw.fb; - - switch (fb->modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - case I915_FORMAT_MOD_4_TILED: - case I915_FORMAT_MOD_X_TILED: - return true; - default: - return false; - } + return true; } static bool tiling_is_valid(const struct intel_plane_state *plane_state) diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c index 717c3a3237c4..0665f943f65f 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c @@ -78,8 +78,9 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info /* Use fbdev's framebuffer from lmem for discrete */ info->fix.smem_start = - (unsigned long)(mem->io_start + - i915_gem_object_get_dma_address(obj, 0)); + (unsigned long)(mem->io.start + + i915_gem_object_get_dma_address(obj, 0) - + mem->region.start); info->fix.smem_len = obj->base.size; } else { struct i915_ggtt *ggtt = to_gt(i915)->ggtt; diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c index e8e8be54143b..cbcd1e91b7be 100644 --- a/drivers/gpu/drm/i915/display/intel_global_state.c +++ b/drivers/gpu/drm/i915/display/intel_global_state.c @@ -10,12 +10,55 @@ #include "intel_display_types.h" #include "intel_global_state.h" +struct intel_global_commit { + struct kref ref; + struct completion 
done; +}; + +static struct intel_global_commit *commit_new(void) +{ + struct intel_global_commit *commit; + + commit = kzalloc(sizeof(*commit), GFP_KERNEL); + if (!commit) + return NULL; + + init_completion(&commit->done); + kref_init(&commit->ref); + + return commit; +} + +static void __commit_free(struct kref *kref) +{ + struct intel_global_commit *commit = + container_of(kref, typeof(*commit), ref); + + kfree(commit); +} + +static struct intel_global_commit *commit_get(struct intel_global_commit *commit) +{ + if (commit) + kref_get(&commit->ref); + + return commit; +} + +static void commit_put(struct intel_global_commit *commit) +{ + if (commit) + kref_put(&commit->ref, __commit_free); +} + static void __intel_atomic_global_state_free(struct kref *kref) { struct intel_global_state *obj_state = container_of(kref, struct intel_global_state, ref); struct intel_global_obj *obj = obj_state->obj; + commit_put(obj_state->commit); + obj->funcs->atomic_destroy_state(obj, obj_state); } @@ -127,6 +170,8 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state, obj_state->obj = obj; obj_state->changed = false; + obj_state->serialized = false; + obj_state->commit = NULL; kref_init(&obj_state->ref); @@ -239,19 +284,13 @@ int intel_atomic_lock_global_state(struct intel_global_state *obj_state) int intel_atomic_serialize_global_state(struct intel_global_state *obj_state) { - struct intel_atomic_state *state = obj_state->state; - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; + int ret; - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state; + ret = intel_atomic_lock_global_state(obj_state); + if (ret) + return ret; - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - } - - obj_state->changed = true; + obj_state->serialized = true; return 0; } @@ -267,3 +306,79 @@ intel_atomic_global_state_is_serialized(struct intel_atomic_state *state) return false; return true; } + +int +intel_atomic_global_state_setup_commit(struct intel_atomic_state *state) +{ + const struct intel_global_state *old_obj_state; + struct intel_global_state *new_obj_state; + struct intel_global_obj *obj; + int i; + + for_each_oldnew_global_obj_in_state(state, obj, old_obj_state, + new_obj_state, i) { + struct intel_global_commit *commit = NULL; + + if (new_obj_state->serialized) { + /* + * New commit which is going to be completed + * after the hardware reprogramming is done. + */ + commit = commit_new(); + if (!commit) + return -ENOMEM; + } else if (new_obj_state->changed) { + /* + * We're going to swap to this state, so carry the + * previous commit along, in case it's not yet done. 
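The intel_global_commit object above pairs a kref with a completion so a later atomic commit can wait on an earlier one that touched the same global object: a serializing state allocates a fresh commit, while a merely changed state takes a reference on the old one and carries it along. A userspace analogy with C11 atomics standing in for kref and struct completion, purely illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    struct commit {
        atomic_int ref;
        atomic_bool done;  /* stands in for struct completion */
    };

    static struct commit *commit_new(void)
    {
        struct commit *c = calloc(1, sizeof(*c));

        if (c)
            atomic_store(&c->ref, 1);
        return c;
    }

    static struct commit *commit_get(struct commit *c)
    {
        if (c)
            atomic_fetch_add(&c->ref, 1);
        return c;
    }

    static void commit_put(struct commit *c)
    {
        /* fetch_sub returns the old value; last reference frees. */
        if (c && atomic_fetch_sub(&c->ref, 1) == 1)
            free(c);
    }

    int main(void)
    {
        struct commit *old = commit_new();          /* serializing commit */
        struct commit *carried = commit_get(old);   /* next state carries it */

        atomic_store(&old->done, 1);  /* hw reprogramming finished */
        commit_put(old);              /* first state drops its reference */

        /* the later commit can still observe completion */
        printf("old commit done: %d\n", (int)atomic_load(&carried->done));
        commit_put(carried);
        return 0;
    }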
+ */ + commit = commit_get(old_obj_state->commit); + } + + new_obj_state->commit = commit; + } + + return 0; +} + +int +intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_global_state *old_obj_state; + struct intel_global_obj *obj; + int i; + + for_each_old_global_obj_in_state(state, obj, old_obj_state, i) { + struct intel_global_commit *commit = old_obj_state->commit; + long ret; + + if (!commit) + continue; + + ret = wait_for_completion_timeout(&commit->done, 10 * HZ); + if (ret == 0) { + drm_err(&i915->drm, "global state timed out\n"); + return -ETIMEDOUT; + } + } + + return 0; +} + +void +intel_atomic_global_state_commit_done(struct intel_atomic_state *state) +{ + const struct intel_global_state *new_obj_state; + struct intel_global_obj *obj; + int i; + + for_each_new_global_obj_in_state(state, obj, new_obj_state, i) { + struct intel_global_commit *commit = new_obj_state->commit; + + if (!new_obj_state->serialized) + continue; + + complete_all(&commit->done); + } +} diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h index 5477de8f0b30..5c8545d7a76a 100644 --- a/drivers/gpu/drm/i915/display/intel_global_state.h +++ b/drivers/gpu/drm/i915/display/intel_global_state.h @@ -54,11 +54,14 @@ struct intel_global_obj { (__i)++) \ for_each_if(obj) +struct intel_global_commit; + struct intel_global_state { struct intel_global_obj *obj; struct intel_atomic_state *state; + struct intel_global_commit *commit; struct kref ref; - bool changed; + bool changed, serialized; }; struct __intel_global_objs_state { @@ -87,6 +90,10 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state); int intel_atomic_lock_global_state(struct intel_global_state *obj_state); int intel_atomic_serialize_global_state(struct intel_global_state *obj_state); +int intel_atomic_global_state_setup_commit(struct intel_atomic_state *state); +void intel_atomic_global_state_commit_done(struct intel_atomic_state *state); +int intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state); + bool intel_atomic_global_state_is_serialized(struct intel_atomic_state *state); #endif diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index e9e4dcf345f9..d3e03ed5b79c 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -155,7 +155,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, const struct gmbus_pin *pins; size_t size; - if (INTEL_PCH_TYPE(i915) >= PCH_LNL) { + if (INTEL_PCH_TYPE(i915) >= PCH_MTL) { pins = gmbus_pins_mtp; size = ARRAY_SIZE(gmbus_pins_mtp); } else if (INTEL_PCH_TYPE(i915) >= PCH_DG2) { @@ -164,9 +164,6 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { pins = gmbus_pins_dg1; size = ARRAY_SIZE(gmbus_pins_dg1); - } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) { - pins = gmbus_pins_mtp; - size = ARRAY_SIZE(gmbus_pins_mtp); } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) { pins = gmbus_pins_icp; size = ARRAY_SIZE(gmbus_pins_icp); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 39b3f7c0c77c..c3e692e7f790 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -347,7 +347,7 @@ u32 intel_hdcp_get_repeater_ctl(struct 
drm_i915_private *i915, default: drm_err(&i915->drm, "Unknown transcoder %d\n", cpu_transcoder); - return -EINVAL; + return 0; } } @@ -364,7 +364,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915, return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; default: drm_err(&i915->drm, "Unknown port %d\n", port); - return -EINVAL; + return 0; } } @@ -853,8 +853,8 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (shim->stream_encryption) { ret = shim->stream_encryption(connector, true); if (ret) { - drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n", - connector->base.name, connector->base.base.id); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n", + connector->base.base.id, connector->base.name); return ret; } drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", @@ -878,14 +878,14 @@ static int _intel_hdcp_disable(struct intel_connector *connector) u32 repeater_ctl; int ret; - drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n", - connector->base.name, connector->base.base.id); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n", + connector->base.base.id, connector->base.name); if (hdcp->shim->stream_encryption) { ret = hdcp->shim->stream_encryption(connector, false); if (ret) { - drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n", - connector->base.name, connector->base.base.id); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n", + connector->base.base.id, connector->base.name); return ret; } drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", @@ -929,8 +929,8 @@ static int intel_hdcp1_enable(struct intel_connector *connector) struct intel_hdcp *hdcp = &connector->hdcp; int i, ret, tries = 3; - drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n", - connector->base.name, connector->base.base.id); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n", + connector->base.base.id, connector->base.name); if (!hdcp_key_loadable(i915)) { drm_err(&i915->drm, "HDCP key Load is not possible\n"); @@ -1027,8 +1027,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector) if (drm_WARN_ON(&i915->drm, !intel_hdcp_in_use(i915, cpu_transcoder, port))) { drm_err(&i915->drm, - "%s:%d HDCP link stopped encryption,%x\n", - connector->base.name, connector->base.base.id, + "[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n", + connector->base.base.id, connector->base.name, intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); ret = -ENXIO; intel_hdcp_update_value(connector, @@ -1046,8 +1046,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector) } drm_dbg_kms(&i915->drm, - "[%s:%d] HDCP link failed, retrying authentication\n", - connector->base.name, connector->base.base.id); + "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n", + connector->base.base.id, connector->base.name); ret = _intel_hdcp_disable(connector); if (ret) { @@ -1633,6 +1633,12 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) && !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]); + if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) { + drm_dbg_kms(&i915->drm, + "HDCP1.x or 2.0 Legacy Device Downstream\n"); + return -EINVAL; + } + /* Converting and Storing the seq_num_v to local variable as DWORD */ seq_num_v = drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); @@ -1731,8 
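The two "return -EINVAL" to "return 0" changes at the top of this hunk fix a type bug: intel_hdcp_get_repeater_ctl() returns u32, so a negative errno silently becomes a value with almost every bit set, which a caller could then OR into a register. Returning 0 keeps the error case inert. The effect is easy to demonstrate:

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    static uint32_t repeater_ctl_bad(int transcoder)
    {
        if (transcoder < 0)
            return -EINVAL;  /* wraps to 0xffffffea: a bogus bitmask */
        return 1u << transcoder;
    }

    static uint32_t repeater_ctl_fixed(int transcoder)
    {
        if (transcoder < 0)
            return 0;  /* harmless if OR'd into a register */
        return 1u << transcoder;
    }

    int main(void)
    {
        printf("bad:   0x%08x\n", (unsigned)repeater_ctl_bad(-1));
        printf("fixed: 0x%08x\n", (unsigned)repeater_ctl_fixed(-1));
        return 0;
    }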
+1737,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector) if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)) { - drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n", - connector->base.name, connector->base.base.id); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n", + connector->base.base.id, connector->base.name); ret = -EPERM; goto link_recover; } @@ -1740,8 +1746,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector) if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, true); if (ret) { - drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n", - connector->base.name, connector->base.base.id); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n", + connector->base.base.id, connector->base.name); return ret; } drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", @@ -1925,8 +1931,8 @@ static int _intel_hdcp2_enable(struct intel_connector *connector) struct intel_hdcp *hdcp = &connector->hdcp; int ret; - drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n", - connector->base.name, connector->base.base.id, + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n", + connector->base.base.id, connector->base.name, hdcp->content_type); ret = hdcp2_authenticate_and_encrypt(connector); @@ -1936,8 +1942,8 @@ static int _intel_hdcp2_enable(struct intel_connector *connector) return ret; } - drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n", - connector->base.name, connector->base.base.id, + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n", + connector->base.base.id, connector->base.name, hdcp->content_type); hdcp->hdcp2_encrypted = true; @@ -1953,14 +1959,14 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery struct intel_hdcp *hdcp = &connector->hdcp; int ret; - drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n", - connector->base.name, connector->base.base.id); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n", + connector->base.base.id, connector->base.name); if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, false); if (ret) { - drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n", - connector->base.name, connector->base.base.id); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n", + connector->base.base.id, connector->base.name); return ret; } drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n", @@ -2040,20 +2046,20 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } drm_dbg_kms(&i915->drm, - "[%s:%d] Repeater topology auth failed.(%d)\n", - connector->base.name, connector->base.base.id, + "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n", + connector->base.base.id, connector->base.name, ret); } else { drm_dbg_kms(&i915->drm, - "[%s:%d] HDCP2.2 link failed, retrying auth\n", - connector->base.name, connector->base.base.id); + "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n", + connector->base.base.id, connector->base.name); } ret = _intel_hdcp2_disable(connector, true); if (ret) { drm_err(&i915->drm, - "[%s:%d] Failed to disable hdcp2.2 (%d)\n", - connector->base.name, connector->base.base.id, ret); + "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n", + 
connector->base.base.id, connector->base.name, ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; @@ -2062,8 +2068,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) ret = _intel_hdcp2_enable(connector); if (ret) { drm_dbg_kms(&i915->drm, - "[%s:%d] Failed to enable hdcp2.2 (%d)\n", - connector->base.name, connector->base.base.id, + "[CONNECTOR:%d:%s] Failed to enable hdcp2.2 (%d)\n", + connector->base.base.id, connector->base.name, ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, @@ -2341,8 +2347,8 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, return -ENOENT; if (!connector->encoder) { - drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n", - connector->base.name, connector->base.base.id); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n", + connector->base.base.id, connector->base.name); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c index 18117b789b16..302bff75b06c 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -65,7 +65,7 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915, goto out_unmap; } - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) goto out_unmap; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h index 8023c85c7fa0..a568a457e532 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h @@ -8,6 +8,8 @@ #include "intel_display_reg_defs.h" +#define TRANS_HDCP(__i915) (DISPLAY_VER(__i915) >= 12) + /* HDCP Key Registers */ #define HDCP_KEY_CONF _MMIO(0x66c00) #define HDCP_AKSV_SEND_TRIGGER REG_BIT(31) @@ -82,7 +84,7 @@ #define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \ _TRANSB_HDCP_CONF) #define HDCP_CONF(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP_CONF(trans) : \ PORT_HDCP_CONF(port)) @@ -95,7 +97,7 @@ _TRANSA_HDCP_ANINIT, \ _TRANSB_HDCP_ANINIT) #define HDCP_ANINIT(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP_ANINIT(trans) : \ PORT_HDCP_ANINIT(port)) @@ -105,7 +107,7 @@ #define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \ _TRANSB_HDCP_ANLO) #define HDCP_ANLO(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP_ANLO(trans) : \ PORT_HDCP_ANLO(port)) @@ -115,7 +117,7 @@ #define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \ _TRANSB_HDCP_ANHI) #define HDCP_ANHI(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP_ANHI(trans) : \ PORT_HDCP_ANHI(port)) @@ -126,7 +128,7 @@ _TRANSA_HDCP_BKSVLO, \ _TRANSB_HDCP_BKSVLO) #define HDCP_BKSVLO(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP_BKSVLO(trans) : \ PORT_HDCP_BKSVLO(port)) @@ -137,7 +139,7 @@ _TRANSA_HDCP_BKSVHI, \ _TRANSB_HDCP_BKSVHI) #define HDCP_BKSVHI(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP_BKSVHI(trans) : \ PORT_HDCP_BKSVHI(port)) @@ -148,7 +150,7 @@ _TRANSA_HDCP_RPRIME, \ _TRANSB_HDCP_RPRIME) #define HDCP_RPRIME(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? 
\ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP_RPRIME(trans) : \ PORT_HDCP_RPRIME(port)) @@ -159,7 +161,7 @@ _TRANSA_HDCP_STATUS, \ _TRANSB_HDCP_STATUS) #define HDCP_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP_STATUS(trans) : \ PORT_HDCP_STATUS(port)) @@ -200,7 +202,7 @@ #define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19) #define AUTH_CLR_KEYS REG_BIT(18) #define HDCP2_AUTH(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP2_AUTH(trans) : \ PORT_HDCP2_AUTH(port)) @@ -211,7 +213,7 @@ _TRANSB_HDCP2_CTL) #define CTL_LINK_ENCRYPTION_REQ REG_BIT(31) #define HDCP2_CTL(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP2_CTL(trans) : \ PORT_HDCP2_CTL(port)) @@ -225,7 +227,7 @@ #define LINK_AUTH_STATUS REG_BIT(21) #define LINK_ENCRYPTION_STATUS REG_BIT(20) #define HDCP2_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP2_STATUS(trans) : \ PORT_HDCP2_STATUS(port)) @@ -247,7 +249,7 @@ #define STREAM_ENCRYPTION_STATUS REG_BIT(31) #define STREAM_TYPE_STATUS REG_BIT(30) #define HDCP2_STREAM_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP2_STREAM_STATUS(trans) : \ PIPE_HDCP2_STREAM_STATUS(pipe)) @@ -263,7 +265,7 @@ _TRANSB_HDCP2_AUTH_STREAM) #define AUTH_STREAM_TYPE REG_BIT(31) #define HDCP2_AUTH_STREAM(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ + (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP2_AUTH_STREAM(trans) : \ PORT_HDCP2_AUTH_STREAM(port)) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 39e4f5f7c817..7020e5806109 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -49,6 +49,7 @@ #include "intel_cx0_phy.h" #include "intel_ddi.h" #include "intel_de.h" +#include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_gmbus.h" @@ -523,10 +524,12 @@ void hsw_write_infoframe(struct intel_encoder *encoder, 0); /* Wa_14013475917 */ - if (IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC) - return; + if (!(IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC)) + val |= hsw_infoframe_enable(type); + + if (type == DP_SDP_VSC) + val |= VSC_DIP_HW_DATA_SW_HEA; - val |= hsw_infoframe_enable(type); intel_de_write(dev_priv, ctl_reg, val); intel_de_posting_read(dev_priv, ctl_reg); } @@ -2503,6 +2506,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) if (!intel_display_device_enabled(dev_priv)) return connector_status_disconnected; + if (!intel_display_driver_check_access(dev_priv)) + return connector->status; + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); if (DISPLAY_VER(dev_priv) >= 11 && @@ -2531,6 +2537,9 @@ intel_hdmi_force(struct drm_connector *connector) drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + if (!intel_display_driver_check_access(i915)) + return; + intel_hdmi_unset_edid(connector); if (connector->status != connector_status_connected) @@ -3015,6 +3024,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, connector->ycbcr_420_allowed = true; intel_connector->polled = DRM_CONNECTOR_POLL_HPD; + intel_connector->base.polled = intel_connector->polled; if (HAS_DDI(dev_priv)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; diff 
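The run of hdcp_regs.h hunks ending above replaces every open-coded "GRAPHICS_VER(dev_priv) >= 12" with the single TRANS_HDCP() predicate, so the transcoder-versus-port register selection lives in one definition and now keys off display version rather than graphics version. A toy version of the pattern, with invented offsets (the macro names match the diff, the values do not):

    #include <stdio.h>

    struct dev { int display_ver; };

    #define TRANS_HDCP(d)  ((d)->display_ver >= 12)

    /* Hypothetical register offsets: per-transcoder vs per-port. */
    #define TRANS_HDCP_CONF(trans)  (0x66400u + 0x100u * (trans))
    #define PORT_HDCP_CONF(port)    (0x66000u + 0x100u * (port))

    #define HDCP_CONF(d, trans, port) \
        (TRANS_HDCP(d) ? TRANS_HDCP_CONF(trans) : PORT_HDCP_CONF(port))

    int main(void)
    {
        struct dev old = { .display_ver = 11 }, new = { .display_ver = 14 };

        printf("old: 0x%x\n", HDCP_CONF(&old, 1, 2));  /* per-port */
        printf("new: 0x%x\n", HDCP_CONF(&new, 1, 2));  /* per-transcoder */
        return 0;
    }

Centralizing the predicate also means a future platform cutoff is a one-line edit instead of a dozen.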
--git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 0c0700c6ec66..d9ec349f3c8c 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -177,6 +177,46 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, return storm; } +static bool detection_work_enabled(struct drm_i915_private *i915) +{ + lockdep_assert_held(&i915->irq_lock); + + return i915->display.hotplug.detection_work_enabled; +} + +static bool +mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) +{ + lockdep_assert_held(&i915->irq_lock); + + if (!detection_work_enabled(i915)) + return false; + + return mod_delayed_work(i915->unordered_wq, work, delay); +} + +static bool +queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) +{ + lockdep_assert_held(&i915->irq_lock); + + if (!detection_work_enabled(i915)) + return false; + + return queue_delayed_work(i915->unordered_wq, work, delay); +} + +static bool +queue_detection_work(struct drm_i915_private *i915, struct work_struct *work) +{ + lockdep_assert_held(&i915->irq_lock); + + if (!detection_work_enabled(i915)) + return false; + + return queue_work(i915->unordered_wq, work); +} + static void intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) { @@ -213,9 +253,9 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. */ if (hpd_disabled) { drm_kms_helper_poll_reschedule(&dev_priv->drm); - mod_delayed_work(dev_priv->unordered_wq, - &dev_priv->display.hotplug.reenable_work, - msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); + mod_delayed_detection_work(dev_priv, + &dev_priv->display.hotplug.reenable_work, + msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } } @@ -348,9 +388,9 @@ static void i915_digport_work_func(struct work_struct *work) if (old_bits) { spin_lock_irq(&dev_priv->irq_lock); dev_priv->display.hotplug.event_bits |= old_bits; + queue_delayed_detection_work(dev_priv, + &dev_priv->display.hotplug.hotplug_work, 0); spin_unlock_irq(&dev_priv->irq_lock); - queue_delayed_work(dev_priv->unordered_wq, - &dev_priv->display.hotplug.hotplug_work, 0); } } @@ -467,11 +507,11 @@ static void i915_hotplug_work_func(struct work_struct *work) if (retry) { spin_lock_irq(&dev_priv->irq_lock); dev_priv->display.hotplug.retry_bits |= retry; - spin_unlock_irq(&dev_priv->irq_lock); - mod_delayed_work(dev_priv->unordered_wq, - &dev_priv->display.hotplug.hotplug_work, - msecs_to_jiffies(HPD_RETRY_DELAY)); + mod_delayed_detection_work(dev_priv, + &dev_priv->display.hotplug.hotplug_work, + msecs_to_jiffies(HPD_RETRY_DELAY)); + spin_unlock_irq(&dev_priv->irq_lock); } } @@ -590,7 +630,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, */ if (storm_detected) intel_hpd_irq_setup(dev_priv); - spin_unlock(&dev_priv->irq_lock); /* * Our hotplug handler can grab modeset locks (by calling down into the @@ -601,8 +640,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (queue_dig) queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); if (queue_hp) - queue_delayed_work(dev_priv->unordered_wq, - &dev_priv->display.hotplug.hotplug_work, 0); + queue_delayed_detection_work(dev_priv, + &dev_priv->display.hotplug.hotplug_work, 0); + + spin_unlock(&dev_priv->irq_lock); } /** @@ -710,6 +751,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work) 
cancel_work(&dev_priv->display.hotplug.poll_init_work); } + spin_lock_irq(&dev_priv->irq_lock); + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; @@ -718,6 +761,9 @@ static void i915_hpd_poll_init_work(struct work_struct *work) if (pin == HPD_NONE) continue; + if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) + continue; + connector->base.polled = connector->polled; if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD) @@ -726,6 +772,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work) } drm_connector_list_iter_end(&conn_iter); + spin_unlock_irq(&dev_priv->irq_lock); + if (enabled) drm_kms_helper_poll_reschedule(&dev_priv->drm); @@ -774,8 +822,10 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) * As well, there's no issue if we race here since we always reschedule * this worker anyway */ - queue_work(dev_priv->unordered_wq, - &dev_priv->display.hotplug.poll_init_work); + spin_lock_irq(&dev_priv->irq_lock); + queue_detection_work(dev_priv, + &dev_priv->display.hotplug.poll_init_work); + spin_unlock_irq(&dev_priv->irq_lock); } /** @@ -803,8 +853,11 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) return; WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); - queue_work(dev_priv->unordered_wq, - &dev_priv->display.hotplug.poll_init_work); + + spin_lock_irq(&dev_priv->irq_lock); + queue_detection_work(dev_priv, + &dev_priv->display.hotplug.poll_init_work); + spin_unlock_irq(&dev_priv->irq_lock); } void intel_hpd_init_early(struct drm_i915_private *i915) @@ -826,6 +879,20 @@ void intel_hpd_init_early(struct drm_i915_private *i915) i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915); } +static bool cancel_all_detection_work(struct drm_i915_private *i915) +{ + bool was_pending = false; + + if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work)) + was_pending = true; + if (cancel_work_sync(&i915->display.hotplug.poll_init_work)) + was_pending = true; + if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work)) + was_pending = true; + + return was_pending; +} + void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) { if (!HAS_DISPLAY(dev_priv)) @@ -841,9 +908,13 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); - cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work); - cancel_work_sync(&dev_priv->display.hotplug.poll_init_work); - cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work); + + /* + * All other work triggered by hotplug events should be canceled by + * now. 
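The helpers introduced at the top of the intel_hotplug.c diff (queue_delayed_detection_work() and friends) gate every hotplug work submission on a detection_work_enabled flag that is only read and written under irq_lock, and the hunks here move each queue/mod call inside that lock. That ordering is what makes the disable path added further down race-free: clear the flag under the lock, then cancel outside it, and nothing can requeue in between. A pthread sketch of the same discipline, illustrative only:

    #include <stdio.h>
    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool detection_enabled;
    static int queued;

    /* Must be called with 'lock' held, like the i915 helpers. */
    static bool queue_detection_work(void)
    {
        if (!detection_enabled)
            return false;
        queued++;  /* stand-in for queue_work() */
        return true;
    }

    static void disable_detection_work(void)
    {
        pthread_mutex_lock(&lock);
        detection_enabled = false;  /* no new work from here on */
        pthread_mutex_unlock(&lock);
        /* the sync cancel of all pending work runs here, outside the lock */
    }

    int main(void)
    {
        pthread_mutex_lock(&lock);
        detection_enabled = true;
        printf("queued: %d\n", queue_detection_work());  /* 1 */
        pthread_mutex_unlock(&lock);

        disable_detection_work();

        pthread_mutex_lock(&lock);
        printf("queued: %d\n", queue_detection_work());  /* 0 */
        pthread_mutex_unlock(&lock);
        return 0;
    }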
+ */ + if (cancel_all_detection_work(dev_priv)) + drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n"); } bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) @@ -873,6 +944,62 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) spin_unlock_irq(&dev_priv->irq_lock); } +static void queue_work_for_missed_irqs(struct drm_i915_private *i915) +{ + bool queue_work = false; + enum hpd_pin pin; + + lockdep_assert_held(&i915->irq_lock); + + if (i915->display.hotplug.event_bits || + i915->display.hotplug.retry_bits) + queue_work = true; + + for_each_hpd_pin(pin) { + switch (i915->display.hotplug.stats[pin].state) { + case HPD_MARK_DISABLED: + queue_work = true; + break; + case HPD_ENABLED: + break; + default: + MISSING_CASE(i915->display.hotplug.stats[pin].state); + } + } + + if (queue_work) + queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); +} + +void intel_hpd_enable_detection_work(struct drm_i915_private *i915) +{ + spin_lock_irq(&i915->irq_lock); + i915->display.hotplug.detection_work_enabled = true; + queue_work_for_missed_irqs(i915); + spin_unlock_irq(&i915->irq_lock); +} + +void intel_hpd_disable_detection_work(struct drm_i915_private *i915) +{ + spin_lock_irq(&i915->irq_lock); + i915->display.hotplug.detection_work_enabled = false; + spin_unlock_irq(&i915->irq_lock); + + cancel_all_detection_work(i915); +} + +bool intel_hpd_schedule_detection(struct drm_i915_private *i915) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&i915->irq_lock, flags); + ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); + spin_unlock_irqrestore(&i915->irq_lock, flags); + + return ret; +} + static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index 424ae5dbf5a0..a17253ddec83 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -30,4 +30,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); void intel_hpd_debugfs_register(struct drm_i915_private *i915); +void intel_hpd_enable_detection_work(struct drm_i915_private *i915); +void intel_hpd_disable_detection_work(struct drm_i915_private *i915); +bool intel_hpd_schedule_detection(struct drm_i915_private *i915); + #endif /* __INTEL_HOTPLUG_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 04f62f27ad74..76076509f771 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -163,12 +163,10 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))) return; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_LNL) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL) hpd->pch_hpd = hpd_mtp; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) hpd->pch_hpd = hpd_sde_dg1; - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP) - hpd->pch_hpd = hpd_mtp; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) hpd->pch_hpd = hpd_icp; else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) @@ -1139,7 +1137,7 @@ static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915) if (INTEL_PCH_TYPE(i915) >= PCH_LNL) xe2lpd_sde_hpd_irq_setup(i915); - else if (INTEL_PCH_TYPE(i915) 
>= PCH_MTP) + else if (INTEL_PCH_TYPE(i915) >= PCH_MTL) mtp_hpd_irq_setup(i915); } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 1ce785db6a5e..f242bb320610 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -250,11 +250,36 @@ struct opregion_asle_ext { #define MAX_DSLP 1500 +#define OPREGION_SIZE (8 * 1024) + +struct intel_opregion { + struct drm_i915_private *i915; + + struct opregion_header *header; + struct opregion_acpi *acpi; + struct opregion_swsci *swsci; + u32 swsci_gbda_sub_functions; + u32 swsci_sbcb_sub_functions; + struct opregion_asle *asle; + struct opregion_asle_ext *asle_ext; + void *rvda; + void *vbt_firmware; + const void *vbt; + u32 vbt_size; + struct work_struct asle_work; + struct notifier_block acpi_notifier; +}; + static int check_swsci_function(struct drm_i915_private *i915, u32 function) { - struct opregion_swsci *swsci = i915->display.opregion.swsci; + struct intel_opregion *opregion = i915->display.opregion; + struct opregion_swsci *swsci; u32 main_function, sub_function; + if (!opregion) + return -ENODEV; + + swsci = opregion->swsci; if (!swsci) return -ENODEV; @@ -265,11 +290,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function) /* Check if we can call the function. See swsci_setup for details. */ if (main_function == SWSCI_SBCB) { - if ((i915->display.opregion.swsci_sbcb_sub_functions & + if ((opregion->swsci_sbcb_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } else if (main_function == SWSCI_GBDA) { - if ((i915->display.opregion.swsci_gbda_sub_functions & + if ((opregion->swsci_gbda_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } @@ -280,7 +305,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function) static int swsci(struct drm_i915_private *dev_priv, u32 function, u32 parm, u32 *parm_out) { - struct opregion_swsci *swsci = dev_priv->display.opregion.swsci; + struct opregion_swsci *swsci; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 scic, dslp; u16 swsci_val; @@ -290,6 +315,8 @@ static int swsci(struct drm_i915_private *dev_priv, if (ret) return ret; + swsci = dev_priv->display.opregion->swsci; + /* Driver sleep timeout in ms. 
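The gmbus and hotplug-IRQ hunks above make the same correction: the first arm of a descending INTEL_PCH_TYPE() ladder moves from PCH_LNL down to PCH_MTL, and the separate PCH_MTP arm that sat below the DG1 comparison is dropped. In a ladder of ">=" comparisons over an ordered enum, arm placement is load-bearing: a branch placed below a broader one can be shadowed and never match. A reduced illustration with a hypothetical enum ordering:

    #include <stdio.h>

    /* Hypothetical platform ordering; higher value == newer. */
    enum pch { PCH_ICP = 1, PCH_DG1, PCH_DG2, PCH_MTL, PCH_LNL };

    static const char *pins_for(enum pch pch)
    {
        /* Descending ladder: the first match wins, so each arm must
         * sit above every broader arm that would otherwise shadow it. */
        if (pch >= PCH_MTL)
            return "mtp";
        else if (pch >= PCH_DG2)
            return "dg2";
        else if (pch >= PCH_DG1)
            return "dg1";
        return "icp";
    }

    int main(void)
    {
        printf("MTL: %s\n", pins_for(PCH_MTL));  /* mtp */
        printf("LNL: %s\n", pins_for(PCH_LNL));  /* mtp */
        printf("DG2: %s\n", pins_for(PCH_DG2));  /* dg2 */
        return 0;
    }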
*/ dslp = swsci->dslp; if (!dslp) { @@ -462,7 +489,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; - struct opregion_asle *asle = dev_priv->display.opregion.asle; + struct opregion_asle *asle = dev_priv->display.opregion->asle; drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp); @@ -584,9 +611,8 @@ static void asle_work(struct work_struct *work) { struct intel_opregion *opregion = container_of(work, struct intel_opregion, asle_work); - struct drm_i915_private *dev_priv = - container_of(opregion, struct drm_i915_private, display.opregion); - struct opregion_asle *asle = dev_priv->display.opregion.asle; + struct drm_i915_private *dev_priv = opregion->i915; + struct opregion_asle *asle = opregion->asle; u32 aslc_stat = 0; u32 aslc_req; @@ -632,11 +658,17 @@ static void asle_work(struct work_struct *work) asle->aslc = aslc_stat; } -void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) +bool intel_opregion_asle_present(struct drm_i915_private *i915) +{ + return i915->display.opregion && i915->display.opregion->asle; +} + +void intel_opregion_asle_intr(struct drm_i915_private *i915) { - if (dev_priv->display.opregion.asle) - queue_work(dev_priv->unordered_wq, - &dev_priv->display.opregion.asle_work); + struct intel_opregion *opregion = i915->display.opregion; + + if (opregion && opregion->asle) + queue_work(i915->unordered_wq, &opregion->asle_work); } #define ACPI_EV_DISPLAY_SWITCH (1<<0) @@ -692,7 +724,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) static void intel_didl_outputs(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->display.opregion; + struct intel_opregion *opregion = dev_priv->display.opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0, max_outputs; @@ -731,7 +763,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv) static void intel_setup_cadls(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->display.opregion; + struct intel_opregion *opregion = dev_priv->display.opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0; @@ -761,7 +793,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv) static void swsci_setup(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->display.opregion; + struct intel_opregion *opregion = dev_priv->display.opregion; bool requested_callbacks = false; u32 tmp; @@ -839,7 +871,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->display.opregion; + struct intel_opregion *opregion = dev_priv->display.opregion; const struct firmware *fw = NULL; const char *name = dev_priv->display.params.vbt_firmware; int ret; @@ -879,7 +911,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) int intel_opregion_setup(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->display.opregion; + struct intel_opregion *opregion; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; @@ -902,11 +934,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) return -ENOTSUPP; } + opregion = kzalloc(sizeof(*opregion), GFP_KERNEL); + if (!opregion) + return -ENOMEM; + + 
opregion->i915 = dev_priv; + dev_priv->display.opregion = opregion; + INIT_WORK(&opregion->asle_work, asle_work); base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB); - if (!base) - return -ENOMEM; + if (!base) { + err = -ENOMEM; + goto err_memremap; + } memcpy(buf, base, sizeof(buf)); @@ -916,7 +957,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) goto err_out; } opregion->header = base; - opregion->lid_state = base + ACPI_CLID; drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n", opregion->header->over.major, @@ -1034,6 +1074,10 @@ out: err_out: memunmap(base); +err_memremap: + kfree(opregion); + dev_priv->display.opregion = NULL; + return err; } @@ -1106,12 +1150,12 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con { struct drm_connector *connector = &intel_connector->base; struct drm_i915_private *i915 = to_i915(connector->dev); - struct intel_opregion *opregion = &i915->display.opregion; + struct intel_opregion *opregion = i915->display.opregion; const struct drm_edid *drm_edid; const void *edid; int len; - if (!opregion->asle_ext) + if (!opregion || !opregion->asle_ext) return NULL; edid = opregion->asle_ext->bddc; @@ -1132,10 +1176,28 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con return drm_edid; } +const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size) +{ + struct intel_opregion *opregion = i915->display.opregion; + + if (!opregion || !opregion->vbt) + return NULL; + + if (size) + *size = opregion->vbt_size; + + return opregion->vbt; +} + bool intel_opregion_headless_sku(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->display.opregion; - struct opregion_header *header = opregion->header; + struct intel_opregion *opregion = i915->display.opregion; + struct opregion_header *header; + + if (!opregion) + return false; + + header = opregion->header; if (!header || header->over.major < 2 || (header->over.major == 2 && header->over.minor < 3)) @@ -1146,9 +1208,9 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915) void intel_opregion_register(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->display.opregion; + struct intel_opregion *opregion = i915->display.opregion; - if (!opregion->header) + if (!opregion) return; if (opregion->acpi) { @@ -1162,7 +1224,7 @@ void intel_opregion_register(struct drm_i915_private *i915) static void intel_opregion_resume_display(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->display.opregion; + struct intel_opregion *opregion = i915->display.opregion; if (opregion->acpi) { intel_didl_outputs(i915); @@ -1188,9 +1250,9 @@ static void intel_opregion_resume_display(struct drm_i915_private *i915) void intel_opregion_resume(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->display.opregion; + struct intel_opregion *opregion = i915->display.opregion; - if (!opregion->header) + if (!opregion) return; if (HAS_DISPLAY(i915)) @@ -1201,12 +1263,12 @@ void intel_opregion_resume(struct drm_i915_private *i915) static void intel_opregion_suspend_display(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->display.opregion; + struct intel_opregion *opregion = i915->display.opregion; if (opregion->asle) opregion->asle->ardy = ASLE_ARDY_NOT_READY; - cancel_work_sync(&i915->display.opregion.asle_work); + cancel_work_sync(&opregion->asle_work); if (opregion->acpi) opregion->acpi->drdy = 0; @@ -1214,9 +1276,9 @@ static 
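From this point on, every intel_opregion user switches from the embedded "&i915->display.opregion" to the pointer "i915->display.opregion", so "no OpRegion" is represented by NULL rather than by a zeroed embedded struct, and the struct definition itself can move out of the public header. The shape of that conversion, sketched standalone with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct opregion { int asle_present; };
    struct device { struct opregion *opregion; };  /* was an embedded struct */

    static int opregion_setup(struct device *dev)
    {
        struct opregion *op = calloc(1, sizeof(*op));

        if (!op)
            return -1;
        op->asle_present = 1;
        dev->opregion = op;
        return 0;
    }

    /* Accessors now start with one NULL check instead of probing
     * individual members of an always-present struct. */
    static int opregion_asle_present(const struct device *dev)
    {
        return dev->opregion && dev->opregion->asle_present;
    }

    static void opregion_cleanup(struct device *dev)
    {
        free(dev->opregion);
        dev->opregion = NULL;
    }

    int main(void)
    {
        struct device dev = { 0 };

        printf("before setup: %d\n", opregion_asle_present(&dev));  /* 0 */
        if (opregion_setup(&dev) == 0)
            printf("after setup:  %d\n", opregion_asle_present(&dev));  /* 1 */
        opregion_cleanup(&dev);
        return 0;
    }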
void intel_opregion_suspend_display(struct drm_i915_private *i915) void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) { - struct intel_opregion *opregion = &i915->display.opregion; + struct intel_opregion *opregion = i915->display.opregion; - if (!opregion->header) + if (!opregion) return; intel_opregion_notify_adapter(i915, state); @@ -1227,11 +1289,11 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) void intel_opregion_unregister(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->display.opregion; + struct intel_opregion *opregion = i915->display.opregion; intel_opregion_suspend(i915, PCI_D1); - if (!opregion->header) + if (!opregion) return; if (opregion->acpi_notifier.notifier_call) { @@ -1242,26 +1304,36 @@ void intel_opregion_unregister(struct drm_i915_private *i915) void intel_opregion_cleanup(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->display.opregion; + struct intel_opregion *opregion = i915->display.opregion; - if (!opregion->header) + if (!opregion) return; - /* just clear all opregion memory pointers now */ memunmap(opregion->header); - if (opregion->rvda) { + if (opregion->rvda) memunmap(opregion->rvda); - opregion->rvda = NULL; - } - if (opregion->vbt_firmware) { - kfree(opregion->vbt_firmware); - opregion->vbt_firmware = NULL; - } - opregion->header = NULL; - opregion->acpi = NULL; - opregion->swsci = NULL; - opregion->asle = NULL; - opregion->asle_ext = NULL; - opregion->vbt = NULL; - opregion->lid_state = NULL; + kfree(opregion->vbt_firmware); + kfree(opregion); + i915->display.opregion = NULL; +} + +static int intel_opregion_show(struct seq_file *m, void *unused) +{ + struct drm_i915_private *i915 = m->private; + struct intel_opregion *opregion = i915->display.opregion; + + if (opregion) + seq_write(m, opregion->header, OPREGION_SIZE); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(intel_opregion); + +void intel_opregion_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_opregion", 0444, minor->debugfs_root, + i915, &intel_opregion_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h index fd2ea8ef0fa2..0bec224f711f 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.h +++ b/drivers/gpu/drm/i915/display/intel_opregion.h @@ -25,38 +25,13 @@ #ifndef _INTEL_OPREGION_H_ #define _INTEL_OPREGION_H_ -#include <linux/workqueue.h> #include <linux/pci.h> +#include <linux/types.h> struct drm_i915_private; struct intel_connector; struct intel_encoder; -struct opregion_header; -struct opregion_acpi; -struct opregion_swsci; -struct opregion_asle; -struct opregion_asle_ext; - -struct intel_opregion { - struct opregion_header *header; - struct opregion_acpi *acpi; - struct opregion_swsci *swsci; - u32 swsci_gbda_sub_functions; - u32 swsci_sbcb_sub_functions; - struct opregion_asle *asle; - struct opregion_asle_ext *asle_ext; - void *rvda; - void *vbt_firmware; - const void *vbt; - u32 vbt_size; - u32 *lid_state; - struct work_struct asle_work; - struct notifier_block acpi_notifier; -}; - -#define OPREGION_SIZE (8 * 1024) - #ifdef CONFIG_ACPI int intel_opregion_setup(struct drm_i915_private *dev_priv); @@ -69,6 +44,7 @@ void intel_opregion_resume(struct drm_i915_private *dev_priv); void intel_opregion_suspend(struct drm_i915_private *dev_priv, pci_power_t state); +bool intel_opregion_asle_present(struct drm_i915_private *i915); void 
intel_opregion_asle_intr(struct drm_i915_private *dev_priv); int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable); @@ -77,8 +53,12 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector); +const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size); + bool intel_opregion_headless_sku(struct drm_i915_private *i915); +void intel_opregion_debugfs_register(struct drm_i915_private *i915); + #else /* CONFIG_ACPI*/ static inline int intel_opregion_setup(struct drm_i915_private *dev_priv) @@ -107,6 +87,11 @@ static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv, { } +static inline bool intel_opregion_asle_present(struct drm_i915_private *i915) +{ + return false; +} + static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { } @@ -134,11 +119,21 @@ intel_opregion_get_edid(struct intel_connector *connector) return NULL; } +static inline const void * +intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size) +{ + return NULL; +} + static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915) { return false; } +static inline void intel_opregion_debugfs_register(struct drm_i915_private *i915) +{ +} + #endif /* CONFIG_ACPI */ #endif diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 0d8e5320a4f8..073ea3166c36 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -37,6 +37,7 @@ #include "intel_backlight.h" #include "intel_connector.h" #include "intel_de.h" +#include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_drrs.h" #include "intel_lvds_regs.h" @@ -683,6 +684,9 @@ intel_panel_detect(struct drm_connector *connector, bool force) if (!intel_display_device_enabled(i915)) return connector_status_disconnected; + if (!intel_display_driver_check_access(i915)) + return connector->status; + return connector_status_connected; } diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index a55c09cbd0e4..ada1792df5b3 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -3,9 +3,11 @@ * Copyright © 2021 Intel Corporation */ +#include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "intel_atomic_plane.h" +#include "intel_crtc.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_fb.h" @@ -13,20 +15,21 @@ #include "intel_plane_initial.h" static bool -intel_reuse_initial_plane_obj(struct drm_i915_private *i915, - const struct intel_initial_plane_config *plane_config, +intel_reuse_initial_plane_obj(struct intel_crtc *this, + const struct intel_initial_plane_config plane_configs[], struct drm_framebuffer **fb, struct i915_vma **vma) { + struct drm_i915_private *i915 = to_i915(this->base.dev); struct intel_crtc *crtc; for_each_intel_crtc(&i915->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct intel_plane_state *plane_state = + const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); + const struct intel_crtc_state *crtc_state = + 
to_intel_crtc_state(crtc->base.state); if (!crtc_state->uapi.active) continue; @@ -34,7 +37,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915, if (!plane_state->ggtt_vma) continue; - if (intel_plane_ggtt_offset(plane_state) == plane_config->base) { + if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) { *fb = plane_state->hw.fb; *vma = plane_state->ggtt_vma; return true; @@ -44,12 +47,100 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915, return false; } +static bool +initial_plane_phys_lmem(struct drm_i915_private *i915, + struct intel_initial_plane_config *plane_config) +{ + gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm; + struct intel_memory_region *mem; + dma_addr_t dma_addr; + gen8_pte_t pte; + u32 base; + + base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT); + + gte += base / I915_GTT_PAGE_SIZE; + + pte = ioread64(gte); + if (!(pte & GEN12_GGTT_PTE_LM)) { + drm_err(&i915->drm, + "Initial plane programming missing PTE_LM bit\n"); + return false; + } + + dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK; + + if (IS_DGFX(i915)) + mem = i915->mm.regions[INTEL_REGION_LMEM_0]; + else + mem = i915->mm.stolen_region; + if (!mem) { + drm_dbg_kms(&i915->drm, + "Initial plane memory region not initialized\n"); + return false; + } + + /* + * On lmem we don't currently expect this to + * ever be placed in the stolen portion. + */ + if (dma_addr < mem->region.start || dma_addr > mem->region.end) { + drm_err(&i915->drm, + "Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n", + &dma_addr, mem->region.name, &mem->region.start, &mem->region.end); + return false; + } + + drm_dbg(&i915->drm, + "Using dma_addr=%pa, based on initial plane programming\n", + &dma_addr); + + plane_config->phys_base = dma_addr - mem->region.start; + plane_config->mem = mem; + + return true; +} + +static bool +initial_plane_phys_smem(struct drm_i915_private *i915, + struct intel_initial_plane_config *plane_config) +{ + struct intel_memory_region *mem; + u32 base; + + base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT); + + mem = i915->mm.stolen_region; + if (!mem) { + drm_dbg_kms(&i915->drm, + "Initial plane memory region not initialized\n"); + return false; + } + + /* FIXME get and validate the dma_addr from the PTE */ + plane_config->phys_base = base; + plane_config->mem = mem; + + return true; +} + +static bool +initial_plane_phys(struct drm_i915_private *i915, + struct intel_initial_plane_config *plane_config) +{ + if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915)) + return initial_plane_phys_lmem(i915, plane_config); + else + return initial_plane_phys_smem(i915, plane_config); +} + static struct i915_vma * initial_plane_vma(struct drm_i915_private *i915, struct intel_initial_plane_config *plane_config) { struct intel_memory_region *mem; struct drm_i915_gem_object *obj; + struct drm_mm_node orig_mm = {}; struct i915_vma *vma; resource_size_t phys_base; u32 base, size; @@ -58,45 +149,13 @@ initial_plane_vma(struct drm_i915_private *i915, if (plane_config->size == 0) return NULL; - base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT); - if (IS_DGFX(i915)) { - gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm; - gen8_pte_t pte; - - gte += base / I915_GTT_PAGE_SIZE; - - pte = ioread64(gte); - if (!(pte & GEN12_GGTT_PTE_LM)) { - drm_err(&i915->drm, - "Initial plane programming missing PTE_LM bit\n"); - return NULL; - } - - phys_base = pte & I915_GTT_PAGE_MASK; - mem = i915->mm.regions[INTEL_REGION_LMEM_0]; - - /* - * We don't 
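initial_plane_phys_lmem() above recovers the physical placement the firmware chose by reading the GGTT PTE for the plane's base: the GEN12_GGTT_PTE_LM bit says the page lives in local memory, the address mask extracts the DMA address, and a range check confirms it falls inside the expected region before it is converted to a region-relative offset. A self-contained decode of a fake PTE along those lines (the bit position and mask below are invented for illustration, not the real GEN12 encoding):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define PTE_LM         (1ull << 1)
    #define PTE_ADDR_MASK  0x0000fffffffff000ull

    struct region { uint64_t start, end; };

    static bool decode_initial_plane(uint64_t pte, const struct region *lmem,
                                     uint64_t *phys_base)
    {
        uint64_t dma_addr;

        if (!(pte & PTE_LM))
            return false;  /* plane not in local memory */

        dma_addr = pte & PTE_ADDR_MASK;
        if (dma_addr < lmem->start || dma_addr > lmem->end)
            return false;  /* outside the region: don't trust it */

        *phys_base = dma_addr - lmem->start;  /* region-relative */
        return true;
    }

    int main(void)
    {
        struct region lmem = { .start = 0x100000000ull, .end = 0x1ffffffffull };
        uint64_t pte = (0x100200000ull & PTE_ADDR_MASK) | PTE_LM;
        uint64_t base;

        if (decode_initial_plane(pte, &lmem, &base))
            printf("phys_base = 0x%llx\n", (unsigned long long)base);
        return 0;
    }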
currently expect this to ever be placed in the - * stolen portion. - */ - if (phys_base >= resource_size(&mem->region)) { - drm_err(&i915->drm, - "Initial plane programming using invalid range, phys_base=%pa\n", - &phys_base); - return NULL; - } - - drm_dbg(&i915->drm, - "Using phys_base=%pa, based on initial plane programming\n", - &phys_base); - } else { - phys_base = base; - mem = i915->mm.stolen_region; - } - - if (!mem) + if (!initial_plane_phys(i915, plane_config)) return NULL; + phys_base = plane_config->phys_base; + mem = plane_config->mem; + + base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT); size = round_up(plane_config->base + plane_config->size, mem->min_page_size); size -= base; @@ -108,14 +167,19 @@ initial_plane_vma(struct drm_i915_private *i915, */ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) && mem == i915->mm.stolen_region && - size * 2 > i915->dsm.usable_size) + size * 2 > i915->dsm.usable_size) { + drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n"); return NULL; + } obj = i915_gem_object_create_region_at(mem, phys_base, size, I915_BO_ALLOC_USER | I915_BO_PREALLOC); - if (IS_ERR(obj)) + if (IS_ERR(obj)) { + drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n", + mem->region.name); return NULL; + } /* * Mark it WT ahead of time to avoid changing the @@ -139,23 +203,66 @@ initial_plane_vma(struct drm_i915_private *i915, goto err_obj; } + /* + * MTL GOP likes to place the framebuffer high up in ggtt, + * which can cause problems for ggtt_reserve_guc_top(). + * Try to pin it to a low ggtt address instead to avoid that. + */ + base = 0; + + if (base != plane_config->base) { + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; + int ret; + + /* + * Make sure the original and new locations + * can't overlap. That would corrupt the original + * PTEs which are still being used for scanout. + */ + ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &orig_mm, + size, plane_config->base, + I915_COLOR_UNEVICTABLE, PIN_NOEVICT); + if (ret) + goto err_obj; + } + vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL); if (IS_ERR(vma)) goto err_obj; +retry: pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base; - if (HAS_GMCH(i915)) + if (!i915_gem_object_is_lmem(obj)) pinctl |= PIN_MAPPABLE; - if (i915_vma_pin(vma, 0, 0, pinctl)) + if (i915_vma_pin(vma, 0, 0, pinctl)) { + if (drm_mm_node_allocated(&orig_mm)) { + drm_mm_remove_node(&orig_mm); + /* + * Try again, but this time pin + * it to its original location. 
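+ * The reservation above kept that range free while the low address was tried, so it is still available here.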
+ */ + base = plane_config->base; + goto retry; + } goto err_obj; + } if (i915_gem_object_is_tiled(obj) && !i915_vma_is_map_and_fenceable(vma)) goto err_obj; + if (drm_mm_node_allocated(&orig_mm)) + drm_mm_remove_node(&orig_mm); + + drm_dbg_kms(&i915->drm, + "Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n", + i915_ggtt_offset(vma), plane_config->base); + return vma; err_obj: + if (drm_mm_node_allocated(&orig_mm)) + drm_mm_remove_node(&orig_mm); i915_gem_object_put(obj); return NULL; } @@ -210,10 +317,11 @@ err_vma: static void intel_find_initial_plane_obj(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) + struct intel_initial_plane_config plane_configs[]) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_initial_plane_config *plane_config = + &plane_configs[crtc->pipe]; struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = @@ -239,7 +347,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, * Failed to alloc the obj, check to see if we should share * an fb with another CRTC instead */ - if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma)) + if (intel_reuse_initial_plane_obj(crtc, plane_configs, &fb, &vma)) goto valid_fb; /* @@ -302,25 +410,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config) i915_vma_put(plane_config->vma); } -void intel_crtc_initial_plane_config(struct intel_crtc *crtc) +void intel_initial_plane_config(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_initial_plane_config plane_config = {}; + struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; - /* - * Note that reserving the BIOS fb up front prevents us - * from stuffing other stolen allocations like the ring - * on top. This prevents some ugliness at boot time, and - * can even allow for smooth boot transitions if the BIOS - * fb is large enough for the active pipe configuration. - */ - dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config); + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_initial_plane_config *plane_config = + &plane_configs[crtc->pipe]; - /* - * If the fb is shared between multiple heads, we'll - * just get the first one. - */ - intel_find_initial_plane_obj(crtc, &plane_config); + if (!to_intel_crtc_state(crtc->base.state)->uapi.active) + continue; + + /* + * Note that reserving the BIOS fb up front prevents us + * from stuffing other stolen allocations like the ring + * on top. This prevents some ugliness at boot time, and + * can even allow for smooth boot transitions if the BIOS + * fb is large enough for the active pipe configuration. + */ + i915->display.funcs.display->get_initial_plane_config(crtc, plane_config); - plane_config_fini(&plane_config); + /* + * If the fb is shared between multiple heads, we'll + * just get the first one. 
+ */ + intel_find_initial_plane_obj(crtc, plane_configs); + + if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config)) + intel_crtc_wait_for_next_vblank(crtc); + + plane_config_fini(plane_config); + } } diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h index c7e35ab3182b..64ab95239cd4 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.h +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h @@ -6,8 +6,8 @@ #ifndef __INTEL_PLANE_INITIAL_H__ #define __INTEL_PLANE_INITIAL_H__ -struct intel_crtc; +struct drm_i915_private; -void intel_crtc_initial_plane_config(struct intel_crtc *crtc); +void intel_initial_plane_config(struct drm_i915_private *i915); #endif diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index a8fa3a20990e..2d65a538f83e 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -366,7 +366,7 @@ static bool intel_pps_is_valid(struct intel_dp *intel_dp) if (intel_dp->pps.pps_idx == 1 && INTEL_PCH_TYPE(i915) >= PCH_ICP && - INTEL_PCH_TYPE(i915) < PCH_MTP) + INTEL_PCH_TYPE(i915) <= PCH_ADP) return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT; return true; diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 57bbf3e3af92..72cadad09db5 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -173,6 +173,12 @@ * irrelevant for normal operation. */ +#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ + (intel_dp)->psr.source_support) + +#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \ + (intel_dp)->psr.source_panel_replay_support) + bool intel_encoder_can_psr(struct intel_encoder *encoder) { if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST) @@ -528,7 +534,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp) intel_dp_get_sink_sync_latency(intel_dp); if (DISPLAY_VER(i915) >= 9 && - intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) { + intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) { bool y_req = intel_dp->psr_dpcd[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED; bool alpm = intel_dp_get_alpm_status(intel_dp); @@ -560,11 +566,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) if (intel_dp->psr_dpcd[0]) _psr_init_dpcd(intel_dp); - if (intel_dp->psr.sink_psr2_support) { - intel_dp->psr.colorimetry_support = - intel_dp_get_colorimetry_status(intel_dp); + if (intel_dp->psr.sink_psr2_support) intel_dp_get_su_granularity(intel_dp); - } } static void hsw_psr_setup_aux(struct intel_dp *intel_dp) @@ -604,6 +607,18 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) aux_ctl); } +static bool psr2_su_region_et_valid(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + if (DISPLAY_VER(i915) >= 20 && + intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED && + !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)) + return true; + + return false; +} + static void intel_psr_enable_sink(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -619,6 +634,8 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; + if (psr2_su_region_et_valid(intel_dp)) + dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET; } else { if 
(intel_dp->psr.link_standby) dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; @@ -762,8 +779,8 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) static int psr2_block_count_lines(struct intel_dp *intel_dp) { - return intel_dp->psr.io_wake_lines < 9 && - intel_dp->psr.fast_wake_lines < 9 ? 8 : 12; + return intel_dp->psr.alpm_parameters.io_wake_lines < 9 && + intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12; } static int psr2_block_count(struct intel_dp *intel_dp) @@ -800,6 +817,7 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp) static void hsw_activate_psr2(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_psr *psr = &intel_dp->psr; enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val = EDP_PSR2_ENABLE; u32 psr_val = 0; @@ -841,17 +859,18 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) */ int tmp; - tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; + tmp = map[psr->alpm_parameters.io_wake_lines - + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES); - tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; + tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES); } else if (DISPLAY_VER(dev_priv) >= 12) { - val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); - val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); + val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines); + val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines); } else if (DISPLAY_VER(dev_priv) >= 9) { - val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); - val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); + val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines); + val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines); } if (intel_dp->psr.req_psr2_sdp_prior_scanline) @@ -869,6 +888,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0); } + if (psr2_su_region_et_valid(intel_dp)) + val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE; + /* * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. 
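For reference, the io/fast wake line counts that hsw_activate_psr2() packs into TGL_EDP_PSR2_IO_BUFFER_WAKE()/TGL_EDP_PSR2_FAST_WAKE() above are produced by the _compute_alpm_params() hunk further down: fixed wake times in microseconds are converted to scanlines of the current mode, rejected when they exceed what the register field can hold, and clamped to the Bspec minimum of 7. A minimal standalone model of that math (names, timings and the round-up helper are illustrative, not the driver's exact helpers):

#include <stdbool.h>
#include <stdio.h>

/*
 * Wake time in usecs -> full scanlines of the current mode, rounding up:
 * a partial line still delays the wake. pixel_rate is in kHz, so one
 * line takes htotal * 1000 / pixel_rate microseconds.
 */
static int usecs_to_scanlines(int usecs, int pixel_rate_khz, int htotal)
{
	return (usecs * pixel_rate_khz + htotal * 1000 - 1) / (htotal * 1000);
}

static bool compute_wake_lines(int pixel_rate_khz, int htotal,
			       int io_wake_us, int fast_wake_us,
			       int max_wake_lines,
			       int *io_lines, int *fast_lines)
{
	int io = usecs_to_scanlines(io_wake_us, pixel_rate_khz, htotal);
	int fast = usecs_to_scanlines(fast_wake_us, pixel_rate_khz, htotal);

	/* mode too fast for the register field: PSR2 gets rejected */
	if (io > max_wake_lines || fast > max_wake_lines)
		return false;

	/* Bspec lower limit of 7 lines, as in the max(..., 7) below */
	*io_lines = io < 7 ? 7 : io;
	*fast_lines = fast < 7 ? 7 : fast;
	return true;
}

int main(void)
{
	int io, fast;

	/* illustrative 4k-ish timings: 594 MHz pixel clock, htotal 4400 */
	if (compute_wake_lines(594000, 4400, 50, 45, 12, &io, &fast))
		printf("io_wake_lines=%d fast_wake_lines=%d\n", io, fast);

	return 0;
}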
@@ -1031,6 +1053,9 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, return false; } + if (psr2_su_region_et_valid(intel_dp)) + crtc_state->enable_psr2_su_region_et = true; + return crtc_state->enable_psr2_sel_fetch = true; } @@ -1101,10 +1126,34 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d return true; } -static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, +static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int check_entry_lines; + + if (DISPLAY_VER(i915) < 20) + return true; + + /* ALPM Entry Check = 2 + CEILING( 5us /tline ) */ + check_entry_lines = 2 + + intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5); + + if (check_entry_lines > 15) + return false; + + if (i915->display.params.psr_safest_params) + check_entry_lines = 15; + + intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines; + + return true; +} + +static bool _compute_alpm_params(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; u8 max_wake_lines; @@ -1115,6 +1164,8 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, * it is not enough -> use 45 us. */ fast_wake_time = 45; + + /* TODO: Check how we can use ALPM_CTL fast wake extended field */ max_wake_lines = 12; } else { io_wake_time = 50; @@ -1131,12 +1182,15 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, fast_wake_lines > max_wake_lines) return false; + if (!_lnl_compute_alpm_params(intel_dp, crtc_state)) + return false; + if (i915->display.params.psr_safest_params) io_wake_lines = fast_wake_lines = max_wake_lines; /* According to Bspec lower limit should be set as 7 lines. 
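* (enforced by the max(..., 7) clamps just below)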
*/ - intel_dp->psr.io_wake_lines = max(io_wake_lines, 7); - intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7); + intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7); + intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7); return true; } @@ -1268,7 +1322,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - if (!_compute_psr2_wake_times(intel_dp, crtc_state)) { + if (!_compute_alpm_params(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, Unable to use long enough wake times\n"); return false; @@ -1377,10 +1431,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, return; crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); - - crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); - intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state, - &crtc_state->psr_vsc); } void intel_psr_get_config(struct intel_encoder *encoder, @@ -1504,6 +1554,21 @@ static void wm_optimization_wa(struct intel_dp *intel_dp, wa_16013835468_bit_get(intel_dp), 0); } +static void lnl_alpm_configure(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; + struct intel_psr *psr = &intel_dp->psr; + + if (DISPLAY_VER(dev_priv) < 20) + return; + + intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), + ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE | + ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) | + ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines)); +} + static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -1569,6 +1634,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, intel_dp->psr.psr2_sel_fetch_enabled ? IGNORE_PSR2_HW_TRACKING : 0); + lnl_alpm_configure(intel_dp); + /* * Wa_16013835468 * Wa_14015648006 @@ -1634,7 +1701,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - struct intel_encoder *encoder = &dig_port->base; u32 val; drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); @@ -1662,7 +1728,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", intel_dp->psr.psr2_enabled ? 
"2" : "1"); - intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc); intel_snps_phy_update_psr_power_state(dev_priv, phy, true); intel_psr_enable_sink(intel_dp); intel_psr_enable_source(intel_dp, crtc_state); @@ -1951,7 +2016,7 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st } static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, - struct drm_rect *clip, bool full_update) + bool full_update) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1966,17 +2031,21 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, goto exit; } - if (clip->y1 == -1) + if (crtc_state->psr2_su_area.y1 == -1) goto exit; if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) { - val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1); - val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1); + val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1); + val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1); } else { - drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4); + drm_WARN_ON(crtc_state->uapi.crtc->dev, + crtc_state->psr2_su_area.y1 % 4 || + crtc_state->psr2_su_area.y2 % 4); - val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1); - val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1); + val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR( + crtc_state->psr2_su_area.y1 / 4 + 1); + val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR( + crtc_state->psr2_su_area.y2 / 4 + 1); } exit: crtc_state->psr2_man_track_ctl = val; @@ -2002,8 +2071,7 @@ static void clip_area_update(struct drm_rect *overlap_damage_area, overlap_damage_area->y2 = damage_area->y2; } -static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state, - struct drm_rect *pipe_clip) +static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; @@ -2016,9 +2084,32 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c else y_alignment = crtc_state->su_y_granularity; - pipe_clip->y1 -= pipe_clip->y1 % y_alignment; - if (pipe_clip->y2 % y_alignment) - pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment; + crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment; + if (crtc_state->psr2_su_area.y2 % y_alignment) + crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 / + y_alignment) + 1) * y_alignment; +} + +/* + * When early transport is in use we need to extend SU area to cover + * cursor fully when cursor is in SU area. 
+ */ +static void +intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state, + struct intel_plane_state *cursor_state) +{ + struct drm_rect inter; + + if (!crtc_state->enable_psr2_su_region_et || + !cursor_state->uapi.visible) + return; + + inter = crtc_state->psr2_su_area; + if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst)) + return; + + clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst, + &crtc_state->pipe_src); } /* @@ -2061,8 +2152,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 }; - struct intel_plane_state *new_plane_state, *old_plane_state; + struct intel_plane_state *new_plane_state, *old_plane_state, + *cursor_plane_state = NULL; struct intel_plane *plane; bool full_update = false; int i, ret; @@ -2075,6 +2166,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, goto skip_sel_fetch_set_loop; } + crtc_state->psr2_su_area.x1 = 0; + crtc_state->psr2_su_area.y1 = -1; + crtc_state->psr2_su_area.x2 = INT_MAX; + crtc_state->psr2_su_area.y2 = -1; + /* * Calculate minimal selective fetch area of each plane and calculate * the pipe damaged area. @@ -2109,14 +2205,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if (old_plane_state->uapi.visible) { damaged_area.y1 = old_plane_state->uapi.dst.y1; damaged_area.y2 = old_plane_state->uapi.dst.y2; - clip_area_update(&pipe_clip, &damaged_area, + clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src); } if (new_plane_state->uapi.visible) { damaged_area.y1 = new_plane_state->uapi.dst.y1; damaged_area.y2 = new_plane_state->uapi.dst.y2; - clip_area_update(&pipe_clip, &damaged_area, + clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src); } continue; @@ -2124,7 +2220,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, /* If alpha changed mark the whole plane area as damaged */ damaged_area.y1 = new_plane_state->uapi.dst.y1; damaged_area.y2 = new_plane_state->uapi.dst.y2; - clip_area_update(&pipe_clip, &damaged_area, + clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src); continue; } @@ -2141,7 +2237,14 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1; damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1; - clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src); + clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src); + + /* + * Cursor plane new state is stored to adjust su area to cover + * cursor are fully. + */ + if (plane->id == PLANE_CURSOR) + cursor_plane_state = new_plane_state; } /* @@ -2150,7 +2253,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, * should identify cases where this happens and fix the area * calculation for those. 
*/ - if (pipe_clip.y1 == -1) { + if (crtc_state->psr2_su_area.y1 == -1) { drm_info_once(&dev_priv->drm, "Selective fetch area calculation failed in pipe %c\n", pipe_name(crtc->pipe)); @@ -2164,13 +2267,17 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) || IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) && crtc_state->splitter.enable) - pipe_clip.y1 = 0; + crtc_state->psr2_su_area.y1 = 0; ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); if (ret) return ret; - intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip); + /* Adjust su area to cover cursor fully as necessary */ + if (cursor_plane_state) + intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state); + + intel_psr2_sel_fetch_pipe_alignment(crtc_state); /* * Now that we have the pipe damaged area check if it intersect with @@ -2185,7 +2292,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, !new_plane_state->uapi.visible) continue; - inter = pipe_clip; + inter = crtc_state->psr2_su_area; sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) { sel_fetch_area->y1 = -1; @@ -2230,7 +2337,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, } skip_sel_fetch_set_loop: - psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update); + psr2_man_trk_ctl_calc(crtc_state, full_update); return 0; } @@ -2799,6 +2906,9 @@ void intel_psr_init(struct intel_dp *intel_dp) else intel_dp->psr.source_support = true; + /* Disable early transport for now */ + intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE; + /* Set link_standby x link_off defaults */ if (DISPLAY_VER(dev_priv) < 12) /* For new platforms up to TGL let's respect VBT back again */ diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 143e0595c097..cde781df84d5 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -21,12 +21,6 @@ struct intel_encoder; struct intel_plane; struct intel_plane_state; -#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ - (intel_dp)->psr.source_support) - -#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \ - (intel_dp)->psr.source_panel_replay_support) - bool intel_encoder_can_psr(struct intel_encoder *encoder); void intel_psr_init_dpcd(struct intel_dp *intel_dp); void intel_psr_pre_plane_update(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index efe4306b37e0..8427a736f639 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -159,6 +159,7 @@ #define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28) #define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 0) #define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 1) +#define LNL_EDP_PSR2_SU_REGION_ET_ENABLE REG_BIT(27) #define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */ #define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */ #define EDP_MAX_SU_DISABLE_TIME_MASK REG_GENMASK(24, 20) @@ -245,6 +246,11 @@ #define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14) #define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13) +/* PSR2 Early transport */ +#define _PIPE_SRCSZ_ERLY_TPT_A 0x70074 + +#define 
PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(trans, _PIPE_SRCSZ_ERLY_TPT_A) + #define _SEL_FETCH_PLANE_BASE_1_A 0x70890 #define _SEL_FETCH_PLANE_BASE_2_A 0x708B0 #define _SEL_FETCH_PLANE_BASE_3_A 0x708D0 @@ -290,4 +296,61 @@ _SEL_FETCH_PLANE_OFFSET_1_A - \ _SEL_FETCH_PLANE_BASE_1_A) +#define _ALPM_CTL_A 0x60950 +#define ALPM_CTL(tran) _MMIO_TRANS2(tran, _ALPM_CTL_A) +#define ALPM_CTL_ALPM_ENABLE REG_BIT(31) +#define ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(30) +#define ALPM_CTL_LOBF_ENABLE REG_BIT(29) +#define ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE REG_BIT(28) +#define ALPM_CTL_KEEP_FEC_ENABLE_FOR_AUX_WAKE_SLEEP REG_BIT(27) +#define ALPM_CTL_RESTORE_OCCURED REG_BIT(26) +#define ALPM_CTL_RESTORE_TO_SLEEP REG_BIT(25) +#define ALPM_CTL_RESTORE_TO_DEEP_SLEEP REG_BIT(24) +#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK REG_GENMASK(23, 21) +#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 0) +#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_128_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 1) +#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_256_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 2) +#define ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_512_SYMBOLS REG_FIELD_PREP(ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_MASK, 3) +#define ALPM_CTL_AUX_WAKE_SLEEP_HOLD_ENABLE REG_BIT(20) +#define ALPM_CTL_ALPM_ENTRY_CHECK_MASK REG_GENMASK(19, 16) +#define ALPM_CTL_ALPM_ENTRY_CHECK(val) REG_FIELD_PREP(ALPM_CTL_ALPM_ENTRY_CHECK_MASK, val) +#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK REG_GENMASK(13, 8) +#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5 +#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES) +#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0) +#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val) + +#define _ALPM_CTL2_A 0x60954 +#define ALPM_CTL2(tran) _MMIO_TRANS2(tran, _ALPM_CTL2_A) +#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK REG_GENMASK(28, 24) +#define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY(val) REG_FIELD_PREP(ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK, val) +#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK REG_GENMASK(19, 16) +#define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION(val) REG_FIELD_PREP(ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK, val) +#define ALPM_CTL2_NUMBER_OF_LTTPR_MASK REG_GENMASK(15, 12) +#define ALPM_CTL2_NUMBER_OF_LTTPR(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_OF_LTTPR_MASK, val) +#define ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME_MASK REG_GENMASK(10, 8) +#define ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME(val) REG_FIELD_PREP(ALPM_CTL2_LTTPR_AUX_LESS_SLEEP_HOLD_TIME_MASK, val) +#define ALPM_CTL2_FEC_DECODE_EN_POSITION_AFTER_WAKE_SR REG_BIT(4) +#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK REG_GENMASK(2, 0) +#define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK, val) + +#define _PORT_ALPM_CTL_A 0x16fa2c +#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_CTL_A) +#define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31) +#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20) +#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val) +#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK REG_GENMASK(19, 16) +#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK, val) +#define 
PORT_ALPM_CTL_SILENCE_PERIOD_MASK REG_GENMASK(7, 0) +#define PORT_ALPM_CTL_SILENCE_PERIOD(val) REG_FIELD_PREP(PORT_ALPM_CTL_SILENCE_PERIOD_MASK, val) + +#define _PORT_ALPM_LFPS_CTL_A 0x16fa30 +#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A) +#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31) +#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24) + #endif /* __INTEL_PSR_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index acc6b6804105..093106c1e101 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -44,6 +44,7 @@ #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" +#include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_fifo_underrun.h" @@ -1209,7 +1210,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo, struct intel_sdvo_tv_format format; u32 format_map; - format_map = 1 << conn_state->tv.mode; + format_map = 1 << conn_state->tv.legacy_mode; memset(&format, 0, sizeof(format)); memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); @@ -2140,6 +2141,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) if (!intel_display_device_enabled(i915)) return connector_status_disconnected; + if (!intel_display_driver_check_access(i915)) + return connector->status; + if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo_connector->output_flag)) return connector_status_unknown; @@ -2298,7 +2302,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector) * Read the list of supported input resolutions for the selected TV * format.
*/ - format_map = 1 << conn_state->tv.mode; + format_map = 1 << conn_state->tv.legacy_mode; memcpy(&tv_res, &format_map, min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); @@ -2363,7 +2367,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, int i; for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) - if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) { + if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) { *val = i; return 0; @@ -2419,7 +2423,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector, struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state); if (property == intel_sdvo_connector->tv_format) { - state->tv.mode = intel_sdvo_connector->tv_format_supported[val]; + state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val]; if (state->crtc) { struct drm_crtc_state *crtc_state = @@ -2805,6 +2809,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type) } else { intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; } + intel_connector->base.polled = intel_connector->polled; encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; @@ -2880,6 +2885,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type) intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; + intel_connector->base.polled = intel_connector->polled; encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; @@ -3076,7 +3082,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, drm_property_add_enum(intel_sdvo_connector->tv_format, i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); - intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0]; + intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0]; drm_object_attach_property(&intel_sdvo_connector->base.base.base, intel_sdvo_connector->tv_format, 0); return true; diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index dcf05e00e505..6b374d481cd9 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -122,6 +122,15 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port) return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY); } +bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + struct intel_tc_port *tc = to_tc_port(dig_port); + + return intel_phy_is_tc(i915, phy) && !tc->legacy_port; +} + /* * The display power domains used for TC ports depending on the * platform and TC mode (legacy, DP-alt, TBT): @@ -986,10 +995,11 @@ xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; + i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port); assert_tc_cold_blocked(tc); - return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE; + return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE; } static bool @@ -1012,16 +1022,17 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena 
ble) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; + i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port); u32 val; assert_tc_cold_blocked(tc); - val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); + val = intel_de_read(i915, reg); if (enable) val |= XELPDP_TCSS_POWER_REQUEST; else val &= ~XELPDP_TCSS_POWER_REQUEST; - intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); + intel_de_write(i915, reg, val); } static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) @@ -1055,26 +1066,28 @@ static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; + i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port); u32 val; assert_tc_cold_blocked(tc); - val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); + val = intel_de_read(i915, reg); if (take) val |= XELPDP_TC_PHY_OWNERSHIP; else val &= ~XELPDP_TC_PHY_OWNERSHIP; - intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); + intel_de_write(i915, reg, val); } static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; + i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port); assert_tc_cold_blocked(tc); - return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP; + return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP; } static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) @@ -1590,7 +1603,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, * connected ports are usable, and avoids exposing to the users objects they * can't really use. */ -bool intel_tc_port_connected_locked(struct intel_encoder *encoder) +bool intel_tc_port_connected(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -1605,19 +1618,6 @@ bool intel_tc_port_connected_locked(struct intel_encoder *encoder) return tc_phy_hpd_live_status(tc) & mask; } -bool intel_tc_port_connected(struct intel_encoder *encoder) -{ - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct intel_tc_port *tc = to_tc_port(dig_port); - bool is_connected; - - mutex_lock(&tc->lock); - is_connected = intel_tc_port_connected_locked(encoder); - mutex_unlock(&tc->lock); - - return is_connected; -} - static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc) { bool ret; diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 80a61e52850e..26c4265368c1 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -15,9 +15,9 @@ struct intel_encoder; bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port); bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port); bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port); +bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port); bool intel_tc_port_connected(struct intel_encoder *encoder); -bool intel_tc_port_connected_locked(struct intel_encoder *encoder); u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index d4386cb3569e..2b77d399f1a1 100644 ---
a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -40,6 +40,7 @@ #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_irq.h" +#include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_dpll.h" #include "intel_hotplug.h" @@ -949,7 +950,7 @@ intel_disable_tv(struct intel_atomic_state *state, static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state) { - int format = conn_state->tv.mode; + int format = conn_state->tv.legacy_mode; return &tv_modes[format]; } @@ -1327,7 +1328,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, * the active portion. Hence following this formula seems * more trouble that it's worth. * - * if (GRAPHICS_VER(dev_priv) == 4) { + * if (DISPLAY_VER(dev_priv) == 4) { * num = cdclk * (tv_mode->oversample >> !tv_mode->progressive); * den = tv_mode->clock; * } else { @@ -1704,7 +1705,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector) break; } - connector->state->tv.mode = i; + connector->state->tv.legacy_mode = i; } static int @@ -1723,6 +1724,9 @@ intel_tv_detect(struct drm_connector *connector, if (!intel_display_device_enabled(i915)) return connector_status_disconnected; + if (!intel_display_driver_check_access(i915)) + return connector->status; + if (force) { struct drm_atomic_state *state; @@ -1859,7 +1863,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector, old_state = drm_atomic_get_old_connector_state(state, connector); new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); - if (old_state->tv.mode != new_state->tv.mode || + if (old_state->tv.legacy_mode != new_state->tv.legacy_mode || old_state->tv.margins.left != new_state->tv.margins.left || old_state->tv.margins.right != new_state->tv.margins.right || old_state->tv.margins.top != new_state->tv.margins.top || @@ -1896,7 +1900,7 @@ static void intel_tv_add_properties(struct drm_connector *connector) conn_state->tv.margins.right = 46; conn_state->tv.margins.bottom = 37; - conn_state->tv.mode = 0; + conn_state->tv.legacy_mode = 0; /* Create TV properties then attach current values */ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { @@ -1910,7 +1914,7 @@ static void intel_tv_add_properties(struct drm_connector *connector) drm_object_attach_property(&connector->base, i915->drm.mode_config.legacy_tv_mode_property, - conn_state->tv.mode); + conn_state->tv.legacy_mode); drm_object_attach_property(&connector->base, i915->drm.mode_config.tv_left_margin_property, conn_state->tv.margins.left); @@ -1990,6 +1994,7 @@ intel_tv_init(struct drm_i915_private *dev_priv) * More recent chipsets favour HDMI rather than integrated S-Video. 
*/ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; + intel_connector->base.polled = intel_connector->polled; drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index fe256bf7b485..baf7354cb6e2 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -5,6 +5,7 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_vblank.h" @@ -581,3 +582,132 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, intel_vblank_section_exit(i915); spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags); } + +static int intel_mode_vblank_start(const struct drm_display_mode *mode) +{ + int vblank_start = mode->crtc_vblank_start; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + vblank_start = DIV_ROUND_UP(vblank_start, 2); + + return vblank_start; +} + +void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state, + struct intel_vblank_evade_ctx *evade) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state; + const struct drm_display_mode *adjusted_mode; + + evade->crtc = crtc; + + evade->need_vlv_dsi_wa = (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) && + intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); + + /* + * During fastsets/etc. the transcoder is still + * running with the old timings at this point. + * + * TODO: maybe just use the active timings here? + */ + if (intel_crtc_needs_modeset(new_crtc_state)) + crtc_state = new_crtc_state; + else + crtc_state = old_crtc_state; + + adjusted_mode = &crtc_state->hw.adjusted_mode; + + if (crtc->mode_flags & I915_MODE_FLAG_VRR) { + /* timing changes should happen with VRR disabled */ + drm_WARN_ON(crtc->base.dev, intel_crtc_needs_modeset(new_crtc_state) || + new_crtc_state->update_m_n || new_crtc_state->update_lrr); + + if (intel_vrr_is_push_sent(crtc_state)) + evade->vblank_start = intel_vrr_vmin_vblank_start(crtc_state); + else + evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state); + } else { + evade->vblank_start = intel_mode_vblank_start(adjusted_mode); + } + + /* FIXME needs to be calibrated sensibly */ + evade->min = evade->vblank_start - intel_usecs_to_scanlines(adjusted_mode, + VBLANK_EVASION_TIME_US); + evade->max = evade->vblank_start - 1; + + /* + * M/N and TRANS_VTOTAL are double buffered on the transcoder's + * undelayed vblank, so with seamless M/N and LRR we must evade + * both vblanks. + * + * DSB execution waits for the transcoder's undelayed vblank, + * hence we must kick off the commit before that. + */ + if (new_crtc_state->dsb || new_crtc_state->update_m_n || new_crtc_state->update_lrr) + evade->min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay; +} + +/* must be called with vblank interrupt already enabled! 
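+ * (the loop below sleeps on the crtc's vblank waitqueue, which is only woken while the vblank interrupt is running)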
*/ +int intel_vblank_evade(struct intel_vblank_evade_ctx *evade) +{ + struct intel_crtc *crtc = evade->crtc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + long timeout = msecs_to_jiffies_timeout(1); + wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); + DEFINE_WAIT(wait); + int scanline; + + if (evade->min <= 0 || evade->max <= 0) + return 0; + + for (;;) { + /* + * prepare_to_wait() has a memory barrier, which guarantees + * other CPUs can see the task state update by the time we + * read the scanline. + */ + prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); + + scanline = intel_get_crtc_scanline(crtc); + if (scanline < evade->min || scanline > evade->max) + break; + + if (!timeout) { + drm_err(&i915->drm, + "Potential atomic update failure on pipe %c\n", + pipe_name(crtc->pipe)); + break; + } + + local_irq_enable(); + + timeout = schedule_timeout(timeout); + + local_irq_disable(); + } + + finish_wait(wq, &wait); + + /* + * On VLV/CHV DSI the scanline counter would appear to + * increment approx. 1/3 of a scanline before start of vblank. + * The registers still get latched at start of vblank however. + * This means we must not write any registers on the first + * line of vblank (since not the whole line is actually in + * vblank). And unfortunately we can't use the interrupt to + * wait here since it will fire too soon. We could use the + * frame start interrupt instead since it will fire after the + * critical scanline, but that would require more changes + * in the interrupt code. So for now we'll just do the nasty + * thing and poll for the bad scanline to pass us by. + * + * FIXME figure out if BXT+ DSI suffers from this as well + */ + while (evade->need_vlv_dsi_wa && scanline == evade->vblank_start) + scanline = intel_get_crtc_scanline(crtc); + + return scanline; +} diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h index 17636f140c71..ec6c3da3eeac 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.h +++ b/drivers/gpu/drm/i915/display/intel_vblank.h @@ -13,6 +13,18 @@ struct drm_crtc; struct intel_crtc; struct intel_crtc_state; +struct intel_vblank_evade_ctx { + struct intel_crtc *crtc; + int min, max, vblank_start; + bool need_vlv_dsi_wa; +}; + +void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state, + struct intel_vblank_evade_ctx *evade); +/* must be called with vblank interrupt already enabled! */ +int intel_vblank_evade(struct intel_vblank_evade_ctx *evade); + u32 i915_get_vblank_counter(struct drm_crtc *crtc); u32 g4x_get_vblank_counter(struct drm_crtc *crtc); bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h index 64f440fdc22b..8b21dc8e26d5 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h @@ -51,8 +51,8 @@ #define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) #define _DSCA_PPS_0 0x6B200 #define _DSCC_PPS_0 0x6BA00 -#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + (pps) * 4) -#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + (pps) * 4) +#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4) +#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? 
(pps) : (pps) + 12) * 4) #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 511dc1544854..392d93e97bf8 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -2624,3 +2624,31 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, error: kfree(intel_fb); } + +bool skl_fixup_initial_plane_config(struct intel_crtc *crtc, + const struct intel_initial_plane_config *plane_config) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + enum plane_id plane_id = plane->id; + enum pipe pipe = crtc->pipe; + u32 base; + + if (!plane_state->uapi.visible) + return false; + + base = intel_plane_ggtt_offset(plane_state); + + /* + * We may have moved the surface to a different + * part of ggtt, make the plane aware of that. + */ + if (plane_config->base == base) + return false; + + intel_de_write(i915, PLANE_SURF(pipe, plane_id), base); + + return true; +} diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h index be64c201f9b3..e92e00c01b29 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.h +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h @@ -22,6 +22,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, void skl_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config); +bool skl_fixup_initial_plane_config(struct intel_crtc *crtc, + const struct intel_initial_plane_config *plane_config); int skl_format_to_fourcc(int format, bool rgb_order, bool alpha); diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 56588d6e24ae..614f319d754e 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -443,12 +443,35 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; + new_bw_state = intel_atomic_get_bw_state(state); if (IS_ERR(new_bw_state)) return PTR_ERR(new_bw_state); old_bw_state = intel_atomic_get_old_bw_state(state); + /* + * We store use_sagv_wm in the crtc state rather than relying on + * that bw state since we have no convenient way to get at the + * latter from the plane commit hooks (especially in the legacy + * cursor case). + * + * drm_atomic_check_only() gets upset if we pull more crtcs + * into the state, so we have to calculate this based on the + * individual intel_crtc_can_enable_sagv() rather than + * the overall intel_can_enable_sagv(). Otherwise the + * crtcs not included in the commit would not switch to the + * SAGV watermarks when we are about to enable SAGV, and that + * would lead to underruns. This does mean extra power draw + * when only a subset of the crtcs are blocking SAGV as the + * other crtcs can't be allowed to use the more optimal + * normal (ie. non-SAGV) watermarks. 
+ */ + pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) && + DISPLAY_VER(i915) >= 12 && + intel_crtc_can_enable_sagv(new_crtc_state); + if (intel_crtc_can_enable_sagv(new_crtc_state)) new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe); else @@ -478,21 +501,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) return ret; } - for_each_new_intel_crtc_in_state(state, crtc, - new_crtc_state, i) { - struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; - - /* - * We store use_sagv_wm in the crtc state rather than relying on - * that bw state since we have no convenient way to get at the - * latter from the plane commit hooks (especially in the legacy - * cursor case) - */ - pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) && - DISPLAY_VER(i915) >= 12 && - intel_can_enable_sagv(i915, new_bw_state); - } - return 0; } @@ -1367,7 +1375,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) u64 data_rate = 0; for_each_plane_id_on_crtc(crtc, plane_id) { - if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20) + if (plane_id == PLANE_CURSOR) continue; data_rate += crtc_state->rel_data_rate[plane_id]; @@ -1514,12 +1522,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, return 0; /* Allocate fixed number of blocks for cursor. */ - if (DISPLAY_VER(i915) < 20) { - cursor_size = skl_cursor_allocation(crtc_state, num_active); - iter.size -= cursor_size; - skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR], - alloc->end - cursor_size, alloc->end); - } + cursor_size = skl_cursor_allocation(crtc_state, num_active); + iter.size -= cursor_size; + skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR], + alloc->end - cursor_size, alloc->end); iter.data_rate = skl_total_relative_data_rate(crtc_state); @@ -1533,7 +1539,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20) { + if (plane_id == PLANE_CURSOR) { const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; @@ -1581,7 +1587,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20) + if (plane_id == PLANE_CURSOR) continue; if (DISPLAY_VER(i915) < 11 && diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 555022c0652c..d3a771afb083 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2160,12 +2160,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) #ifdef CONFIG_MMU_NOTIFIER if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) { - read_lock(&eb->i915->mm.notifier_lock); - - /* - * count is always at least 1, otherwise __EXEC_USERPTR_USED - * could not have been set - */ for (i = 0; i < count; i++) { struct eb_vma *ev = &eb->vma[i]; struct drm_i915_gem_object *obj = ev->vma->obj; @@ -2177,8 +2171,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) if (err) break; } - - read_unlock(&eb->i915->mm.notifier_lock); } #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 0d812f4d787d..3b27218aabe2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -28,6 +28,13 @@ void i915_gem_suspend(struct drm_i915_private *i915) GEM_TRACE("%s\n", 
dev_name(i915->drm.dev)); intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0); + /* + * On rare occasions, we've observed the fence completion triggers + * free_engines asynchronously via rcu_call. Ensure those are done. + * This path is only called on suspend, so it's an acceptable cost. + */ + rcu_barrier(); + flush_workqueue(i915->wq); /* @@ -160,6 +167,9 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) * machine in an unusable condition. */ + /* Like i915_gem_suspend, flush tasks staged from fence triggers */ + rcu_barrier(); + for_each_gt(gt, i915, i) intel_gt_suspend_late(gt); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index a4fb577eceb4..b09b74a2448b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem, return ERR_PTR(-EINVAL); if (!(flags & I915_BO_ALLOC_GPU_ONLY) && - offset + size > mem->io_size && + offset + size > resource_size(&mem->io) && !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt)) return ERR_PTR(-ENOSPC); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 8c88075eeab2..ad6dd7f3259b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -541,7 +541,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) /* Exclude the reserved region from driver use */ mem->region.end = i915->dsm.reserved.start - 1; - mem->io_size = min(mem->io_size, resource_size(&mem->region)); + mem->io = DEFINE_RES_MEM(mem->io.start, + min(resource_size(&mem->io), + resource_size(&mem->region))); i915->dsm.usable_size = resource_size(&mem->region); @@ -752,7 +754,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, * With discrete devices, where we lack a mappable aperture there is no * possible way to ever access this memory on the CPU side. 
*/ - if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size && + if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) && !(flags & I915_BO_ALLOC_GPU_ONLY)) return -ENOSPC; @@ -826,7 +828,6 @@ static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { static int init_stolen_lmem(struct intel_memory_region *mem) { - struct drm_i915_private *i915 = mem->i915; int err; if (GEM_WARN_ON(resource_size(&mem->region) == 0)) @@ -838,14 +839,10 @@ static int init_stolen_lmem(struct intel_memory_region *mem) return 0; } - if (mem->io_size && - !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size)) + if (resource_size(&mem->io) && + !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io))) goto err_cleanup; - drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", - &mem->io_start); - drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start); - return 0; err_cleanup: @@ -855,7 +852,7 @@ err_cleanup: static int release_stolen_lmem(struct intel_memory_region *mem) { - if (mem->io_size) + if (resource_size(&mem->io)) io_mapping_fini(&mem->iomap); i915_gem_cleanup_stolen(mem->i915); return 0; @@ -938,13 +935,17 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, GEM_BUG_ON((dsm_base + dsm_size) > lmem_size); } else { /* Use DSM base address instead for stolen memory */ - dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK; + dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK; if (WARN_ON(lmem_size < dsm_base)) return ERR_PTR(-ENODEV); dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M); } - if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { + if (i915_direct_stolen_access(i915)) { + drm_dbg(&i915->drm, "Using direct DSM access\n"); + io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK; + io_size = dsm_size; + } else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { io_start = 0; io_size = 0; } else { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index a6b0aaf30cbe..27dcfd8a34bb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -142,13 +142,13 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr, place->fpfn = offset >> PAGE_SHIFT; WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn)); place->lpfn = place->fpfn + (size >> PAGE_SHIFT); - } else if (mr->io_size && mr->io_size < mr->total) { + } else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) { if (flags & I915_BO_ALLOC_GPU_ONLY) { place->flags |= TTM_PL_FLAG_TOPDOWN; } else { place->fpfn = 0; - WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn)); - place->lpfn = mr->io_size >> PAGE_SHIFT; + WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn)); + place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT; } } } @@ -1083,7 +1083,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) struct intel_memory_region *mr = obj->mm.placements[i]; unsigned int flags; - if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM) + if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM) continue; flags = obj->flags; @@ -1094,8 +1094,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) } if (err) { - drm_dbg(dev, "Unable to make resource CPU accessible(err = %pe)\n", - ERR_PTR(err)); + drm_dbg_ratelimited(dev, + "Unable to make resource CPU accessible(err = %pe)\n", + ERR_PTR(err)); dma_resv_unlock(bo->base.resv); 
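/* userspace can retrigger this fault path at will, hence the ratelimited message above */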
ret = VM_FAULT_SIGBUS; goto out_rpm; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 1d3ebdf4069b..0e21ce9d3e5a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -42,7 +42,6 @@ #include "i915_drv.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" -#include "i915_gem_userptr.h" #include "i915_scatterlist.h" #ifdef CONFIG_MMU_NOTIFIER @@ -61,36 +60,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { - struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier); - struct drm_i915_private *i915 = to_i915(obj->base.dev); - long r; - - if (!mmu_notifier_range_blockable(range)) - return false; - - write_lock(&i915->mm.notifier_lock); - mmu_interval_set_seq(mni, cur_seq); - - write_unlock(&i915->mm.notifier_lock); - - /* - * We don't wait when the process is exiting. This is valid - * because the object will be cleaned up anyway. - * - * This is also temporarily required as a hack, because we - * cannot currently force non-consistent batch buffers to preempt - * and reschedule by waiting on it, hanging processes on exit. - */ - if (current->flags & PF_EXITING) - return true; - - /* we will unbind on next submission, still have userptr pins */ - r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false, - MAX_SCHEDULE_TIMEOUT); - if (r <= 0) - drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); - return true; } @@ -580,15 +550,3 @@ i915_gem_userptr_ioctl(struct drm_device *dev, #endif } -int i915_gem_init_userptr(struct drm_i915_private *dev_priv) -{ -#ifdef CONFIG_MMU_NOTIFIER - rwlock_init(&dev_priv->mm.notifier_lock); -#endif - - return 0; -} - -void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv) -{ -} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h b/drivers/gpu/drm/i915/gem/i915_gem_userptr.h deleted file mode 100644 index 8dadb2f8436d..000000000000 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2021 Intel Corporation - */ - -#ifndef __I915_GEM_USERPTR_H__ -#define __I915_GEM_USERPTR_H__ - -struct drm_i915_private; - -int i915_gem_init_userptr(struct drm_i915_private *dev_priv); -void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); - -#endif /* __I915_GEM_USERPTR_H__ */ diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 2c51a2c452fc..99a9ade73956 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1054,7 +1054,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr, int err; total = 0; - size = mr->io_size; + size = resource_size(&mr->io); do { struct drm_i915_gem_object *obj; @@ -1315,28 +1315,28 @@ static int igt_mmap_migrate(void *arg) struct intel_memory_region *mixed[] = { mr, system }; struct intel_memory_region *single[] = { mr }; struct ttm_resource_manager *man = mr->region_private; - resource_size_t saved_io_size; + struct resource saved_io; int err; if (mr->private) continue; - if (!mr->io_size) + if (!resource_size(&mr->io)) continue; /* * For testing purposes let's force small BAR, if not already * present. 
*/ - saved_io_size = mr->io_size; - if (mr->io_size == mr->total) { - resource_size_t io_size = mr->io_size; + saved_io = mr->io; + if (resource_size(&mr->io) == mr->total) { + resource_size_t io_size = resource_size(&mr->io); io_size = rounddown_pow_of_two(io_size >> 1); if (io_size < PAGE_SIZE) continue; - mr->io_size = io_size; + mr->io = DEFINE_RES_MEM(mr->io.start, io_size); i915_ttm_buddy_man_force_visible_size(man, io_size >> PAGE_SHIFT); } @@ -1396,9 +1396,9 @@ static int igt_mmap_migrate(void *arg) IGT_MMAP_MIGRATE_FAIL_GPU | IGT_MMAP_MIGRATE_UNFAULTABLE); out_io_size: - mr->io_size = saved_io_size; + mr->io = saved_io; i915_ttm_buddy_man_force_visible_size(man, - mr->io_size >> PAGE_SHIFT); + resource_size(&mr->io) >> PAGE_SHIFT); if (err) return err; } diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 86a04afff64b..e1bf13e3d307 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -226,7 +226,7 @@ u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs) static int mtl_dummy_pipe_control(struct i915_request *rq) { /* Wa_14016712196 */ - if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 71)) || + if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(rq->i915)) { u32 *cs; @@ -822,7 +822,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) flags |= PIPE_CONTROL_FLUSH_L3; /* Wa_14016712196 */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915)) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915)) /* dummy PIPE_CONTROL + depth flush */ cs = gen12_emit_pipe_control(cs, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 40687806d22a..1ade568ffbfa 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1190,7 +1190,8 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine) num = ARRAY_SIZE(xelpmp_regs); } } else { - if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) || + if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) || + GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 1a8e2b7db013..5f8d86e25993 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -96,7 +96,8 @@ static void heartbeat_commit(struct i915_request *rq, static void show_heartbeat(const struct i915_request *rq, struct intel_engine_cs *engine) { - struct drm_printer p = drm_debug_printer("heartbeat"); + struct drm_printer p = drm_dbg_printer(&rq->i915->drm, DRM_UT_DRIVER, + "heartbeat"); if (!rq) { intel_engine_dump(engine, &p, @@ -290,6 +291,9 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine) heartbeat_commit(rq, &attr); GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); + /* Ensure the forced pulse gets a full period to execute */ + next_heartbeat(engine); + return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 21a7e3191c18..ec1cbe229f0e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -24,6 +24,7 @@ 
#include "intel_ring.h" #include "i915_drv.h" #include "i915_pci.h" +#include "i915_reg.h" #include "i915_request.h" #include "i915_scatterlist.h" #include "i915_utils.h" @@ -1152,13 +1153,20 @@ static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915) static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) { struct drm_i915_private *i915 = ggtt->vm.i915; + struct intel_uncore *uncore = ggtt->vm.gt->uncore; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); phys_addr_t phys_addr; u32 pte_flags; int ret; GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915)); - phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915); + + if (i915_direct_stolen_access(i915)) { + drm_dbg(&i915->drm, "Using direct GSM access\n"); + phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK; + } else { + phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915); + } if (needs_wc_ggtt_mapping(i915)) ggtt->gsm = ioremap_wc(phys_addr, size); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c index f0dea54880af..c0b202223940 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c @@ -176,27 +176,13 @@ static u32 get_residency(struct intel_gt *gt, enum intel_rc6_res_type id) return DIV_ROUND_CLOSEST_ULL(res, 1000); } -static u8 get_rc6_mask(struct intel_gt *gt) -{ - u8 mask = 0; - - if (HAS_RC6(gt->i915)) - mask |= BIT(0); - if (HAS_RC6p(gt->i915)) - mask |= BIT(1); - if (HAS_RC6pp(gt->i915)) - mask |= BIT(2); - - return mask; -} - static ssize_t rc6_enable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buff) { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); - return sysfs_emit(buff, "%x\n", get_rc6_mask(gt)); + return sysfs_emit(buff, "%x\n", gt->rc6.enabled); } static ssize_t rc6_enable_dev_show(struct device *dev, @@ -205,7 +191,7 @@ static ssize_t rc6_enable_dev_show(struct device *dev, { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(&dev->kobj, attr->attr.name); - return sysfs_emit(buff, "%x\n", get_rc6_mask(gt)); + return sysfs_emit(buff, "%x\n", gt->rc6.enabled); } static u32 __rc6_residency_ms_show(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 86f73fe558ca..7811a8c9da06 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -24,7 +24,8 @@ bool i915_ggtt_require_binder(struct drm_i915_private *i915) { /* Wa_13010847436 & Wa_14019519902 */ - return MEDIA_VER_FULL(i915) == IP_VER(13, 0); + return !i915_direct_stolen_access(i915) && + MEDIA_VER_FULL(i915) == IP_VER(13, 0); } static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 353f93baaca0..25c1023eb5f9 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -495,7 +495,7 @@ static unsigned int get_mocs_settings(struct drm_i915_private *i915, memset(table, 0, sizeof(struct drm_i915_mocs_table)); table->unused_entries_index = I915_MOCS_PTE; - if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) { table->size = ARRAY_SIZE(mtl_mocs_table); table->table = mtl_mocs_table; table->n_entries = MTL_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c 
b/drivers/gpu/drm/i915/gt/intel_rc6.c index 7090e4be29cb..8f4b3c8af09c 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -123,7 +123,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) * temporary wa and should be removed after fixing real cause * of forcewake timeouts. */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) pg_enable = GEN9_MEDIA_PG_ENABLE | GEN11_MEDIA_SAMPLER_PG_ENABLE; diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index f8512aee58a8..51bb27e10a4f 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -144,8 +144,8 @@ region_lmem_init(struct intel_memory_region *mem) int ret; if (!io_mapping_init_wc(&mem->iomap, - mem->io_start, - mem->io_size)) + mem->io.start, + resource_size(&mem->io))) return -EIO; ret = intel_region_ttm_init(mem); @@ -240,7 +240,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) lmem_size -= tile_stolen; } else { /* Stolen starts from GSMBASE without CCS */ - lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE); + lmem_size = intel_uncore_read64(&i915->uncore, GEN6_GSMBASE); } i915_resize_lmem_bar(i915, lmem_size); @@ -273,14 +273,6 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) if (err) goto err_region_put; - drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region); - drm_dbg(&i915->drm, "Local memory IO start: %pa\n", - &mem->io_start); - drm_info(&i915->drm, "Local memory IO size: %pa\n", - &mem->io_size); - drm_info(&i915->drm, "Local memory available: %pa\n", - &lmem_size); - if (io_size < lmem_size) drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n", (u64)io_size >> 20); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 6801f8b95c53..c8e9aa41fdea 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1015,7 +1015,8 @@ void intel_gt_set_wedged(struct intel_gt *gt) mutex_lock(&gt->reset.mutex); if (GEM_SHOW_DEBUG()) { - struct drm_printer p = drm_debug_printer(__func__); + struct drm_printer p = drm_dbg_printer(&gt->i915->drm, + DRM_UT_DRIVER, __func__); struct intel_engine_cs *engine; enum intel_engine_id id; diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 3eacbc50caf8..d67d44611c28 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -789,8 +789,13 @@ static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine, dg2_ctx_gt_tuning_init(engine, wal); - if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) || - IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER)) + /* + * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in + * gen12_emit_indirect_ctx_rcs() rather than here on some early + * steppings.
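One reading aid for the inverted check that follows: IS_GFX_GT_IP_STEP(gt, ver, from, until) matches only graphics IP 'ver' with a stepping in the half-open range [from, until). Assuming those semantics, the rewrite widens the tuning from "12.70/12.71 at B0 or later" to "everything reaching this function except 12.70/12.71 at A0", which is how a newly added IP such as 12.74 gets covered on all steppings. Side by side, as an illustrative sketch only:

	/* old: opt in the known-good steppings explicitly */
	bool tune_old = IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
			IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER);

	/* new: opt out only the steppings affected by Wa_16014892111 */
	bool tune_new = !(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
			  IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0));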
+ */ + if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) || + IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))) wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false); } @@ -820,6 +825,9 @@ static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine, /* Wa_18019271663 */ wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE); + + /* Wa_14019877138 */ + wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT); } static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine, @@ -908,7 +916,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (engine->class != RENDER_CLASS) goto done; - if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_ctx_workarounds_init(engine, wal); else if (IS_PONTEVECCHIO(i915)) ; /* noop; none at this time */ @@ -1233,7 +1241,8 @@ static void __set_mcr_steering(struct i915_wa_list *wal, static void debug_dump_steering(struct intel_gt *gt) { - struct drm_printer p = drm_debug_printer("MCR Steering:"); + struct drm_printer p = drm_dbg_printer(&gt->i915->drm, DRM_UT_DRIVER, + "MCR Steering:"); if (drm_debug_enabled(DRM_UT_DRIVER)) intel_gt_mcr_report_steering(&p, gt, false); @@ -1643,7 +1652,7 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) static void xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - /* Wa_14018778641 / Wa_18018781329 */ + /* Wa_14018575942 / Wa_18018781329 */ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); /* Wa_22016670082 */ @@ -1710,7 +1719,7 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) */ static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal) { - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) { wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS); wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS); } @@ -1743,7 +1752,7 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) return; } - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_gt_workarounds_init(gt, wal); else if (IS_PONTEVECCHIO(i915)) pvc_gt_workarounds_init(gt, wal); @@ -2216,7 +2225,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) if (engine->gt->type == GT_MEDIA) ; /* none yet */ - else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71))) + else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_whitelist_build(engine); else if (IS_PONTEVECCHIO(i915)) pvc_whitelist_build(engine); @@ -2828,7 +2837,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt, { struct drm_i915_private *i915 = gt->i915; - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915)) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915)) wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512); /* @@ -2881,7 +2890,8 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li } if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) || - IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER)) + IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) || + IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) /* Wa_14017856879 */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH); diff --git
a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index 47070cba7eb1..12eca750f7d0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -285,7 +285,8 @@ out_engine: intel_engine_pm_flush(engine); if (intel_engine_pm_is_awake(engine)) { - struct drm_printer p = drm_debug_printer(__func__); + struct drm_printer p = drm_dbg_printer(&engine->i915->drm, + DRM_UT_DRIVER, __func__); intel_engine_dump(engine, &p, "%s is still awake:%d after idle-barriers\n", diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index bc441ce7b380..ef014df4c4fc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -122,9 +122,9 @@ static int __live_idle_pulse(struct intel_engine_cs *engine, GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); if (engine_sync_barrier(engine)) { - struct drm_printer m = drm_err_printer("pulse"); + struct drm_printer m = drm_err_printer(&engine->i915->drm, "pulse"); - pr_err("%s: no heartbeat pulse?\n", engine->name); + drm_printf(&m, "%s: no heartbeat pulse?\n", engine->name); intel_engine_dump(engine, &m, "%s", engine->name); err = -ETIME; @@ -136,10 +136,10 @@ static int __live_idle_pulse(struct intel_engine_cs *engine, pulse_unlock_wait(p); /* synchronize with the retirement callback */ if (!i915_active_is_idle(&p->active)) { - struct drm_printer m = drm_err_printer("pulse"); + struct drm_printer m = drm_err_printer(&engine->i915->drm, "pulse"); - pr_err("%s: heartbeat pulse did not flush idle tasks\n", - engine->name); + drm_printf(&m, "%s: heartbeat pulse did not flush idle tasks\n", + engine->name); i915_active_print(&p->active, &m); err = -EINVAL; diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index a7189c2d660c..1aa1446c8fb0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -62,12 +62,12 @@ int live_rc6_manual(void *arg) dt = ktime_get(); rc0_power = librapl_energy_uJ(); - msleep(250); + msleep(1000); rc0_power = librapl_energy_uJ() - rc0_power; dt = ktime_sub(ktime_get(), dt); res[1] = rc6_residency(rc6); if ((res[1] - res[0]) >> 10) { - pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n", + pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n", (res[1] - res[0]) >> 10); err = -EINVAL; goto out_unlock; diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c index 00b872b6380b..3941f2d6fa47 100644 --- a/drivers/gpu/drm/i915/gt/selftest_tlb.c +++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c @@ -206,8 +206,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt) * of pages. To succeed with both allocations, especially in case of Small * BAR, try to allocate no more than quarter of mappable memory. 
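To make the quarter-of-mappable clamp in the lines that follow concrete: on a small-BAR part exposing, say, a 256 MiB mappable window, resource_size(&mr->io) / 4 caps the test object at 64 MiB, leaving headroom for the second allocation the comment mentions.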
*/ - if (mr && size > mr->io_size / 4) - size = mr->io_size / 4; + if (mr && size > resource_size(&mr->io) / 4) + size = resource_size(&mr->io) / 4; return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 63724e17829a..f7372f736a77 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -377,8 +377,13 @@ static int guc_mmio_regset_init(struct temp_regset *regset, CCS_MASK(engine->gt)) ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true); + /* + * some of the WA registers are MCR registers. As it is safe to + * use MCR form for non-MCR registers, for code simplicity, all + * WA registers are added with MCR form. + */ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) - ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg); + ret |= GUC_MCR_REG_ADD(gt, regset, wa->mcr_reg, wa->masked_reg); /* Be extra paranoid and include all whitelist registers. */ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) @@ -394,13 +399,13 @@ static int guc_mmio_regset_init(struct temp_regset *regset, ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false); if (GRAPHICS_VER(engine->i915) >= 12) { - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL0, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL1, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL2, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL3, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL4, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL5, false); - ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL6, false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL0)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL1)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL2)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL3)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL4)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL5)), false); + ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL6)), false); } return ret ? -1 : 0; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 0f79cb658518..52332bb14339 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -184,7 +184,7 @@ static int guc_wait_ucode(struct intel_guc *guc) * in the seconds range. However, there is a limit on how long an * individual wait_for() can wait. So wrap it in a loop. 
- before_freq = intel_rps_read_actual_frequency(&uncore->gt->rps); + before_freq = intel_rps_read_actual_frequency(&gt->rps); before = ktime_get(); for (count = 0; count < GUC_LOAD_RETRY_LIMIT; count++) { ret = wait_for(guc_load_done(uncore, &status, &success), 1000); @@ -192,7 +192,7 @@ static int guc_wait_ucode(struct intel_guc *guc) break; guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz, status = 0x%08X [0x%02X/%02X]\n", - count, intel_rps_read_actual_frequency(&uncore->gt->rps), status, + count, intel_rps_read_actual_frequency(&gt->rps), status, REG_FIELD_GET(GS_BOOTROM_MASK, status), REG_FIELD_GET(GS_UKERNEL_MASK, status)); } @@ -204,7 +204,7 @@ static int guc_wait_ucode(struct intel_guc *guc) u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status); guc_info(guc, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz, ret = %d\n", - status, delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), ret); + status, delta_ms, intel_rps_read_actual_frequency(&gt->rps), ret); guc_info(guc, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n", REG_FIELD_GET(GS_MIA_IN_RESET, status), bootrom, ukernel, @@ -254,11 +254,11 @@ static int guc_wait_ucode(struct intel_guc *guc) guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n", delta_ms, status, count, ret); guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n", - intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq, + intel_rps_read_actual_frequency(&gt->rps), before_freq, intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt))); } else { guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n", - delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), + delta_ms, intel_rps_read_actual_frequency(&gt->rps), before_freq, status, count, ret); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index a259f1118c5a..f3dcae4b9d45 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -236,6 +236,13 @@ set_context_destroyed(struct intel_context *ce) ce->guc_state.sched_state |= SCHED_STATE_DESTROYED; } +static inline void +clr_context_destroyed(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + ce->guc_state.sched_state &= ~SCHED_STATE_DESTROYED; +} + static inline bool context_pending_disable(struct intel_context *ce) { return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE; @@ -613,6 +620,8 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc, u32 g2h_len_dw, bool loop) { + int ret; + /* * We always loop when a send requires a reply (i.e.
g2h_len_dw > 0), * so we don't handle the case where we don't get a reply because we @@ -623,7 +632,11 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc, if (g2h_len_dw) atomic_inc(&guc->outstanding_submission_g2h); - return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); + ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); + if (ret) + atomic_dec(&guc->outstanding_submission_g2h); + + return ret; } int intel_guc_wait_for_pending_msg(struct intel_guc *guc, @@ -1362,7 +1375,45 @@ static void guc_enable_busyness_worker(struct intel_guc *guc) static void guc_cancel_busyness_worker(struct intel_guc *guc) { - cancel_delayed_work_sync(&guc->timestamp.work); + /* + * There are many different call stacks that can get here. Some of them + * hold the reset mutex. The busyness worker also attempts to acquire the + * reset mutex. Synchronously flushing a worker thread requires acquiring + * the worker mutex. Lockdep sees this as a conflict. It thinks that the + * flush can deadlock because it holds the worker mutex while waiting for + * the reset mutex, but another thread is holding the reset mutex and might + * attempt to use other worker functions. + * + * In practice, this scenario does not exist because the busyness worker + * does not block waiting for the reset mutex. It does a try-lock on it and + * immediately exits if the lock is already held. Unfortunately, the mutex + * in question (I915_RESET_BACKOFF) is an i915 implementation which has lockdep + * annotation, but not to the extent of explaining that the 'might lock' is + * also a 'does not need to lock'. So one option would be to add more complex lockdep + * annotations to ignore the issue (if at all possible). A simpler option is to + * just not flush synchronously when a reset is in progress. Given that the worker + * will just early exit and re-schedule itself anyway, there is no advantage + * to running it immediately. + * + * If a reset is not in progress, then the synchronous flush may be required. + * As noted, many call stacks lead here, some during suspend and driver unload, + * which do require a synchronous flush to make sure the worker is stopped + * before memory is freed. + * + * Trying to pass a 'need_sync' or 'in_reset' flag all the way down through + * every possible call stack is infeasible. It would be too intrusive to many + * areas that really don't care about the GuC backend. However, there is the + * 'reset_in_progress' flag available, so just use that. + * + * And note that in the case of a reset occurring during driver unload + * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set) + * is fine because there is another cancel in _finish (when the reset flag is + * not).
+ */ + if (guc_to_gt(guc)->uc.reset_in_progress) + cancel_delayed_work(&guc->timestamp.work); + else + cancel_delayed_work_sync(&guc->timestamp.work); } static void __reset_guc_busyness_stats(struct intel_guc *guc) @@ -1613,6 +1664,11 @@ static void guc_flush_submissions(struct intel_guc *guc) spin_unlock_irqrestore(&sched_engine->lock, flags); } +void intel_guc_submission_flush_work(struct intel_guc *guc) +{ + flush_work(&guc->submission_state.destroyed_worker); +} + static void guc_flush_destroyed_contexts(struct intel_guc *guc); void intel_guc_submission_reset_prepare(struct intel_guc *guc) @@ -1948,8 +2004,16 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc) void intel_guc_submission_reset_finish(struct intel_guc *guc) { + /* + * Ensure the busyness worker gets cancelled even on a fatal wedge. + * Note that reset_prepare is not allowed to, because it confuses lockdep. + */ + if (guc_submission_initialized(guc)) + guc_cancel_busyness_worker(guc); + /* Reset called during driver load or during wedge? */ if (unlikely(!guc_submission_initialized(guc) || + !intel_guc_is_fw_running(guc) || intel_gt_is_wedged(guc_to_gt(guc)))) { return; } @@ -3283,12 +3347,13 @@ static void guc_context_close(struct intel_context *ce) spin_unlock_irqrestore(&ce->guc_state.lock, flags); } -static inline void guc_lrc_desc_unpin(struct intel_context *ce) +static inline int guc_lrc_desc_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); struct intel_gt *gt = guc_to_gt(guc); unsigned long flags; bool disabled; + int ret; GEM_BUG_ON(!intel_gt_pm_is_awake(gt)); GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id)); @@ -3299,18 +3364,41 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) spin_lock_irqsave(&ce->guc_state.lock, flags); disabled = submission_disabled(guc); if (likely(!disabled)) { + /* + * Take a gt-pm ref and change context state to be destroyed. + * NOTE: a G2H IRQ that comes after will put this gt-pm ref back + */ __intel_gt_pm_get(gt); set_context_destroyed(ce); clr_context_registered(ce); } spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (unlikely(disabled)) { release_guc_id(guc, ce); __guc_context_destroy(ce); - return; + return 0; } - deregister_context(ce, ce->guc_id.id); + /* + * GuC is active, let's destroy this context, but at this point we can still be racing + * with suspend, so we undo everything if the H2G fails in deregister_context so + * that GuC reset will find this context during cleanup. + */ + ret = deregister_context(ce, ce->guc_id.id); + if (ret) { + spin_lock(&ce->guc_state.lock); + set_context_registered(ce); + clr_context_destroyed(ce); + spin_unlock(&ce->guc_state.lock); + /* + * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements + * the wakeref immediately, but per the function's usage spec, call it after unlock. + */ + intel_wakeref_put_async(&gt->wakeref); + } + + return ret; } static void __guc_context_destroy(struct intel_context *ce) @@ -3378,7 +3466,22 @@ static void deregister_destroyed_contexts(struct intel_guc *guc) if (!ce) break; - guc_lrc_desc_unpin(ce); + if (guc_lrc_desc_unpin(ce)) { + /* + * This means GuC's CT link was severed mid-way, which could happen + * in suspend-resume corner cases. In this case, put the + * context back into the destroyed_contexts list, which will + * get picked up on the next context deregistration event or + * purged in a GuC sanitization event (reset/unload/wedged/...).
+ */ + spin_lock_irqsave(&guc->submission_state.lock, flags); + list_add_tail(&ce->destroyed_link, + &guc->submission_state.destroyed_contexts); + spin_unlock_irqrestore(&guc->submission_state.lock, flags); + /* Bail now since the list might never be emptied if h2gs fail */ + break; + } + } @@ -3389,6 +3492,17 @@ static void destroyed_worker_func(struct work_struct *w) struct intel_gt *gt = guc_to_gt(guc); intel_wakeref_t wakeref; + /* + * In rare cases we can get here via async context-free fence-signals that + * come very late in the suspend flow or very early in the resume flow. In these + * cases, GuC won't be ready, but just skipping it here is fine as these + * pending-destroy-contexts get destroyed totally at GuC reset time at the + * end of suspend, or this worker can be picked up later on the next + * context destruction trigger after resume completes. + */ + if (!intel_guc_is_ready(guc)) + return; + with_intel_gt_pm(gt, wakeref) deregister_destroyed_contexts(guc); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index c57b29cdb1a6..b6df75622d3b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -38,6 +38,8 @@ int intel_guc_wait_for_pending_msg(struct intel_guc *guc, bool interruptible, long timeout); +void intel_guc_submission_flush_work(struct intel_guc *guc); + static inline bool intel_guc_submission_is_supported(struct intel_guc *guc) { return guc->submission_supported; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index ba9e07fc2b57..0945b177d5f9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -6,6 +6,7 @@ #include <linux/types.h> #include "gt/intel_gt.h" +#include "gt/intel_rps.h" #include "intel_guc_reg.h" #include "intel_huc.h" #include "intel_huc_print.h" @@ -447,17 +448,68 @@ static const char *auth_mode_string(struct intel_huc *huc, return partial ? "clear media" : "all workloads"; } +/* + * Use a longer timeout for debug builds so that problems can be detected + * and analysed, but a shorter timeout for releases so that users don't + * wait forever to find out there is a problem. Note that the only reason + * an end user should hit the timeout is in case of extreme thermal throttling. + * And a system that is that hot during boot is probably dead anyway! + */ +#if defined(CONFIG_DRM_I915_DEBUG_GEM) +#define HUC_LOAD_RETRY_LIMIT 20 +#else +#define HUC_LOAD_RETRY_LIMIT 3 +#endif + int intel_huc_wait_for_auth_complete(struct intel_huc *huc, enum intel_huc_authentication_type type) { struct intel_gt *gt = huc_to_gt(huc); - int ret; + struct intel_uncore *uncore = gt->uncore; + ktime_t before, after, delta; + int ret, count; + u64 delta_ms; + u32 before_freq; - ret = __intel_wait_for_register(gt->uncore, - huc->status[type].reg, - huc->status[type].mask, - huc->status[type].value, - 2, 50, NULL); + /* + * The KMD requests maximum frequency during driver load; however, thermal + * throttling can force the frequency down to minimum (although the board + * really should never get that hot in real life!). IFWI issues have been + * seen to cause sporadic failures to grant the higher frequency. And at + * minimum frequency, the authentication time can be in the seconds range. + * Note that there is a limit on how long an individual wait_for() can wait. + * So wrap it in a loop.
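For scale: with the 2 us fast poll and the 1000 ms slow timeout passed to __intel_wait_for_register() below, each loop iteration is bounded at roughly one second, so the worst-case budget works out to about HUC_LOAD_RETRY_LIMIT seconds in total:

	/* worst case ~= HUC_LOAD_RETRY_LIMIT * 1s:
	 *   release build:              3 * 1s ~=  3s
	 *   CONFIG_DRM_I915_DEBUG_GEM: 20 * 1s ~= 20s
	 */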
+ */ + before_freq = intel_rps_read_actual_frequency(&gt->rps); + before = ktime_get(); + for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) { + ret = __intel_wait_for_register(gt->uncore, + huc->status[type].reg, + huc->status[type].mask, + huc->status[type].value, + 2, 1000, NULL); + if (!ret) + break; + + huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n", + count, intel_rps_read_actual_frequency(&gt->rps), + huc->status[type].reg.reg); + } + after = ktime_get(); + delta = ktime_sub(after, before); + delta_ms = ktime_to_ms(delta); + + if (delta_ms > 50) { + huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n", + delta_ms, huc->status[type].reg.reg, count, ret); + huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n", + intel_rps_read_actual_frequency(&gt->rps), before_freq, + intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt))); + } else { + huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n", + delta_ms, intel_rps_read_actual_frequency(&gt->rps), + before_freq, huc->status[type].reg.reg, count, ret); + } /* mark the load process as complete even if the wait failed */ delayed_huc_load_complete(huc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 3872d309ed31..6dfe5d9456c6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -640,7 +640,7 @@ void intel_uc_reset_finish(struct intel_uc *uc) uc->reset_in_progress = false; /* Firmware expected to be running when this function is called */ - if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc)) + if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_reset_finish(guc); } @@ -690,6 +690,8 @@ void intel_uc_suspend(struct intel_uc *uc) return; } + intel_guc_submission_flush_work(guc); + with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) { err = intel_guc_suspend(guc); if (err) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 90f6c1ece57d..efcb00472be2 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2849,8 +2849,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, for (i = start; i < end; i += 4) { p = intel_gvt_find_mmio_info(gvt, i); if (p) { - WARN(1, "dup mmio definition offset %x\n", - info->offset); + WARN(1, "dup mmio definition offset %x\n", i); /* We return -EEXIST here to make GVT-g load fail.
* So duplicated MMIO can be found as soon as diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index faf21be724c3..4f74d867fe1a 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -574,7 +574,7 @@ int intel_gvt_set_opregion(struct intel_vgpu *vgpu) ret = intel_vgpu_register_reg(vgpu, PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, - &intel_vgpu_regops_opregion, OPREGION_SIZE, + &intel_vgpu_regops_opregion, INTEL_GVT_OPREGION_SIZE, VFIO_REGION_INFO_FLAG_READ, base); return ret; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index db99c2ef66db..990eaa029d9c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -147,7 +147,7 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = obj_to_i915(obj); - if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) { switch (obj->pat_index) { case 0: return " WB"; case 1: return " WT"; diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index c7d7c3b7ecc6..9ee902d5b72c 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -681,7 +681,8 @@ i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p) static void i915_welcome_messages(struct drm_i915_private *dev_priv) { if (drm_debug_enabled(DRM_UT_DRIVER)) { - struct drm_printer p = drm_debug_printer("i915 device info:"); + struct drm_printer p = drm_dbg_printer(&dev_priv->drm, DRM_UT_DRIVER, + "device info:"); struct intel_gt *gt; unsigned int i; @@ -1003,8 +1004,10 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_runtime_pm_disable(&i915->runtime_pm); intel_power_domains_disable(i915); + intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true); if (HAS_DISPLAY(i915)) { drm_kms_helper_poll_disable(&i915->drm); + intel_display_driver_disable_user_access(i915); drm_atomic_helper_shutdown(&i915->drm); } @@ -1014,6 +1017,9 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_runtime_pm_disable_interrupts(i915); intel_hpd_cancel_work(i915); + if (HAS_DISPLAY(i915)) + intel_display_driver_suspend_access(i915); + intel_suspend_encoders(i915); intel_shutdown_encoders(i915); @@ -1080,8 +1086,11 @@ static int i915_drm_suspend(struct drm_device *dev) /* We do a lot of poking in a lot of registers, make sure they work * properly. */ intel_power_domains_disable(dev_priv); - if (HAS_DISPLAY(dev_priv)) + intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); + if (HAS_DISPLAY(dev_priv)) { drm_kms_helper_poll_disable(dev); + intel_display_driver_disable_user_access(dev_priv); + } pci_save_state(pdev); @@ -1092,6 +1101,9 @@ static int i915_drm_suspend(struct drm_device *dev) intel_runtime_pm_disable_interrupts(dev_priv); intel_hpd_cancel_work(dev_priv); + if (HAS_DISPLAY(dev_priv)) + intel_display_driver_suspend_access(dev_priv); + intel_suspend_encoders(dev_priv); /* Must be called before GGTT is suspended. */ @@ -1103,8 +1115,6 @@ static int i915_drm_suspend(struct drm_device *dev) opregion_target_state = suspend_to_idle(dev_priv) ? 
PCI_D1 : PCI_D3cold; intel_opregion_suspend(dev_priv, opregion_target_state); - intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); - dev_priv->suspend_count++; intel_dmc_suspend(dev_priv); @@ -1243,15 +1253,21 @@ static int i915_drm_resume(struct drm_device *dev) intel_display_driver_init_hw(dev_priv); intel_clock_gating_init(dev_priv); + + if (HAS_DISPLAY(dev_priv)) + intel_display_driver_resume_access(dev_priv); + intel_hpd_init(dev_priv); /* MST sideband requires HPD interrupts enabled */ intel_dp_mst_resume(dev_priv); intel_display_driver_resume(dev_priv); - intel_hpd_poll_disable(dev_priv); - if (HAS_DISPLAY(dev_priv)) + if (HAS_DISPLAY(dev_priv)) { + intel_display_driver_enable_user_access(dev_priv); drm_kms_helper_poll_enable(dev); + } + intel_hpd_poll_disable(dev_priv); intel_opregion_resume(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c index fa6852713bee..f58682505491 100644 --- a/drivers/gpu/drm/i915/i915_drm_client.c +++ b/drivers/gpu/drm/i915/i915_drm_client.c @@ -53,7 +53,7 @@ obj_meminfo(struct drm_i915_gem_object *obj, obj->mm.region->id : INTEL_REGION_SMEM; const u64 sz = obj->base.size; - if (obj->base.handle_count > 1) + if (drm_gem_object_is_shared_for_memory_stats(&obj->base)) stats[id].shared += sz; else stats[id].private += sz; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 861567362abd..e81b3b2858ac 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -165,14 +165,6 @@ struct i915_gem_mm { struct notifier_block vmap_notifier; struct shrinker *shrinker; -#ifdef CONFIG_MMU_NOTIFIER - /** - * notifier_lock for mmu notifiers, memory may not be allocated - * while holding this lock. - */ - rwlock_t notifier_lock; -#endif - /* shrinker accounting, also useful for userland debugging */ u64 shrink_memory; u32 shrink_count; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 92758b6b41f0..1391c01d7663 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -48,7 +48,6 @@ #include "gem/i915_gem_object_frontbuffer.h" #include "gem/i915_gem_pm.h" #include "gem/i915_gem_region.h" -#include "gem/i915_gem_userptr.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" @@ -1165,10 +1164,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K; - ret = i915_gem_init_userptr(dev_priv); - if (ret) - return ret; - for_each_gt(gt, dev_priv, i) { intel_uc_fetch_firmwares(&gt->uc); intel_wopcm_init(&gt->wopcm); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d04660b60046..a0b784ebaddd 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1157,7 +1157,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, dma_addr_t offset = dma - mem->region.start; void __iomem *s; - if (offset + PAGE_SIZE > mem->io_size) { + if (offset + PAGE_SIZE > resource_size(&mem->io)) { ret = -EINVAL; break; } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2d695818f006..bd9d812b1afa 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3225,7 +3225,7 @@ u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915) struct intel_gt *gt = to_gt(i915); /* Wa_18013179988 */ - if (IS_DG2(i915)
|| IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) { intel_wakeref_t wakeref; u32 reg, shift; diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 00871ef99792..3baa2f54a86e 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -502,7 +502,7 @@ static int query_memregion_info(struct drm_i915_private *i915, info.probed_size = mr->total; if (mr->type == INTEL_MEMORY_LOCAL) - info.probed_cpu_visible_size = mr->io_size; + info.probed_cpu_visible_size = resource_size(&mr->io); else info.probed_cpu_visible_size = mr->total; @@ -551,6 +551,38 @@ static int query_hwconfig_blob(struct drm_i915_private *i915, return hwconfig->size; } +static int +query_guc_submission_version(struct drm_i915_private *i915, + struct drm_i915_query_item *query) +{ + struct drm_i915_query_guc_submission_version __user *query_ptr = + u64_to_user_ptr(query->data_ptr); + struct drm_i915_query_guc_submission_version ver; + struct intel_guc *guc = &to_gt(i915)->uc.guc; + const size_t size = sizeof(ver); + int ret; + + if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc)) + return -ENODEV; + + ret = copy_query_item(&ver, size, size, query); + if (ret != 0) + return ret; + + if (ver.branch || ver.major || ver.minor || ver.patch) + return -EINVAL; + + ver.branch = 0; + ver.major = guc->submission_version.major; + ver.minor = guc->submission_version.minor; + ver.patch = guc->submission_version.patch; + + if (copy_to_user(query_ptr, &ver, size)) + return -EFAULT; + + return 0; +} + static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv, struct drm_i915_query_item *query_item) = { query_topology_info, @@ -559,6 +591,7 @@ static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv, query_memregion_info, query_hwconfig_blob, query_geometry_subslices, + query_guc_submission_version, }; int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 27dc903f0553..e00557e1a57f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3059,6 +3059,7 @@ #define MCURSOR_MODE_64_ARGB_AX (0x20 | MCURSOR_MODE_64_32B_AX) #define _CURABASE 0x70084 #define _CURAPOS 0x70088 +#define _CURAPOS_ERLY_TPT 0x7008c #define CURSOR_POS_Y_SIGN REG_BIT(31) #define CURSOR_POS_Y_MASK REG_GENMASK(30, 16) #define CURSOR_POS_Y(y) REG_FIELD_PREP(CURSOR_POS_Y_MASK, (y)) @@ -3087,6 +3088,7 @@ #define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR) #define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE) #define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS) +#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(pipe, _CURAPOS_ERLY_TPT) #define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE) #define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A) #define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A) @@ -5412,6 +5414,9 @@ #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 #define GEN6_PCODE_DATA1 _MMIO(0x13812C) +#define MTL_PCODE_STOLEN_ACCESS _MMIO(0x138914) +#define STOLEN_ACCESS_ALLOWED 0x1 + /* IVYBRIDGE DPF */ #define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ #define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14) @@ -5652,6 +5657,10 @@ enum skl_power_gate { #define DP_TP_CTL_MODE_SST (0 << 27) #define DP_TP_CTL_MODE_MST (1 << 27) #define DP_TP_CTL_FORCE_ACT (1 << 25) +#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK (3 << 19) +#define 
DP_TP_CTL_TRAIN_PAT4_SEL_TP4A (0 << 19) +#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B (1 << 19) +#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C (2 << 19) #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18) #define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15) #define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8) @@ -5684,6 +5693,8 @@ enum skl_power_gate { /* Known as DDI_CTL_DE in MTL+ */ #define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B) #define DDI_BUF_CTL_ENABLE (1 << 31) +#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29) +#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28) #define DDI_BUF_TRANS_SELECT(n) ((n) << 24) #define DDI_BUF_EMP_MASK (0xf << 24) #define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20) @@ -6314,9 +6325,10 @@ enum skl_power_gate { #define GMS_MASK REG_GENMASK(15, 8) #define GGMS_MASK REG_GENMASK(7, 6) -#define GEN12_GSMBASE _MMIO(0x108100) -#define GEN12_DSMBASE _MMIO(0x1080C0) -#define GEN12_BDSM_MASK REG_GENMASK64(63, 20) +#define GEN6_GSMBASE _MMIO(0x108100) +#define GEN6_DSMBASE _MMIO(0x1080C0) +#define GEN6_BDSM_MASK REG_GENMASK64(31, 20) +#define GEN11_BDSM_MASK REG_GENMASK64(63, 20) #define XEHP_CLOCK_GATE_DIS _MMIO(0x101014) #define SGSI_SIDECLK_DIS REG_BIT(17) diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c index 60404dbb2e9f..df6437c37373 100644 --- a/drivers/gpu/drm/i915/i915_syncmap.c +++ b/drivers/gpu/drm/i915/i915_syncmap.c @@ -75,13 +75,10 @@ struct i915_syncmap { unsigned int height; unsigned int bitmap; struct i915_syncmap *parent; - /* - * Following this header is an array of either seqno or child pointers: - * union { - * u32 seqno[KSYNCMAP]; - * struct i915_syncmap *child[KSYNCMAP]; - * }; - */ + union { + DECLARE_FLEX_ARRAY(u32, seqno); + DECLARE_FLEX_ARRAY(struct i915_syncmap *, child); + }; }; /** @@ -99,13 +96,13 @@ void i915_syncmap_init(struct i915_syncmap **root) static inline u32 *__sync_seqno(struct i915_syncmap *p) { GEM_BUG_ON(p->height); - return (u32 *)(p + 1); + return p->seqno; } static inline struct i915_syncmap **__sync_child(struct i915_syncmap *p) { GEM_BUG_ON(!p->height); - return (struct i915_syncmap **)(p + 1); + return p->child; } static inline unsigned int @@ -200,7 +197,7 @@ __sync_alloc_leaf(struct i915_syncmap *parent, u64 id) { struct i915_syncmap *p; - p = kmalloc(sizeof(*p) + KSYNCMAP * sizeof(u32), GFP_KERNEL); + p = kmalloc(struct_size(p, seqno, KSYNCMAP), GFP_KERNEL); if (unlikely(!p)) return NULL; @@ -282,7 +279,7 @@ static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno) unsigned int above; /* Insert a join above the current layer */ - next = kzalloc(sizeof(*next) + KSYNCMAP * sizeof(next), + next = kzalloc(struct_size(next, child, KSYNCMAP), GFP_KERNEL); if (unlikely(!next)) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 29fd02bf5ea8..6f9e7b354b54 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -8,6 +8,7 @@ #include <drm/drm_drv.h> #include "i915_drv.h" +#include "i915_reg.h" #include "i915_utils.h" #define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details." @@ -125,3 +126,19 @@ bool i915_vtd_active(struct drm_i915_private *i915) /* Running as a guest, we assume the host is enforcing VT'd */ return i915_run_as_guest(); } + +bool i915_direct_stolen_access(struct drm_i915_private *i915) +{ + /* + * Wa_22018444074 + * + * Access via BAR can hang MTL, go directly to GSM/DSM, + * except for VM guests which won't have access to it. 
+ * + * Normally this would not work but on MTL the system firmware + * should have relaxed the access permissions sufficiently. + * 0x138914==0x1 indicates that the firmware has done its job. + */ + return IS_METEORLAKE(i915) && !i915_run_as_guest() && + intel_uncore_read(&i915->uncore, MTL_PCODE_STOLEN_ACCESS) == STOLEN_ACCESS_ALLOWED; +} diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index f98577967b7f..b45ef0560611 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -391,4 +391,6 @@ static inline bool i915_run_as_guest(void) bool i915_vtd_active(struct drm_i915_private *i915); +bool i915_direct_stolen_access(struct drm_i915_private *i915); + #endif /* !__I915_UTILS_H */ diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index e98b6d69a91a..9b6d87c8b583 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -41,7 +41,7 @@ * To virtualize GPU resources GVT-g driver depends on hypervisor technology * e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability * and be virtualized within GVT-g device module. More architectural design - * doc is available on https://01.org/group/2230/documentation-list. + * doc is available on https://github.com/intel/gvt-linux/wiki. */ static LIST_HEAD(intel_gvt_devices); diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index 60a03340bbd4..52d998e5c21a 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem, if (memchr_inv(result, value, sizeof(result))) { dev_err(mem->i915->drm.dev, "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n", - &mem->region, &mem->io_start, &offset, caller, + &mem->region, &mem->io.start, &offset, caller, value, result[0], result[1], result[2]); return -EINVAL; } @@ -67,11 +67,11 @@ static int iopagetest(struct intel_memory_region *mem, int err; int i; - va = ioremap_wc(mem->io_start + offset, PAGE_SIZE); + va = ioremap_wc(mem->io.start + offset, PAGE_SIZE); if (!va) { dev_err(mem->i915->drm.dev, "Failed to ioremap memory region [%pa + %pa] for %ps\n", - &mem->io_start, &offset, caller); + &mem->io.start, &offset, caller); return -EFAULT; } @@ -102,10 +102,10 @@ static int iomemtest(struct intel_memory_region *mem, resource_size_t last, page; int err; - if (mem->io_size < PAGE_SIZE) + if (resource_size(&mem->io) < PAGE_SIZE) return 0; - last = mem->io_size - PAGE_SIZE; + last = resource_size(&mem->io) - PAGE_SIZE; /* * Quick test to check read/write access to the iomap (backing store). 
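Stepping back to the GuC submission version query added in i915_query.c above: user space reaches it through the usual DRM_IOCTL_I915_QUERY flow. A sketch of a caller follows; the DRM_I915_QUERY_GUC_SUBMISSION_VERSION id comes from the uapi header update in this series, which sits outside the drivers/gpu diffstat, and error handling is trimmed:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <drm/i915_drm.h>

	static int print_guc_submission_version(int fd)
	{
		struct drm_i915_query_guc_submission_version ver;
		struct drm_i915_query_item item;
		struct drm_i915_query q;

		memset(&ver, 0, sizeof(ver));	/* non-zero input fields are rejected with -EINVAL */
		memset(&item, 0, sizeof(item));
		item.query_id = DRM_I915_QUERY_GUC_SUBMISSION_VERSION;
		item.length = sizeof(ver);
		item.data_ptr = (uintptr_t)&ver;

		memset(&q, 0, sizeof(q));
		q.num_items = 1;
		q.items_ptr = (uintptr_t)&item;

		/* a negative item.length on return carries the per-item error,
		 * e.g. -ENODEV when GuC submission is not in use */
		if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length < 0)
			return -1;

		printf("GuC submission interface v%u.%u.%u\n", ver.major, ver.minor, ver.patch);
		return 0;
	}

As with the other query items, passing item.length == 0 first is the conventional way to probe for the required buffer size before the real call.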
@@ -207,7 +207,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem, struct drm_i915_private *i915 = mem->i915; int err = 0; - if (!mem->io_start) + if (!mem->io.start) return 0; if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest) @@ -252,8 +252,7 @@ intel_memory_region_create(struct drm_i915_private *i915, mem->i915 = i915; mem->region = DEFINE_RES_MEM(start, size); - mem->io_start = io_start; - mem->io_size = io_size; + mem->io = DEFINE_RES_MEM(io_start, io_size); mem->min_page_size = min_page_size; mem->ops = ops; mem->total = size; @@ -373,6 +372,24 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915) i915->mm.regions[i] = mem; } + for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) { + struct intel_memory_region *mem = i915->mm.regions[i]; + u64 region_size, io_size; + + if (!mem) + continue; + + region_size = resource_size(&mem->region) >> 20; + io_size = resource_size(&mem->io) >> 20; + + if (resource_size(&mem->io)) + drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n", + mem->id, mem->name, region_size, &mem->region, io_size, &mem->io); + else + drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n", + mem->id, mem->name, region_size, &mem->region); + } + return 0; out_cleanup: diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 9ba36454e51b..40810cfb3fd9 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -71,8 +71,7 @@ struct intel_memory_region { struct io_mapping iomap; struct resource region; - resource_size_t io_start; - resource_size_t io_size; + struct resource io; resource_size_t min_page_size; resource_size_t total; diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index bf6097e7433d..04525d92bec5 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -87,7 +87,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem) ret = i915_ttm_buddy_man_init(bdev, mem_type, false, resource_size(&mem->region), - mem->io_size, + resource_size(&mem->io), mem->min_page_size, PAGE_SIZE); if (ret) return ret; @@ -219,16 +219,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem, goto out; } place.lpfn = place.fpfn + (size >> PAGE_SHIFT); - } else if (mem->io_size && mem->io_size < mem->total) { + } else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) { if (flags & I915_BO_ALLOC_GPU_ONLY) { place.flags |= TTM_PL_FLAG_TOPDOWN; } else { place.fpfn = 0; - if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) { + if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) { ret = -E2BIG; goto out; } - place.lpfn = mem->io_size >> PAGE_SHIFT; + place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT; } } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index dfefad5a5fec..76400e9c40f0 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1800,7 +1800,10 @@ static const struct intel_forcewake_range __mtl_fw_ranges[] = { GEN_FW_RANGE(0x24000, 0x2ffff, 0), /* 0x24000 - 0x2407f: always on 0x24080 - 0x2ffff: reserved */ - GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT) + GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), + GEN_FW_RANGE(0x40000, 0x1901ef, 0), + GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT) + /* FIXME: WA to wake GT while triggering H2G */ }; /* diff 
--git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index b61fe850e924..0d89d70b9c36 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -156,9 +156,9 @@ static int live_active_wait(void *arg) __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE); if (!READ_ONCE(active->retired)) { - struct drm_printer p = drm_err_printer(__func__); + struct drm_printer p = drm_err_printer(&i915->drm, __func__); - pr_err("i915_active not retired after waiting!\n"); + drm_printf(&p, "i915_active not retired after waiting!\n"); i915_active_print(&active->base, &p); err = -EINVAL; @@ -189,9 +189,9 @@ static int live_active_retire(void *arg) err = -EIO; if (!READ_ONCE(active->retired)) { - struct drm_printer p = drm_err_printer(__func__); + struct drm_printer p = drm_err_printer(&i915->drm, __func__); - pr_err("i915_active not retired after flushing!\n"); + drm_printf(&p, "i915_active not retired after flushing!\n"); i915_active_print(&active->base, &p); err = -EINVAL; diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index d985d9bae2e8..ae6070b5bf07 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj) u64 start = drm_buddy_block_offset(block); u64 end = start + drm_buddy_block_size(mm, block); - if (start < mr->io_size) - total += min_t(u64, end, mr->io_size) - start; + if (start < resource_size(&mr->io)) + total += min_t(u64, end, resource_size(&mr->io)) - start; } return total; diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c index 240beafb38ed..3cad6dac06b0 100644 --- a/drivers/gpu/drm/i915/soc/intel_pch.c +++ b/drivers/gpu/drm/i915/soc/intel_pch.c @@ -140,11 +140,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) && !IS_ALDERLAKE_P(dev_priv)); return PCH_ADP; - case INTEL_PCH_MTP_DEVICE_ID_TYPE: - case INTEL_PCH_MTP2_DEVICE_ID_TYPE: - drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n"); - drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv)); - return PCH_MTP; default: return PCH_NONE; } @@ -173,9 +168,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv, * make an educated guess as to which PCH is really there. */ - if (IS_METEORLAKE(dev_priv)) - id = INTEL_PCH_MTP_DEVICE_ID_TYPE; - else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) + if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) id = INTEL_PCH_ADP_DEVICE_ID_TYPE; else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) id = INTEL_PCH_TGP_DEVICE_ID_TYPE; @@ -225,6 +218,13 @@ void intel_detect_pch(struct drm_i915_private *dev_priv) if (DISPLAY_VER(dev_priv) >= 20) { dev_priv->pch_type = PCH_LNL; return; + } else if (IS_METEORLAKE(dev_priv)) { + /* + * Both north display and south display are on the SoC die. + * The real PCH is uninvolved in display. 
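The intel_pch change above retires the real Meteor Lake PCH ids: with both north and south display on the SoC die, detection can short-circuit to a fake PCH type before any ISA-bridge probing. A rough sketch of that control flow, all names hypothetical:

enum demo_pch {
        DEMO_PCH_NONE,
        /* fake PCHs start at a sentinel value so they can never
         * collide with types decoded from a real ISA bridge id */
        DEMO_PCH_MTL = 1024,
        DEMO_PCH_LNL,
};

static enum demo_pch demo_detect_pch(bool is_lnl, bool is_mtl)
{
        if (is_lnl)
                return DEMO_PCH_LNL;
        if (is_mtl)
                return DEMO_PCH_MTL;
        return DEMO_PCH_NONE;   /* fall back to ISA-bridge probing */
}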
+ */ + dev_priv->pch_type = PCH_MTL; + return; } else if (IS_DG2(dev_priv)) { dev_priv->pch_type = PCH_DG2; return; diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h index 1b03ea60a7a8..89e89ede265d 100644 --- a/drivers/gpu/drm/i915/soc/intel_pch.h +++ b/drivers/gpu/drm/i915/soc/intel_pch.h @@ -25,11 +25,11 @@ enum intel_pch { PCH_ICP, /* Ice Lake/Jasper Lake PCH */ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */ PCH_ADP, /* Alder Lake PCH */ - PCH_MTP, /* Meteor Lake PCH */ /* Fake PCHs, functionality handled on the same PCI dev */ PCH_DG1 = 1024, PCH_DG2, + PCH_MTL, PCH_LNL, }; @@ -59,16 +59,12 @@ enum intel_pch { #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 #define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 #define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480 -#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00 -#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) -#define HAS_PCH_LNL(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LNL) -#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP) #define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2) #define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP) #define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1) diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c index 8389f2d7d021..0e668fc1e0f9 100644 --- a/drivers/gpu/drm/lima/lima_ctx.c +++ b/drivers/gpu/drm/lima/lima_ctx.c @@ -19,7 +19,7 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id) kref_init(&ctx->refcnt); for (i = 0; i < lima_pipe_num; i++) { - err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty); + err = lima_sched_context_init(dev->pipe + i, ctx->context + i); if (err) goto err_out0; } diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h index 74e2be09090f..5b1063ce968b 100644 --- a/drivers/gpu/drm/lima/lima_ctx.h +++ b/drivers/gpu/drm/lima/lima_ctx.h @@ -13,7 +13,6 @@ struct lima_ctx { struct kref refcnt; struct lima_device *dev; struct lima_sched_context context[lima_pipe_num]; - atomic_t guilty; /* debug info */ char pname[TASK_COMM_LEN]; diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c index 8dd501b7a3d0..6b354e2fb61d 100644 --- a/drivers/gpu/drm/lima/lima_gp.c +++ b/drivers/gpu/drm/lima/lima_gp.c @@ -34,11 +34,11 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data) if (state & LIMA_GP_IRQ_MASK_ERROR) { if ((state & LIMA_GP_IRQ_MASK_ERROR) == LIMA_GP_IRQ_PLBU_OUT_OF_MEM) { - dev_dbg(dev->dev, "gp out of heap irq status=%x\n", - status); + dev_dbg(dev->dev, "%s out of heap irq status=%x\n", + lima_ip_name(ip), status); } else { - dev_err(dev->dev, "gp error irq state=%x status=%x\n", - state, status); + dev_err(dev->dev, "%s error irq state=%x status=%x\n", + lima_ip_name(ip), state, status); if (task) task->recoverable = false; } @@ -89,7 +89,8 @@ static int lima_gp_soft_reset_async_wait(struct lima_ip *ip) v & LIMA_GP_IRQ_RESET_COMPLETED, 0, 100); if (err) { - dev_err(dev->dev, "gp soft reset time out\n"); + dev_err(dev->dev, "%s soft reset time out\n", + lima_ip_name(ip)); return err; } @@ -166,6 +167,11 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe, gp_write(LIMA_GP_CMD, cmd); } +static int 
lima_gp_bus_stop_poll(struct lima_ip *ip) +{ + return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED); +} + static int lima_gp_hard_reset_poll(struct lima_ip *ip) { gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000); @@ -179,16 +185,30 @@ static int lima_gp_hard_reset(struct lima_ip *ip) gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000); gp_write(LIMA_GP_INT_MASK, 0); + + gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS); + ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100); + if (ret) { + dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip)); + return ret; + } gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET); ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100); if (ret) { - dev_err(dev->dev, "gp hard reset timeout\n"); + dev_err(dev->dev, "%s hard reset timeout\n", lima_ip_name(ip)); return ret; } gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0); gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL); gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED); + + /* + * if there was an async soft reset queued, + * don't wait for it in the next job + */ + ip->data.async_reset = false; + return 0; } @@ -201,8 +221,9 @@ static void lima_gp_task_error(struct lima_sched_pipe *pipe) { struct lima_ip *ip = pipe->processor[0]; - dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n", - gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS)); + dev_err(ip->dev->dev, "%s task error int_state=%x status=%x\n", + lima_ip_name(ip), gp_read(LIMA_GP_INT_STAT), + gp_read(LIMA_GP_STATUS)); lima_gp_hard_reset(ip); } @@ -305,7 +326,7 @@ int lima_gp_init(struct lima_ip *ip) err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler, IRQF_SHARED, lima_ip_name(ip), ip); if (err) { - dev_err(dev->dev, "gp %s fail to request irq\n", + dev_err(dev->dev, "%s fail to request irq\n", lima_ip_name(ip)); return err; } diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c index c4080a02957b..184106ce55f8 100644 --- a/drivers/gpu/drm/lima/lima_l2_cache.c +++ b/drivers/gpu/drm/lima/lima_l2_cache.c @@ -21,7 +21,8 @@ static int lima_l2_cache_wait_idle(struct lima_ip *ip) !(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY), 0, 1000); if (err) { - dev_err(dev->dev, "l2 cache wait command timeout\n"); + dev_err(dev->dev, "%s wait command timeout\n", + lima_ip_name(ip)); return err; } return 0; @@ -83,7 +84,8 @@ int lima_l2_cache_init(struct lima_ip *ip) spin_lock_init(&ip->data.lock); size = l2_cache_read(LIMA_L2_CACHE_SIZE); - dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n", + dev_info(dev->dev, "%s %uK, %u-way, %ubyte cache line, %ubit external bus\n", + lima_ip_name(ip), 1 << (((size >> 16) & 0xff) - 10), 1 << ((size >> 8) & 0xff), 1 << (size & 0xff), diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c index a1ae6c252dc2..e18317c5ca8c 100644 --- a/drivers/gpu/drm/lima/lima_mmu.c +++ b/drivers/gpu/drm/lima/lima_mmu.c @@ -22,7 +22,8 @@ cond, 0, 100); \ if (__ret) \ dev_err(dev->dev, \ - "mmu command %x timeout\n", cmd); \ + "%s command %x timeout\n", \ + lima_ip_name(ip), cmd); \ __ret; \ }) @@ -40,14 +41,13 @@ static irqreturn_t lima_mmu_irq_handler(int irq, void *data) if (status & LIMA_MMU_INT_PAGE_FAULT) { u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR); - dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n", - fault, LIMA_MMU_STATUS_BUS_ID(status), - status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? 
"write" : "read", - lima_ip_name(ip)); + dev_err(dev->dev, "%s page fault at 0x%x from bus id %d of type %s\n", + lima_ip_name(ip), fault, LIMA_MMU_STATUS_BUS_ID(status), + status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read"); } if (status & LIMA_MMU_INT_READ_BUS_ERROR) - dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip)); + dev_err(dev->dev, "%s irq bus error\n", lima_ip_name(ip)); /* mask all interrupts before resume */ mmu_write(LIMA_MMU_INT_MASK, 0); @@ -102,14 +102,14 @@ int lima_mmu_init(struct lima_ip *ip) mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE); if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) { - dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip)); + dev_err(dev->dev, "%s dte write test fail\n", lima_ip_name(ip)); return -EIO; } err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler, IRQF_SHARED, lima_ip_name(ip), ip); if (err) { - dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip)); + dev_err(dev->dev, "%s fail to request irq\n", lima_ip_name(ip)); return err; } @@ -152,7 +152,7 @@ void lima_mmu_page_fault_resume(struct lima_ip *ip) u32 v; if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) { - dev_info(dev->dev, "mmu resume\n"); + dev_info(dev->dev, "%s resume\n", lima_ip_name(ip)); mmu_write(LIMA_MMU_INT_MASK, 0); mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE); diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c index e397e1146e96..113cb9b215cd 100644 --- a/drivers/gpu/drm/lima/lima_pmu.c +++ b/drivers/gpu/drm/lima/lima_pmu.c @@ -21,7 +21,8 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip) v, v & LIMA_PMU_INT_CMD_MASK, 100, 100000); if (err) { - dev_err(dev->dev, "timeout wait pmu cmd\n"); + dev_err(dev->dev, "%s timeout wait pmu cmd\n", + lima_ip_name(ip)); return err; } diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c index a5c95bed08c0..d0d2db0ef1ce 100644 --- a/drivers/gpu/drm/lima/lima_pp.c +++ b/drivers/gpu/drm/lima/lima_pp.c @@ -26,8 +26,8 @@ static void lima_pp_handle_irq(struct lima_ip *ip, u32 state) if (state & LIMA_PP_IRQ_MASK_ERROR) { u32 status = pp_read(LIMA_PP_STATUS); - dev_err(dev->dev, "pp error irq state=%x status=%x\n", - state, status); + dev_err(dev->dev, "%s error irq state=%x status=%x\n", + lima_ip_name(ip), state, status); pipe->error = true; @@ -125,7 +125,7 @@ static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip) ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100); if (ret) { - dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip)); + dev_err(dev->dev, "%s reset time out\n", lima_ip_name(ip)); return ret; } @@ -168,6 +168,11 @@ static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb) } } +static int lima_pp_bus_stop_poll(struct lima_ip *ip) +{ + return !!(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_BUS_STOPPED); +} + static int lima_pp_hard_reset_poll(struct lima_ip *ip) { pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000); @@ -181,16 +186,31 @@ static int lima_pp_hard_reset(struct lima_ip *ip) pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000); pp_write(LIMA_PP_INT_MASK, 0); + + pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_STOP_BUS); + ret = lima_poll_timeout(ip, lima_pp_bus_stop_poll, 10, 100); + if (ret) { + dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip)); + return ret; + } + pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET); ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100); if (ret) { - dev_err(dev->dev, "pp hard reset timeout\n"); + dev_err(dev->dev, "%s hard reset timeout\n", 
lima_ip_name(ip)); return ret; } pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0); pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL); pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED); + + /* + * if there was an async soft reset queued, + * don't wait for it in the next job + */ + ip->data.async_reset = false; + return 0; } @@ -254,7 +274,7 @@ int lima_pp_init(struct lima_ip *ip) err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler, IRQF_SHARED, lima_ip_name(ip), ip); if (err) { - dev_err(dev->dev, "pp %s fail to request irq\n", + dev_err(dev->dev, "%s fail to request irq\n", lima_ip_name(ip)); return err; } @@ -289,7 +309,7 @@ int lima_pp_bcast_init(struct lima_ip *ip) err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler, IRQF_SHARED, lima_ip_name(ip), ip); if (err) { - dev_err(dev->dev, "pp %s fail to request irq\n", + dev_err(dev->dev, "%s fail to request irq\n", lima_ip_name(ip)); return err; } @@ -403,8 +423,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe) for (i = 0; i < pipe->num_processor; i++) { struct lima_ip *ip = pipe->processor[i]; - dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n", - i, pp_read(LIMA_PP_INT_STATUS), pp_read(LIMA_PP_STATUS)); + dev_err(ip->dev->dev, "%s task error %d int_state=%x status=%x\n", + lima_ip_name(ip), i, pp_read(LIMA_PP_INT_STATUS), + pp_read(LIMA_PP_STATUS)); lima_pp_hard_reset(ip); } diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index c3bf8cda8498..00b19adfc888 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ +#include <linux/hardirq.h> #include <linux/iosys-map.h> #include <linux/kthread.h> #include <linux/slab.h> @@ -153,13 +154,12 @@ void lima_sched_task_fini(struct lima_sched_task *task) } int lima_sched_context_init(struct lima_sched_pipe *pipe, - struct lima_sched_context *context, - atomic_t *guilty) + struct lima_sched_context *context) { struct drm_gpu_scheduler *sched = &pipe->base; return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL, - &sched, 1, guilty); + &sched, 1, NULL); } void lima_sched_context_fini(struct lima_sched_pipe *pipe, @@ -401,9 +401,35 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); struct lima_sched_task *task = to_lima_task(job); struct lima_device *ldev = pipe->ldev; + struct lima_ip *ip = pipe->processor[0]; + int i; + + /* + * If the GPU managed to complete this jobs fence, the timeout is + * spurious. Bail out. + */ + if (dma_fence_is_signaled(task->fence)) { + DRM_WARN("%s spurious timeout\n", lima_ip_name(ip)); + return DRM_GPU_SCHED_STAT_NOMINAL; + } + + /* + * Lima IRQ handler may take a long time to process an interrupt + * if there is another IRQ handler hogging the processing. + * In order to catch such cases and not report spurious Lima job + * timeouts, synchronize the IRQ handler and re-check the fence + * status. 
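The new lima timeout handler checks for a spurious timeout twice: once up front, and once more after synchronize_irq() has drained any interrupt handler that might still be about to signal the fence. The same two-step pattern as a self-contained sketch (demo function, not the driver's):

#include <linux/dma-fence.h>
#include <linux/interrupt.h>

static bool demo_timeout_is_spurious(struct dma_fence *fence,
                                     const unsigned int *irqs, int nr_irqs)
{
        int i;

        if (dma_fence_is_signaled(fence))
                return true;

        /* a hogged IRQ line can delay completion past the timeout;
         * wait for in-flight handlers, then re-check the fence */
        for (i = 0; i < nr_irqs; i++)
                synchronize_irq(irqs[i]);

        return dma_fence_is_signaled(fence);
}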
+ */ + for (i = 0; i < pipe->num_processor; i++) + synchronize_irq(pipe->processor[i]->irq); + + if (dma_fence_is_signaled(task->fence)) { + DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip)); + return DRM_GPU_SCHED_STAT_NOMINAL; + } if (!pipe->error) - DRM_ERROR("lima job timeout\n"); + DRM_ERROR("%s job timeout\n", lima_ip_name(ip)); drm_sched_stop(&pipe->base, &task->base); @@ -417,8 +443,6 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job if (pipe->bcast_mmu) lima_mmu_page_fault_resume(pipe->bcast_mmu); else { - int i; - for (i = 0; i < pipe->num_mmu; i++) lima_mmu_page_fault_resume(pipe->mmu[i]); } @@ -481,7 +505,7 @@ static void lima_sched_recover_work(struct work_struct *work) int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) { unsigned int timeout = lima_sched_timeout_ms > 0 ? - lima_sched_timeout_ms : 500; + lima_sched_timeout_ms : 10000; pipe->fence_context = dma_fence_context_alloc(1); spin_lock_init(&pipe->fence_lock); diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h index 6a11764d87b3..6bd4f3b70109 100644 --- a/drivers/gpu/drm/lima/lima_sched.h +++ b/drivers/gpu/drm/lima/lima_sched.h @@ -91,8 +91,7 @@ int lima_sched_task_init(struct lima_sched_task *task, void lima_sched_task_fini(struct lima_sched_task *task); int lima_sched_context_init(struct lima_sched_pipe *pipe, - struct lima_sched_context *context, - atomic_t *guilty); + struct lima_sched_context *context); void lima_sched_context_fini(struct lima_sched_pipe *pipe, struct lima_sched_context *context); struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task); diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index 2136a596efa1..0ba72102636a 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2042,12 +2042,12 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge) return ret; } -static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *mtk_dp_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); bool enabled = mtk_dp->enabled; - struct edid *new_edid = NULL; + const struct drm_edid *drm_edid; struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg; if (!enabled) { @@ -2055,7 +2055,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge, mtk_dp_aux_panel_poweron(mtk_dp, true); } - new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc); + drm_edid = drm_edid_read_ddc(connector, &mtk_dp->aux.ddc); /* * Parse capability here to let atomic_get_input_bus_fmts and @@ -2063,17 +2063,26 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge, */ if (mtk_dp_parse_capabilities(mtk_dp)) { drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n"); - kfree(new_edid); - new_edid = NULL; + drm_edid_free(drm_edid); + drm_edid = NULL; } - if (new_edid) { + if (drm_edid) { + /* + * FIXME: get rid of drm_edid_raw() + */ + const struct edid *edid = drm_edid_raw(drm_edid); struct cea_sad *sads; - audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads); + audio_caps->sad_count = drm_edid_to_sad(edid, &sads); kfree(sads); - audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid); + /* + * FIXME: This should use connector->display_info.has_audio from + * a path that has read the EDID and called + * drm_edid_connector_update(). 
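This mtk_dp hunk is one instance of the drm_edid migration that recurs through this pull: bridges implement .edid_read returning an opaque const struct drm_edid, which the caller releases with drm_edid_free(), instead of .get_edid returning a raw struct edid. The minimal shape of such a hook, assuming the DDC adapter is reachable through the generic bridge->ddc pointer (the drivers here keep it in their own state instead):

#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>

static const struct drm_edid *
demo_bridge_edid_read(struct drm_bridge *bridge,
                      struct drm_connector *connector)
{
        return drm_edid_read_ddc(connector, bridge->ddc);
}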
+ */ + audio_caps->detect_monitor = drm_detect_monitor_audio(edid); } if (!enabled) { @@ -2081,7 +2090,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge, drm_atomic_bridge_chain_post_disable(bridge, connector->state->state); } - return new_edid; + return drm_edid; } static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux, @@ -2433,7 +2442,7 @@ static const struct drm_bridge_funcs mtk_dp_bridge_funcs = { .atomic_enable = mtk_dp_bridge_atomic_enable, .atomic_disable = mtk_dp_bridge_atomic_disable, .mode_valid = mtk_dp_bridge_mode_valid, - .get_edid = mtk_dp_get_edid, + .edid_read = mtk_dp_edid_read, .detect = mtk_dp_bdg_detect, }; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 86133bf16326..c6bdc565e4a9 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1265,19 +1265,27 @@ static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridg return mtk_hdmi_detect(hdmi); } -static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); - struct edid *edid; + const struct drm_edid *drm_edid; if (!hdmi->ddc_adpt) return NULL; - edid = drm_get_edid(connector, hdmi->ddc_adpt); - if (!edid) - return NULL; - hdmi->dvi_mode = !drm_detect_monitor_audio(edid); - return edid; + drm_edid = drm_edid_read_ddc(connector, hdmi->ddc_adpt); + if (drm_edid) { + /* + * FIXME: This should use !connector->display_info.has_audio (or + * !connector->display_info.is_hdmi) from a path that has read + * the EDID and called drm_edid_connector_update(). 
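Until connector->display_info is populated on these paths, both MediaTek hunks unwrap the opaque EDID with drm_edid_raw() so the legacy helpers keep working; the FIXME comments mark it as transitional. The interim step in isolation:

#include <drm/drm_edid.h>

static bool demo_edid_has_audio(const struct drm_edid *drm_edid)
{
        /* transitional: unwrap only for not-yet-converted helpers */
        const struct edid *edid = drm_edid_raw(drm_edid);

        return edid && drm_detect_monitor_audio(edid);
}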
+ */ + const struct edid *edid = drm_edid_raw(drm_edid); + + hdmi->dvi_mode = !drm_detect_monitor_audio(edid); + } + + return drm_edid; } static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge, @@ -1417,7 +1425,7 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = { .atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable, .atomic_enable = mtk_hdmi_bridge_atomic_enable, .detect = mtk_hdmi_bridge_detect, - .get_edid = mtk_hdmi_bridge_get_edid, + .edid_read = mtk_hdmi_bridge_edid_read, }; static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index cb674966e9ac..17a5cca007e2 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -312,7 +312,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) /* Encoder Initialization */ - ret = meson_encoder_cvbs_init(priv); + ret = meson_encoder_cvbs_probe(priv); if (ret) goto exit_afbcd; @@ -326,12 +326,12 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) } } - ret = meson_encoder_hdmi_init(priv); + ret = meson_encoder_hdmi_probe(priv); if (ret) goto exit_afbcd; if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { - ret = meson_encoder_dsi_init(priv); + ret = meson_encoder_dsi_probe(priv); if (ret) goto exit_afbcd; } diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c index 3f73b211fa8e..d1191de855d9 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -219,7 +219,7 @@ static const struct drm_bridge_funcs meson_encoder_cvbs_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, }; -int meson_encoder_cvbs_init(struct meson_drm *priv) +int meson_encoder_cvbs_probe(struct meson_drm *priv) { struct drm_device *drm = priv->drm; struct meson_encoder_cvbs *meson_encoder_cvbs; @@ -240,10 +240,9 @@ int meson_encoder_cvbs_init(struct meson_drm *priv) meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote); of_node_put(remote); - if (!meson_encoder_cvbs->next_bridge) { - dev_err(priv->dev, "Failed to find CVBS Connector bridge\n"); - return -EPROBE_DEFER; - } + if (!meson_encoder_cvbs->next_bridge) + return dev_err_probe(priv->dev, -EPROBE_DEFER, + "Failed to find CVBS Connector bridge\n"); /* CVBS Encoder Bridge */ meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs; @@ -259,10 +258,9 @@ int meson_encoder_cvbs_init(struct meson_drm *priv) /* Encoder */ ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder, DRM_MODE_ENCODER_TVDAC); - if (ret) { - dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(priv->dev, ret, + "Failed to init CVBS encoder\n"); meson_encoder_cvbs->encoder.possible_crtcs = BIT(0); @@ -276,10 +274,10 @@ int meson_encoder_cvbs_init(struct meson_drm *priv) /* Initialize & attach Bridge Connector */ connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder); - if (IS_ERR(connector)) { - dev_err(priv->dev, "Unable to create CVBS bridge connector\n"); - return PTR_ERR(connector); - } + if (IS_ERR(connector)) + return dev_err_probe(priv->dev, PTR_ERR(connector), + "Unable to create CVBS bridge connector\n"); + drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder); priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs; @@ -294,6 +292,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv) if 
(priv->encoders[MESON_ENC_CVBS]) { meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS]; drm_bridge_remove(&meson_encoder_cvbs->bridge); - drm_bridge_remove(meson_encoder_cvbs->next_bridge); } } diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h index 09710fec3c66..7b7bc85c03f7 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.h +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h @@ -24,7 +24,7 @@ struct meson_cvbs_mode { /* Modes supported by the CVBS output */ extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT]; -int meson_encoder_cvbs_init(struct meson_drm *priv); +int meson_encoder_cvbs_probe(struct meson_drm *priv); void meson_encoder_cvbs_remove(struct meson_drm *priv); #endif /* __MESON_VENC_CVBS_H */ diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c index 3f93c70488ca..7816902f5907 100644 --- a/drivers/gpu/drm/meson/meson_encoder_dsi.c +++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c @@ -100,7 +100,7 @@ static const struct drm_bridge_funcs meson_encoder_dsi_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, }; -int meson_encoder_dsi_init(struct meson_drm *priv) +int meson_encoder_dsi_probe(struct meson_drm *priv) { struct meson_encoder_dsi *meson_encoder_dsi; struct device_node *remote; @@ -118,10 +118,9 @@ int meson_encoder_dsi_init(struct meson_drm *priv) } meson_encoder_dsi->next_bridge = of_drm_find_bridge(remote); - if (!meson_encoder_dsi->next_bridge) { - dev_dbg(priv->dev, "Failed to find DSI transceiver bridge\n"); - return -EPROBE_DEFER; - } + if (!meson_encoder_dsi->next_bridge) + return dev_err_probe(priv->dev, -EPROBE_DEFER, + "Failed to find DSI transceiver bridge\n"); /* DSI Encoder Bridge */ meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs; @@ -135,19 +134,17 @@ int meson_encoder_dsi_init(struct meson_drm *priv) /* Encoder */ ret = drm_simple_encoder_init(priv->drm, &meson_encoder_dsi->encoder, DRM_MODE_ENCODER_DSI); - if (ret) { - dev_err(priv->dev, "Failed to init DSI encoder: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(priv->dev, ret, + "Failed to init DSI encoder\n"); meson_encoder_dsi->encoder.possible_crtcs = BIT(0); /* Attach DSI Encoder Bridge to Encoder */ ret = drm_bridge_attach(&meson_encoder_dsi->encoder, &meson_encoder_dsi->bridge, NULL, 0); - if (ret) { - dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(priv->dev, ret, + "Failed to attach bridge\n"); /* * We should have now in place: @@ -168,6 +165,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv) if (priv->encoders[MESON_ENC_DSI]) { meson_encoder_dsi = priv->encoders[MESON_ENC_DSI]; drm_bridge_remove(&meson_encoder_dsi->bridge); - drm_bridge_remove(meson_encoder_dsi->next_bridge); } } diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.h b/drivers/gpu/drm/meson/meson_encoder_dsi.h index 9277d7015193..85d5b61805f2 100644 --- a/drivers/gpu/drm/meson/meson_encoder_dsi.h +++ b/drivers/gpu/drm/meson/meson_encoder_dsi.h @@ -7,7 +7,7 @@ #ifndef __MESON_ENCODER_DSI_H #define __MESON_ENCODER_DSI_H -int meson_encoder_dsi_init(struct meson_drm *priv); +int meson_encoder_dsi_probe(struct meson_drm *priv); void meson_encoder_dsi_remove(struct meson_drm *priv); #endif /* __MESON_ENCODER_DSI_H */ diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c index fff6ce394f98..0593a1cde906 100644 --- 
a/drivers/gpu/drm/meson/meson_encoder_hdmi.c +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -366,7 +366,7 @@ static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, }; -int meson_encoder_hdmi_init(struct meson_drm *priv) +int meson_encoder_hdmi_probe(struct meson_drm *priv) { struct meson_encoder_hdmi *meson_encoder_hdmi; struct platform_device *pdev; @@ -386,8 +386,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv) meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote); if (!meson_encoder_hdmi->next_bridge) { - dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n"); - ret = -EPROBE_DEFER; + ret = dev_err_probe(priv->dev, -EPROBE_DEFER, + "Failed to find HDMI transceiver bridge\n"); goto err_put_node; } @@ -405,7 +405,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv) ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder, DRM_MODE_ENCODER_TMDS); if (ret) { - dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret); + dev_err_probe(priv->dev, ret, "Failed to init HDMI encoder\n"); goto err_put_node; } @@ -415,7 +415,7 @@ int meson_encoder_hdmi_init(struct meson_drm *priv) ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) { - dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); + dev_err_probe(priv->dev, ret, "Failed to attach bridge\n"); goto err_put_node; } @@ -423,8 +423,9 @@ int meson_encoder_hdmi_init(struct meson_drm *priv) meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm, &meson_encoder_hdmi->encoder); if (IS_ERR(meson_encoder_hdmi->connector)) { - dev_err(priv->dev, "Unable to create HDMI bridge connector\n"); - ret = PTR_ERR(meson_encoder_hdmi->connector); + ret = dev_err_probe(priv->dev, + PTR_ERR(meson_encoder_hdmi->connector), + "Unable to create HDMI bridge connector\n"); goto err_put_node; } drm_connector_attach_encoder(meson_encoder_hdmi->connector, @@ -486,6 +487,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv) if (priv->encoders[MESON_ENC_HDMI]) { meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI]; drm_bridge_remove(&meson_encoder_hdmi->bridge); - drm_bridge_remove(meson_encoder_hdmi->next_bridge); } } diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h index a6cd38eb5f71..fd5485875db8 100644 --- a/drivers/gpu/drm/meson/meson_encoder_hdmi.h +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h @@ -7,7 +7,7 @@ #ifndef __MESON_ENCODER_HDMI_H #define __MESON_ENCODER_HDMI_H -int meson_encoder_hdmi_init(struct meson_drm *priv); +int meson_encoder_hdmi_probe(struct meson_drm *priv); void meson_encoder_hdmi_remove(struct meson_drm *priv); #endif /* __MESON_ENCODER_HDMI_H */ diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig index b28c5e4828f4..5e4d48df4854 100644 --- a/drivers/gpu/drm/mgag200/Kconfig +++ b/drivers/gpu/drm/mgag200/Kconfig @@ -11,3 +11,15 @@ config DRM_MGAG200 MGA G200 desktop chips and the server variants. It requires 0.3.0 of the modesetting userspace driver, and a version of mga driver that will fail on KMS enabled devices. + +config DRM_MGAG200_IOBURST_WORKAROUND + bool "Disable buffer caching" + depends on DRM_MGAG200 && PREEMPT_RT && X86 + help + Enable a workaround to avoid I/O bursts within the mgag200 driver at + the expense of overall display performance. 
+ It restores the <v5.10 behavior, by mapping the framebuffer in system + RAM as Write-Combining, and flushing the cache after each write. + This is only useful on x86_64 if you want to run processes with + deterministic latency. + If unsure, say N. diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 54fce00e2136..573dbe256aa8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -84,6 +84,20 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size) return offset - 65536; } +#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND) +static struct drm_gem_object *mgag200_create_object(struct drm_device *dev, size_t size) +{ + struct drm_gem_shmem_object *shmem; + + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!shmem) + return NULL; + + shmem->map_wc = true; + return &shmem->base; +} +#endif + /* * DRM driver */ @@ -99,6 +113,9 @@ static const struct drm_driver mgag200_driver = { .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, +#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND) + .gem_create_object = mgag200_create_object, +#endif DRM_GEM_SHMEM_DRIVER_OPS, }; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 0eb769dd76ce..e17cb4c5f774 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -13,6 +13,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_cache.h> #include <drm/drm_damage_helper.h> #include <drm/drm_edid.h> #include <drm/drm_format_helper.h> @@ -436,6 +437,13 @@ static void mgag200_handle_damage(struct mga_device *mdev, const struct iosys_ma iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip)); drm_fb_memcpy(&dst, fb->pitches, vmap, fb, clip); + + /* Flushing the cache greatly improves latency on x86_64 */ +#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND) + if (!vmap->is_iomem) + drm_clflush_virt_range(vmap->vaddr + clip->y1 * fb->pitches[0], + drm_rect_height(clip) * fb->pitches[0]); +#endif } /* diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index f5e01471b0b0..4a5b5112227f 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -236,24 +236,33 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge, msm_hdmi_audio_update(hdmi); } -static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; - struct edid *edid; + const struct drm_edid *drm_edid; uint32_t hdmi_ctrl; hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); - edid = drm_get_edid(connector, hdmi->i2c); + drm_edid = drm_edid_read_ddc(connector, hdmi->i2c); hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); - hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); + if (drm_edid) { + /* + * FIXME: This should use connector->display_info.is_hdmi from a + * path that has read the EDID and called + * drm_edid_connector_update(). 
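Stepping back to the mgag200 workaround introduced above: it has two cooperating pieces, a gem_create_object hook that forces shmem buffers to be mapped write-combining, and a damage handler that flushes exactly the dirtied scanlines after each copy. A condensed sketch of the flush step, assuming a system-RAM vmap and the clip rectangle produced by the damage helper:

#include <drm/drm_cache.h>
#include <drm/drm_rect.h>

static void demo_flush_damage(void *vaddr, unsigned int pitch,
                              const struct drm_rect *clip)
{
        /* flush only the damaged scanlines; on x86-64 this trades
         * raw throughput for bounded I/O bursts */
        drm_clflush_virt_range(vaddr + (size_t)clip->y1 * pitch,
                               (unsigned long)drm_rect_height(clip) * pitch);
}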
+ */ + const struct edid *edid = drm_edid_raw(drm_edid); - return edid; + hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); + } + + return drm_edid; } static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge, @@ -290,12 +299,12 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge } static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = { - .pre_enable = msm_hdmi_bridge_pre_enable, - .post_disable = msm_hdmi_bridge_post_disable, - .mode_set = msm_hdmi_bridge_mode_set, - .mode_valid = msm_hdmi_bridge_mode_valid, - .get_edid = msm_hdmi_bridge_get_edid, - .detect = msm_hdmi_bridge_detect, + .pre_enable = msm_hdmi_bridge_pre_enable, + .post_disable = msm_hdmi_bridge_post_disable, + .mode_set = msm_hdmi_bridge_mode_set, + .mode_valid = msm_hdmi_bridge_mode_valid, + .edid_read = msm_hdmi_bridge_edid_read, + .detect = msm_hdmi_bridge_detect, }; static void diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 5cc8d358cc97..d5512037c38b 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -21,6 +21,8 @@ struct msm_iommu_pagetable { struct msm_mmu base; struct msm_mmu *parent; struct io_pgtable_ops *pgtbl_ops; + const struct iommu_flush_ops *tlb; + struct device *iommu_dev; unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ phys_addr_t ttbr; u32 asid; @@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = { static void msm_iommu_tlb_flush_all(void *cookie) { + struct msm_iommu_pagetable *pagetable = cookie; + struct adreno_smmu_priv *adreno_smmu; + + if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) + return; + + adreno_smmu = dev_get_drvdata(pagetable->parent->dev); + + pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie); + + pm_runtime_put_autosuspend(pagetable->iommu_dev); } static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { + struct msm_iommu_pagetable *pagetable = cookie; + struct adreno_smmu_priv *adreno_smmu; + + if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) + return; + + adreno_smmu = dev_get_drvdata(pagetable->parent->dev); + + pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie); + + pm_runtime_put_autosuspend(pagetable->iommu_dev); } static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, @@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, { } -static const struct iommu_flush_ops null_tlb_ops = { +static const struct iommu_flush_ops tlb_ops = { .tlb_flush_all = msm_iommu_tlb_flush_all, .tlb_flush_walk = msm_iommu_tlb_flush_walk, .tlb_add_page = msm_iommu_tlb_add_page, @@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* The incoming cfg will have the TTBR1 quirk enabled */ ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1; - ttbr0_cfg.tlb = &null_tlb_ops; + ttbr0_cfg.tlb = &tlb_ops; pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, - &ttbr0_cfg, iommu->domain); + &ttbr0_cfg, pagetable); if (!pagetable->pgtbl_ops) { kfree(pagetable); @@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* Needed later for TLB flush */ pagetable->parent = parent; + pagetable->tlb = ttbr1_cfg->tlb; + pagetable->iommu_dev = ttbr1_cfg->iommu_dev; pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap; pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c index 
18de2f17e249..ea10bf81582e 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_drv.c +++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c @@ -167,7 +167,11 @@ static int lcdif_load(struct drm_device *drm) return ret; /* Modeset init */ - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) { + dev_err(drm->dev, "Failed to initialize mode config\n"); + return ret; + } ret = lcdif_kms_init(lcdif); if (ret < 0) { @@ -227,7 +231,6 @@ static void lcdif_unload(struct drm_device *drm) drm_crtc_vblank_off(&lcdif->crtc); drm_kms_helper_poll_fini(drm); - drm_mode_config_cleanup(drm); pm_runtime_put_sync(drm->dev); pm_runtime_disable(drm->dev); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index b483ef48216a..cb5ce4e81fc7 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -249,7 +249,11 @@ static int mxsfb_load(struct drm_device *drm, pm_runtime_enable(drm->dev); /* Modeset init */ - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) { + dev_err(drm->dev, "Failed to initialize mode config\n"); + goto err_vblank; + } ret = mxsfb_kms_init(mxsfb); if (ret < 0) { @@ -312,7 +316,6 @@ err_vblank: static void mxsfb_unload(struct drm_device *drm) { drm_kms_helper_poll_fini(drm); - drm_mode_config_cleanup(drm); pm_runtime_get_sync(drm->dev); mxsfb_irq_uninstall(drm); diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 1e6aaf95ff7c..ceef470c9fbf 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM help Say Y here if you want to enable experimental support for Shared Virtual Memory (SVM). + +config DRM_NOUVEAU_GSP_DEFAULT + bool "Use GSP firmware for Turing/Ampere (needs firmware installed)" + depends on DRM_NOUVEAU + default n + help + Say Y here if you want to use the GSP codepaths by default on + Turing and Ampere GPUs. diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8d37a694b772..0c3d88ad0b0e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -28,6 +28,7 @@ #include "wndw.h" #include "handles.h" +#include <linux/backlight.h> #include <linux/dma-mapping.h> #include <linux/hdmi.h> #include <linux/component.h> diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index d1437c08645f..6f5d376d8fcc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -9,7 +9,7 @@ #define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT) struct nvkm_gsp_mem { - u32 size; + size_t size; void *data; dma_addr_t addr; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index a04156ca8390..cd14f993bdd1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -128,12 +128,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, struct nouveau_abi16_ntfy *ntfy, *temp; /* Cancel all jobs from the entity's queue. 
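The mxsfb and lcdif hunks above are a textbook drm-managed conversion: drmm_mode_config_init() registers its own cleanup action against the drm_device, so the explicit drm_mode_config_cleanup() call in the unload path is deleted outright rather than relocated. The resulting load-path shape, with demo names:

#include <drm/drm_mode_config.h>

static int demo_load(struct drm_device *drm)
{
        int ret;

        ret = drmm_mode_config_init(drm);
        if (ret) {
                dev_err(drm->dev, "Failed to initialize mode config\n");
                return ret;
        }

        /* ... KMS init; mode-config teardown is now drm-managed,
         * so unload carries no drm_mode_config_cleanup() ... */
        return 0;
}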
*/ - drm_sched_entity_fini(&chan->sched.entity); + if (chan->sched) + drm_sched_entity_fini(&chan->sched->entity); if (chan->chan) nouveau_channel_idle(chan->chan); - nouveau_sched_fini(&chan->sched); + if (chan->sched) + nouveau_sched_destroy(&chan->sched); /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { @@ -197,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_device *device = &drm->client.device; + struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device); struct nvkm_gr *gr = nvxx_gr(device); struct drm_nouveau_getparam *getparam = data; struct pci_dev *pdev = to_pci_dev(dev->dev); @@ -261,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) getparam->value = nouveau_exec_push_max_from_ib_max(ib_max); break; } + case NOUVEAU_GETPARAM_VRAM_BAR_SIZE: + getparam->value = nvkm_device->func->resource_size(nvkm_device, 1); + break; + case NOUVEAU_GETPARAM_VRAM_USED: { + struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + getparam->value = (u64)ttm_resource_manager_usage(vram_mgr) << PAGE_SHIFT; + break; + } default: NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param); return -EINVAL; @@ -337,10 +348,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq, - chan->chan->dma.ib_max); - if (ret) - goto done; + /* If we're not using the VM_BIND uAPI, we don't need a scheduler. + * + * The client lock is already acquired by nouveau_abi16_get(). + */ + if (nouveau_cli_uvmm(cli)) { + ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq, + chan->chan->dma.ib_max); + if (ret) + goto done; + } init->channel = chan->chan->chid; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 1f5e243c0c75..11c8c4a80079 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -26,7 +26,7 @@ struct nouveau_abi16_chan { struct nouveau_bo *ntfy; struct nouveau_vma *ntfy_vma; struct nvkm_mm heap; - struct nouveau_sched sched; + struct nouveau_sched *sched; }; struct nouveau_abi16 { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 6f6c31a9937b..a947e1d5f309 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -201,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli) WARN_ON(!list_empty(&cli->worker)); usif_client_fini(cli); - nouveau_sched_fini(&cli->sched); + if (cli->sched) + nouveau_sched_destroy(&cli->sched); if (uvmm) nouveau_uvmm_fini(uvmm); nouveau_vmm_fini(&cli->svm); @@ -311,7 +312,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, cli->mem = &mems[ret]; /* Don't pass in the (shared) sched_wq in order to let - * nouveau_sched_init() create a dedicated one for VM_BIND jobs. + * nouveau_sched_create() create a dedicated one for VM_BIND jobs. * * This is required to ensure that for VM_BIND jobs free_job() work and * run_job() work can always run concurrently and hence, free_job() work @@ -320,7 +321,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, * locks which indirectly or directly are held for allocations * elsewhere. 
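The nouveau scheduler rework turns an embedded struct nouveau_sched into a pointer with a create()/destroy() pair, which is what lets channels skip allocating a scheduler at all when the VM_BIND uAPI is unused. The generic shape of that conversion, demo names throughout, with the old init/fini demoted to static helpers:

#include <linux/slab.h>

struct demo_sched { int placeholder; };

static int demo_sched_init(struct demo_sched *sched) { return 0; }
static void demo_sched_fini(struct demo_sched *sched) { }

int demo_sched_create(struct demo_sched **psched)
{
        struct demo_sched *sched;
        int ret;

        sched = kzalloc(sizeof(*sched), GFP_KERNEL);
        if (!sched)
                return -ENOMEM;

        ret = demo_sched_init(sched);
        if (ret) {
                kfree(sched);
                return ret;
        }

        *psched = sched;
        return 0;
}

void demo_sched_destroy(struct demo_sched **psched)
{
        demo_sched_fini(*psched);
        kfree(*psched);
        *psched = NULL;
}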
*/ - ret = nouveau_sched_init(&cli->sched, drm, NULL, 1); + ret = nouveau_sched_create(&cli->sched, drm, NULL, 1); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8a6d94c8b163..e239c6bf4afa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -98,7 +98,7 @@ struct nouveau_cli { bool disabled; } uvmm; - struct nouveau_sched sched; + struct nouveau_sched *sched; const struct nvif_mclass *mem; diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index bc5d71b79ab2..e65c0ef23bc7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -389,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (ret) goto out; - args.sched = &chan16->sched; + args.sched = chan16->sched; args.file_priv = file_priv; args.chan = chan; diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index dd98f6910f9c..32fa2e273965 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -398,7 +398,7 @@ static const struct drm_sched_backend_ops nouveau_sched_ops = { .free_job = nouveau_sched_free_job, }; -int +static int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, struct workqueue_struct *wq, u32 credit_limit) { @@ -453,7 +453,30 @@ fail_wq: return ret; } -void +int +nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit) +{ + struct nouveau_sched *sched; + int ret; + + sched = kzalloc(sizeof(*sched), GFP_KERNEL); + if (!sched) + return -ENOMEM; + + ret = nouveau_sched_init(sched, drm, wq, credit_limit); + if (ret) { + kfree(sched); + return ret; + } + + *psched = sched; + + return 0; +} + + +static void nouveau_sched_fini(struct nouveau_sched *sched) { struct drm_gpu_scheduler *drm_sched = &sched->base; @@ -471,3 +494,14 @@ nouveau_sched_fini(struct nouveau_sched *sched) if (sched->wq) destroy_workqueue(sched->wq); } + +void +nouveau_sched_destroy(struct nouveau_sched **psched) +{ + struct nouveau_sched *sched = *psched; + + nouveau_sched_fini(sched); + kfree(sched); + + *psched = NULL; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index a6528f5981e6..e1f01a23e6f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -111,8 +111,8 @@ struct nouveau_sched { } job; }; -int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, - struct workqueue_struct *wq, u32 credit_limit); -void nouveau_sched_fini(struct nouveau_sched *sched); +int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit); +void nouveau_sched_destroy(struct nouveau_sched **psched); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 4d1008915499..b4da82ddbb6b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -1007,7 +1007,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id) if (ret) return ret; - buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL); + buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL); if (!buffer->fault) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c 
b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 4f223c972c6a..0a0a11dc9ec0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1740,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, if (ret) return ret; - args.sched = &cli->sched; + args.sched = cli->sched; args.file_priv = file_priv; ret = nouveau_uvmm_vm_bind(&args); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c index 4135690326f4..3a30bea30e36 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c @@ -168,12 +168,11 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device, rm->flush = r535_bar_flush; ret = gf100_bar_new_(rm, device, type, inst, &bar); - *pbar = bar; if (ret) { - if (!bar) - kfree(rm); + kfree(rm); return ret; } + *pbar = bar; bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE); if (!bar->flushBAR2PhysMode) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 19188683c8fc..8c2bf1c16f2a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name) return (void *)fw; } +static void +shadow_fw_release(void *fw) +{ + release_firmware(fw); +} + static const struct nvbios_source shadow_fw = { .name = "firmware", .init = shadow_fw_init, - .fini = (void(*)(void *))release_firmware, + .fini = shadow_fw_release, .read = shadow_fw_read, .rw = false, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 5e1fa176aac4..a64c81385682 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -997,6 +997,32 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) return 0; } +static void +nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) +{ + if (mem->data) { + /* + * Poison the buffer to catch any unexpected access from + * GSP-RM if the buffer was prematurely freed. 
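The relocated nvkm_gsp_mem_dtor() now poisons a coherent buffer before freeing it, so any late access from GSP-RM reads conspicuous 0xFF garbage instead of silently reusing memory, and zeroing the descriptor makes a second dtor call harmless. The dtor in isolation, against a demo struct with the same data/size/addr fields:

#include <linux/dma-mapping.h>
#include <linux/string.h>

struct demo_mem {
        size_t size;
        void *data;
        dma_addr_t addr;
};

static void demo_mem_dtor(struct device *dev, struct demo_mem *mem)
{
        if (!mem->data)
                return;

        memset(mem->data, 0xFF, mem->size);     /* poison first */
        dma_free_coherent(dev, mem->size, mem->data, mem->addr);
        memset(mem, 0, sizeof(*mem));           /* idempotent dtor */
}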
+ */ + memset(mem->data, 0xFF, mem->size); + + dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); + memset(mem, 0, sizeof(*mem)); + } +} + +static int +nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) +{ + mem->size = size; + mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); + if (WARN_ON(!mem->data)) + return -ENOMEM; + + return 0; +} + static int r535_gsp_postinit(struct nvkm_gsp *gsp) { @@ -1024,6 +1050,13 @@ r535_gsp_postinit(struct nvkm_gsp *gsp) nvkm_inth_allow(&gsp->subdev.inth); nvkm_wr32(device, 0x110004, 0x00000040); + + /* Release the DMA buffers that were needed only for boot and init */ + nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw); + nvkm_gsp_mem_dtor(gsp, &gsp->libos); + nvkm_gsp_mem_dtor(gsp, &gsp->rmargs); + nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta); + return ret; } @@ -1532,27 +1565,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) return 0; } -static void -nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) -{ - if (mem->data) { - dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); - mem->data = NULL; - } -} - -static int -nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem) -{ - mem->size = size; - mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); - if (WARN_ON(!mem->data)) - return -ENOMEM; - - return 0; -} - - static int r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) { @@ -1938,20 +1950,20 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) * See kgspCreateRadix3_IMPL */ static int -nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size, +nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, struct nvkm_gsp_radix3 *rx3) { u64 addr; for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { u64 *ptes; - int idx; + size_t bufsize; + int ret, idx; - rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); - rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size, - &rx3->mem[i].addr, GFP_KERNEL); - if (WARN_ON(!rx3->mem[i].data)) - return -ENOMEM; + bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); + ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]); + if (ret) + return ret; ptes = rx3->mem[i].data; if (i == 2) { @@ -1991,7 +2003,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) if (ret) return ret; - ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3); + ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3); if (ret) return ret; @@ -2150,6 +2162,11 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) mutex_destroy(&gsp->cmdq.mutex); r535_gsp_dtor_fws(gsp); + + nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem); + nvkm_gsp_mem_dtor(gsp, &gsp->loginit); + nvkm_gsp_mem_dtor(gsp, &gsp->logintr); + nvkm_gsp_mem_dtor(gsp, &gsp->logrm); } int @@ -2194,7 +2211,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) memcpy(gsp->sig.data, data, size); /* Build radix3 page table for ELF image. 
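One small fix worth flagging from the shadow.c hunk further up: assigning (void (*)(void *))release_firmware to the .fini slot changes the function's type at the indirect call site, which is the kind of mismatch kernel control-flow-integrity checking and -Wcast-function-type-strict object to; hence the correctly typed thunk. Reduced to the bare pattern:

#include <linux/firmware.h>

/* wrong: .fini = (void (*)(void *))release_firmware; the cast
 * hides a type mismatch that kCFI traps at the indirect call */

/* right: a thunk whose type matches the callback table exactly */
static void demo_fw_release(void *fw)
{
        release_firmware(fw);
}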
*/ - ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); + ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); if (ret) return ret; @@ -2295,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { struct nvkm_subdev *subdev = &gsp->subdev; int ret; + bool enable_gsp = fwif->enable; - if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable)) +#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT) + enable_gsp = true; +#endif + if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp)) return -EINVAL; if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) || diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index a26b77d99d52..9b8747d83ee8 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -436,11 +436,11 @@ static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge, hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID); } -static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *hdmi4_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge); - struct edid *edid = NULL; + const struct drm_edid *drm_edid = NULL; unsigned int cec_addr; bool need_enable; int r; @@ -461,13 +461,21 @@ static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge, if (r) goto done; - edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core); + drm_edid = drm_edid_read_custom(connector, hdmi4_core_ddc_read, &hdmi->core); done: hdmi_runtime_put(hdmi); mutex_unlock(&hdmi->lock); - if (edid && edid->extensions) { + if (drm_edid) { + /* + * FIXME: The CEC physical address should be set using + * hdmi4_cec_set_phys_addr(&hdmi->core, + * connector->display_info.source_physical_address) from a path + * that has read the EDID and called + * drm_edid_connector_update(). 
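omapdrm keeps one legacy detour through the raw EDID: per the FIXME, the CEC physical address is still computed from the unparsed bytes until a display_info-based path exists. The interim step, sketched with a demo helper:

#include <drm/drm_edid.h>
#include <media/cec.h>

static u16 demo_edid_phys_addr(const struct drm_edid *drm_edid)
{
        const struct edid *edid = drm_edid_raw(drm_edid);
        unsigned int len;

        if (!edid)
                return CEC_PHYS_ADDR_INVALID;

        len = (edid->extensions + 1) * EDID_LENGTH;
        /* NULL offset: only the address is wanted, not its location */
        return cec_get_edid_phys_addr((const u8 *)edid, len, NULL);
}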
+ */ + const struct edid *edid = drm_edid_raw(drm_edid); unsigned int len = (edid->extensions + 1) * EDID_LENGTH; cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL); @@ -480,7 +488,7 @@ done: if (need_enable) hdmi4_core_disable(&hdmi->core); - return edid; + return drm_edid; } static const struct drm_bridge_funcs hdmi4_bridge_funcs = { @@ -492,7 +500,7 @@ static const struct drm_bridge_funcs hdmi4_bridge_funcs = { .atomic_enable = hdmi4_bridge_enable, .atomic_disable = hdmi4_bridge_disable, .hpd_notify = hdmi4_bridge_hpd_notify, - .get_edid = hdmi4_bridge_get_edid, + .edid_read = hdmi4_bridge_edid_read, }; static void hdmi4_bridge_init(struct omap_hdmi *hdmi) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index e6611c683857..c7ae2235ae99 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -425,11 +425,11 @@ static void hdmi5_bridge_disable(struct drm_bridge *bridge, mutex_unlock(&hdmi->lock); } -static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *hdmi5_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge); - struct edid *edid; + const struct drm_edid *drm_edid; bool need_enable; int idlemode; int r; @@ -452,7 +452,7 @@ static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge, hdmi5_core_ddc_init(&hdmi->core); - edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core); + drm_edid = drm_edid_read_custom(connector, hdmi5_core_ddc_read, &hdmi->core); hdmi5_core_ddc_uninit(&hdmi->core); @@ -464,7 +464,7 @@ static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge, if (need_enable) hdmi_core_disable(hdmi); - return (struct edid *)edid; + return drm_edid; } static const struct drm_bridge_funcs hdmi5_bridge_funcs = { @@ -475,7 +475,7 @@ static const struct drm_bridge_funcs hdmi5_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_enable = hdmi5_bridge_enable, .atomic_disable = hdmi5_bridge_disable, - .get_edid = hdmi5_bridge_get_edid, + .edid_read = hdmi5_bridge_edid_read, }; static void hdmi5_bridge_init(struct omap_hdmi *hdmi) diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index b61abacd1b22..d037b3b8b999 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -145,6 +145,16 @@ config DRM_PANEL_LVDS handling of power supplies or control signals. It implements automatic backlight handling if the panel is attached to a backlight controller. +config DRM_PANEL_HIMAX_HX83112A + tristate "Himax HX83112A-based DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select DRM_KMS_HELPER + help + Say Y here if you want to enable support for Himax HX83112A-based + display panels, such as the one found in the Fairphone 4 smartphone. 
+ config DRM_PANEL_HIMAX_HX8394 tristate "HIMAX HX8394 MIPI-DSI LCD panels" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 1c4f4e7f25bb..f156d7fa0bcc 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o +obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index c4c0f08e9202..bc08814954f9 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -1871,6 +1871,8 @@ static int boe_panel_add(struct boe_panel *boe) gpiod_set_value(boe->enable_gpio, 0); + boe->base.prepare_prev_first = true; + drm_panel_init(&boe->base, dev, &boe_panel_funcs, DRM_MODE_CONNECTOR_DSI); err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation); diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 7d556b1bfa82..d58f90bc48fb 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -413,6 +413,7 @@ static int panel_edp_suspend(struct device *dev) { struct panel_edp *p = dev_get_drvdata(dev); + drm_dp_dpcd_set_powered(p->aux, false); gpiod_set_value_cansleep(p->enable_gpio, 0); regulator_disable(p->supply); p->unprepared_time = ktime_get_boottime(); @@ -469,6 +470,7 @@ static int panel_edp_prepare_once(struct panel_edp *p) } gpiod_set_value_cansleep(p->enable_gpio, 1); + drm_dp_dpcd_set_powered(p->aux, true); p->powered_on_time = ktime_get_boottime(); @@ -507,6 +509,7 @@ static int panel_edp_prepare_once(struct panel_edp *p) return 0; error: + drm_dp_dpcd_set_powered(p->aux, false); gpiod_set_value_cansleep(p->enable_gpio, 0); regulator_disable(p->supply); p->unprepared_time = ktime_get_boottime(); @@ -1002,19 +1005,6 @@ static const struct panel_desc auo_b101ean01 = { }, }; -static const struct drm_display_mode auo_b116xa3_mode = { - .clock = 70589, - .hdisplay = 1366, - .hsync_start = 1366 + 40, - .hsync_end = 1366 + 40 + 40, - .htotal = 1366 + 40 + 40 + 32, - .vdisplay = 768, - .vsync_start = 768 + 10, - .vsync_end = 768 + 10 + 12, - .vtotal = 768 + 10 + 12 + 6, - .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, -}; - static const struct drm_display_mode auo_b116xak01_mode = { .clock = 69300, .hdisplay = 1366, @@ -1963,12 +1953,10 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"), - EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0", - &auo_b116xa3_mode), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), - EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, 
"B116XAN06.1", - &auo_b116xa3_mode), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"), diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c new file mode 100644 index 000000000000..466c27012abf --- /dev/null +++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree. + * Copyright (c) 2024 Luca Weiss <luca.weiss@fairphone.com> + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +/* Manufacturer specific DSI commands */ +#define HX83112A_SETPOWER1 0xb1 +#define HX83112A_SETDISP 0xb2 +#define HX83112A_SETDRV 0xb4 +#define HX83112A_SETEXTC 0xb9 +#define HX83112A_SETBANK 0xbd +#define HX83112A_SETPTBA 0xbf +#define HX83112A_SETDGCLUT 0xc1 +#define HX83112A_SETTCON 0xc7 +#define HX83112A_SETCLOCK 0xcb +#define HX83112A_SETPANEL 0xcc +#define HX83112A_SETPOWER2 0xd2 +#define HX83112A_SETGIP0 0xd3 +#define HX83112A_SETGIP1 0xd5 +#define HX83112A_SETGIP2 0xd6 +#define HX83112A_SETGIP3 0xd8 +#define HX83112A_SETTP1 0xe7 +#define HX83112A_UNKNOWN1 0xe9 + +struct hx83112a_panel { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data supplies[3]; + struct gpio_desc *reset_gpio; +}; + +static inline struct hx83112a_panel *to_hx83112a_panel(struct drm_panel *panel) +{ + return container_of(panel, struct hx83112a_panel, panel); +} + +static void hx83112a_reset(struct hx83112a_panel *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + msleep(20); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + msleep(20); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + msleep(50); +} + +static int hx83112a_on(struct hx83112a_panel *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETEXTC, 0x83, 0x11, 0x2a); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER1, + 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDISP, + 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19, + 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV, + 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0, + 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff, + 0x00, 0x00, 0x14, 0x15, 0x00, 0x29, 0x11, 0x07, + 0x12, 0x00, 0x29); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV, + 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00, + 0x53); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 
0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, 0x01); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTCON, + 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPANEL, 0x08); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER2, 0x2b, 0x2b); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08, + 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07, + 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10, + 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00, + 0x0f); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0, + 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP1, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, + 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03, + 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31, + 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP2, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, + 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00, + 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31, + 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, + 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, + 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, + 0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e, + 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, + 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff, + 0xff, 0xff, 0xff, 0xff); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, + 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, + 0x0e, 0x0e, 0x1e, 0x65, 
0x1c, 0x65, 0x00, 0x50, + 0x20, 0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05, + 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, + 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, + 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x00); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc3); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETCLOCK, 0xd1, 0xd6); + mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f); + mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc6); + mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPTBA, 0x37); + mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f); + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(150); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + msleep(50); + + return 0; +} + +static int hx83112a_disable(struct drm_panel *panel) +{ + struct hx83112a_panel *ctx = to_hx83112a_panel(panel); + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + msleep(20); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + msleep(120); + + return 0; +} + +static int hx83112a_prepare(struct drm_panel *panel) +{ + struct hx83112a_panel *ctx = to_hx83112a_panel(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + hx83112a_reset(ctx); + + ret = hx83112a_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + return ret; + } + + return 0; +} + +static int hx83112a_unprepare(struct drm_panel *panel) +{ + struct hx83112a_panel *ctx = to_hx83112a_panel(panel); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + + return 0; +} + +static const struct drm_display_mode hx83112a_mode = { + .clock = (1080 + 28 + 8 + 8) * (2340 + 27 + 5 + 5) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 28, + .hsync_end = 1080 + 28 + 8, + .htotal = 1080 + 28 + 8 + 8, + .vdisplay = 2340, + .vsync_start = 2340 + 27, + .vsync_end = 2340 + 27 + 5, + .vtotal = 2340 + 27 + 5 + 5, + .width_mm = 67, + .height_mm = 145, + .type = DRM_MODE_TYPE_DRIVER, +}; + +static int hx83112a_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &hx83112a_mode); +} + +static const struct drm_panel_funcs hx83112a_panel_funcs = { + .prepare = hx83112a_prepare, + .unprepare = hx83112a_unprepare, + .disable = hx83112a_disable, + .get_modes = hx83112a_get_modes, +}; + +static int hx83112a_probe(struct mipi_dsi_device *dsi) +{ + 
struct device *dev = &dsi->dev; + struct hx83112a_panel *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->supplies[0].supply = "vdd1"; + ctx->supplies[1].supply = "vsn"; + ctx->supplies[2].supply = "vsp"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), + ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_VIDEO_HSE | + MIPI_DSI_CLOCK_NON_CONTINUOUS; + + drm_panel_init(&ctx->panel, dev, &hx83112a_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ret = drm_panel_of_backlight(&ctx->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static void hx83112a_remove(struct mipi_dsi_device *dsi) +{ + struct hx83112a_panel *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id hx83112a_of_match[] = { + { .compatible = "djn,9a-3r063-1102b" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, hx83112a_of_match); + +static struct mipi_dsi_driver hx83112a_driver = { + .probe = hx83112a_probe, + .remove = hx83112a_remove, + .driver = { + .name = "panel-himax-hx83112a", + .of_match_table = hx83112a_of_match, + }, +}; +module_mipi_dsi_driver(hx83112a_driver); + +MODULE_DESCRIPTION("DRM driver for hx83112a-equipped DSI panels"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c index 39e408c9f762..a4c9a5cb9811 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c @@ -11,6 +11,7 @@ #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> @@ -21,25 +22,224 @@ #include <drm/drm_modes.h> #include <drm/drm_panel.h> +struct ltk500hd1829_cmd { + char cmd; + char data; +}; + +struct ltk500hd1829_desc { + const struct drm_display_mode *mode; + const struct ltk500hd1829_cmd *init; + unsigned int num_init; +}; + struct ltk500hd1829 { struct device *dev; struct drm_panel panel; struct gpio_desc *reset_gpio; struct regulator *vcc; struct regulator *iovcc; + const struct ltk500hd1829_desc *panel_desc; bool prepared; }; -struct ltk500hd1829_cmd { - char cmd; - char data; +static const struct ltk500hd1829_cmd ltk101b4029w_init[] = { + /* Page0 */ + { 0xE0, 0x00 }, + /* PASSWORD */ + { 0xE1, 0x93 }, + { 0xE2, 0x65 }, + { 0xE3, 0xF8 }, + { 0x80, 0x03 }, /* 0X03:4-LANE; 0X02:3-LANE; 0X01:2-LANE */ + /* Page1 */ + { 0xE0, 0x01 }, + /* Set VCOM */ + { 0x00, 0x00 }, + { 0x01, 0x6F }, + /* Set Gamma Power, VGMP,VGMN,VGSP,VGSN */ + { 0x17, 0x00 }, + { 0x18, 0xAF }, /* 4.3V */ + { 0x19, 0x01 }, /* 0.3V */ + { 0x1A, 
0x00 }, + { 0x1B, 0xAF }, /* 4.3V */ + { 0x1C, 0x01 }, /* 0.3V */ + /* Set Gate Power */ + { 0x1F, 0x3E }, /* VGH_R = 15V */ + { 0x20, 0x28 }, /* VGL_R = -12V */ + { 0x21, 0x28 }, /* VGL_R2 = -12V */ + { 0x22, 0x7E }, + /* SETPANEL */ + { 0x35, 0x26 }, + { 0x37, 0x09 }, + /* SET RGBCYC */ + { 0x38, 0x04 }, + { 0x39, 0x00 }, + { 0x3A, 0x01 }, + { 0x3C, 0x7C }, + { 0x3D, 0xFF }, + { 0x3E, 0xFF }, + { 0x3F, 0x7F }, + /* Set TCON */ + { 0x40, 0x06 }, /* RSO = 800 RGB */ + { 0x41, 0xA0 }, /* LN = 640->1280 line */ + { 0x42, 0x81 }, + { 0x43, 0x08 }, /* VFP = 8 */ + { 0x44, 0x0B }, /* VBP = 12 */ + { 0x45, 0x28 }, /* HBP = 40 */ + /* power voltage */ + { 0x55, 0x0F }, /* DCDCM = 0001, JD PWR_IC */ + { 0x57, 0x69 }, + { 0x59, 0x0A }, /* VCL = -2.9V */ + { 0x5A, 0x28 }, /* VGH = 15V */ + { 0x5B, 0x14 }, /* VGL = -11V */ + /* Gamma */ + { 0x5D, 0x7C }, + { 0x5E, 0x65 }, + { 0x5F, 0x55 }, + { 0x60, 0x47 }, + { 0x61, 0x43 }, + { 0x62, 0x32 }, + { 0x63, 0x34 }, + { 0x64, 0x1C }, + { 0x65, 0x33 }, + { 0x66, 0x31 }, + { 0x67, 0x30 }, + { 0x68, 0x4E }, + { 0x69, 0x3C }, + { 0x6A, 0x44 }, + { 0x6B, 0x35 }, + { 0x6C, 0x31 }, + { 0x6D, 0x23 }, + { 0x6E, 0x11 }, + { 0x6F, 0x00 }, + { 0x70, 0x7C }, + { 0x71, 0x65 }, + { 0x72, 0x55 }, + { 0x73, 0x47 }, + { 0x74, 0x43 }, + { 0x75, 0x32 }, + { 0x76, 0x34 }, + { 0x77, 0x1C }, + { 0x78, 0x33 }, + { 0x79, 0x31 }, + { 0x7A, 0x30 }, + { 0x7B, 0x4E }, + { 0x7C, 0x3C }, + { 0x7D, 0x44 }, + { 0x7E, 0x35 }, + { 0x7F, 0x31 }, + { 0x80, 0x23 }, + { 0x81, 0x11 }, + { 0x82, 0x00 }, + /* Page2, for GIP */ + { 0xE0, 0x02 }, + /* GIP_L Pin mapping */ + { 0x00, 0x1E }, + { 0x01, 0x1E }, + { 0x02, 0x41 }, + { 0x03, 0x41 }, + { 0x04, 0x43 }, + { 0x05, 0x43 }, + { 0x06, 0x1F }, + { 0x07, 0x1F }, + { 0x08, 0x35 }, + { 0x09, 0x1F }, + { 0x0A, 0x15 }, + { 0x0B, 0x15 }, + { 0x0C, 0x1F }, + { 0x0D, 0x47 }, + { 0x0E, 0x47 }, + { 0x0F, 0x45 }, + { 0x10, 0x45 }, + { 0x11, 0x4B }, + { 0x12, 0x4B }, + { 0x13, 0x49 }, + { 0x14, 0x49 }, + { 0x15, 0x1F }, + /* GIP_R Pin mapping */ + { 0x16, 0x1E }, + { 0x17, 0x1E }, + { 0x18, 0x40 }, + { 0x19, 0x40 }, + { 0x1A, 0x42 }, + { 0x1B, 0x42 }, + { 0x1C, 0x1F }, + { 0x1D, 0x1F }, + { 0x1E, 0x35 }, + { 0x1F, 0x1F }, + { 0x20, 0x15 }, + { 0x21, 0x15 }, + { 0x22, 0x1f }, + { 0x23, 0x46 }, + { 0x24, 0x46 }, + { 0x25, 0x44 }, + { 0x26, 0x44 }, + { 0x27, 0x4A }, + { 0x28, 0x4A }, + { 0x29, 0x48 }, + { 0x2A, 0x48 }, + { 0x2B, 0x1F }, + /* GIP Timing */ + { 0x58, 0x40 }, + { 0x5B, 0x30 }, + { 0x5C, 0x03 }, + { 0x5D, 0x30 }, + { 0x5E, 0x01 }, + { 0x5F, 0x02 }, + { 0x63, 0x14 }, + { 0x64, 0x6A }, + { 0x67, 0x73 }, + { 0x68, 0x05 }, + { 0x69, 0x14 }, + { 0x6A, 0x6A }, + { 0x6B, 0x08 }, + { 0x6C, 0x00 }, + { 0x6D, 0x00 }, + { 0x6E, 0x00 }, + { 0x6F, 0x88 }, + { 0x77, 0xDD }, + { 0x79, 0x0E }, + { 0x7A, 0x03 }, + { 0x7D, 0x14 }, + { 0x7E, 0x6A }, + /* Page4 */ + { 0xE0, 0x04 }, + { 0x09, 0x11 }, + { 0x0E, 0x48 }, + { 0x2B, 0x2B }, + { 0x2D, 0x03 }, + { 0x2E, 0x44 }, + /* Page0 */ + { 0xE0, 0x00 }, + { 0xE6, 0x02 }, + { 0xE7, 0x0C }, +}; + +static const struct drm_display_mode ltk101b4029w_mode = { + .hdisplay = 800, + .hsync_start = 800 + 18, + .hsync_end = 800 + 18 + 18, + .htotal = 800 + 18 + 18 + 18, + .vdisplay = 1280, + .vsync_start = 1280 + 24, + .vsync_end = 1280 + 24 + 4, + .vtotal = 1280 + 24 + 4 + 8, + .clock = 67330, + .width_mm = 136, + .height_mm = 218, +}; + +static const struct ltk500hd1829_desc ltk101b4029w_data = { + .mode = &ltk101b4029w_mode, + .init = ltk101b4029w_init, + .num_init = ARRAY_SIZE(ltk101b4029w_init), }; /* * There is no description
in the Reference Manual about these commands. * We received them from the vendor, so just use them as is. */ -static const struct ltk500hd1829_cmd init_code[] = { +static const struct ltk500hd1829_cmd ltk500hd1829_init[] = { { 0xE0, 0x00 }, { 0xE1, 0x93 }, { 0xE2, 0x65 }, @@ -260,6 +460,26 @@ static const struct ltk500hd1829_cmd init_code[] = { { 0x35, 0x00 }, }; +static const struct drm_display_mode ltk500hd1829_mode = { + .hdisplay = 720, + .hsync_start = 720 + 50, + .hsync_end = 720 + 50 + 50, + .htotal = 720 + 50 + 50 + 50, + .vdisplay = 1280, + .vsync_start = 1280 + 30, + .vsync_end = 1280 + 30 + 4, + .vtotal = 1280 + 30 + 4 + 12, + .clock = 69217, + .width_mm = 62, + .height_mm = 110, +}; + +static const struct ltk500hd1829_desc ltk500hd1829_data = { + .mode = &ltk500hd1829_mode, + .init = ltk500hd1829_init, + .num_init = ARRAY_SIZE(ltk500hd1829_init), +}; + static inline struct ltk500hd1829 *panel_to_ltk500hd1829(struct drm_panel *panel) { @@ -324,8 +544,8 @@ static int ltk500hd1829_prepare(struct drm_panel *panel) /* tRT: >= 5ms */ usleep_range(5000, 6000); - for (i = 0; i < ARRAY_SIZE(init_code); i++) { - ret = mipi_dsi_generic_write(dsi, &init_code[i], + for (i = 0; i < ctx->panel_desc->num_init; i++) { + ret = mipi_dsi_generic_write(dsi, &ctx->panel_desc->init[i], sizeof(struct ltk500hd1829_cmd)); if (ret < 0) { dev_err(panel->dev, "failed to write init cmds: %d\n", ret); @@ -359,31 +579,17 @@ disable_vcc: return ret; } -static const struct drm_display_mode default_mode = { - .hdisplay = 720, - .hsync_start = 720 + 50, - .hsync_end = 720 + 50 + 50, - .htotal = 720 + 50 + 50 + 50, - .vdisplay = 1280, - .vsync_start = 1280 + 30, - .vsync_end = 1280 + 30 + 4, - .vtotal = 1280 + 30 + 4 + 12, - .clock = 69217, - .width_mm = 62, - .height_mm = 110, -}; - static int ltk500hd1829_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct ltk500hd1829 *ctx = panel_to_ltk500hd1829(panel); struct drm_display_mode *mode; - mode = drm_mode_duplicate(connector->dev, &default_mode); + mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode); if (!mode) { dev_err(ctx->dev, "failed to add mode %ux%u@%u\n", - default_mode.hdisplay, default_mode.vdisplay, - drm_mode_vrefresh(&default_mode)); + ctx->panel_desc->mode->hdisplay, ctx->panel_desc->mode->vdisplay, + drm_mode_vrefresh(ctx->panel_desc->mode)); return -ENOMEM; } @@ -413,6 +619,10 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi) if (!ctx) return -ENOMEM; + ctx->panel_desc = of_device_get_match_data(dev); + if (!ctx->panel_desc) + return -EINVAL; + ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->reset_gpio)) { dev_err(dev, "cannot get reset gpio\n"); @@ -492,7 +702,14 @@ static void ltk500hd1829_remove(struct mipi_dsi_device *dsi) } static const struct of_device_id ltk500hd1829_of_match[] = { - { .compatible = "leadtek,ltk500hd1829", }, + { + .compatible = "leadtek,ltk101b4029w", + .data = &ltk101b4029w_data, + }, + { + .compatible = "leadtek,ltk500hd1829", + .data = &ltk500hd1829_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ltk500hd1829_of_match); diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c index 5703f4712d96..76c2a8f6718c 100644 --- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c +++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c @@ -72,6 +72,7 @@ static int atana33xc20_suspend(struct device *dev) if (p->el3_was_on) atana33xc20_wait(p->el_on3_off_time, 150); +
drm_dp_dpcd_set_powered(p->aux, false); ret = regulator_disable(p->supply); if (ret) return ret; @@ -93,6 +94,7 @@ static int atana33xc20_resume(struct device *dev) ret = regulator_enable(p->supply); if (ret) return ret; + drm_dp_dpcd_set_powered(p->aux, true); p->powered_on_time = ktime_get_boottime(); if (p->no_hpd) { diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 7606cc68d96a..20e3df1c59d4 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1367,6 +1367,23 @@ static const struct drm_display_mode boe_bp101wx1_100_mode = { .vtotal = 800 + 6 + 8 + 2, }; +static const struct panel_desc boe_bp082wx1_100 = { + .modes = &boe_bp101wx1_100_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 177, + .height = 110, + }, + .delay = { + .enable = 50, + .disable = 50, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct panel_desc boe_bp101wx1_100 = { .modes = &boe_bp101wx1_100_mode, .num_modes = 1, @@ -4374,6 +4391,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "bananapi,s070wv20-ct16", .data = &bananapi_s070wv20_ct16, }, { + .compatible = "boe,bp082wx1-100", + .data = &boe_bp082wx1_100, + }, { .compatible = "boe,bp101wx1-100", .data = &boe_bp101wx1_100, }, { diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c index b55bafd1a8be..a3e142f156d5 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c @@ -62,6 +62,7 @@ struct st7703 { struct dentry *debugfs; const struct st7703_panel_desc *desc; + enum drm_panel_orientation orientation; }; struct st7703_panel_desc { @@ -521,6 +522,96 @@ static const struct st7703_panel_desc rgb30panel_desc = { .init_sequence = rgb30panel_init_sequence, }; +static int rgb10max3_panel_init_sequence(struct st7703 *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + + /* Init sequence extracted from Powkiddy RGB10MAX3 BSP kernel. 
*/ + + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xf1, 0x12, 0x83); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETAPID, 0x00, 0x00, 0x00, 0xda, + 0x80); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP, 0xc8, 0x02, 0x30); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF, 0x10, 0x10, 0x28, + 0x28, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP, 0x04, 0x04); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM, 0x78, 0x78); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT, 0x25, 0x22, 0xf0, + 0x63); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI, 0x33, 0x81, 0x05, 0xf9, + 0x0e, 0x0e, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x44, 0x25, 0x00, 0x90, 0x0a, 0x00, + 0x00, 0x01, 0x4f, 0x01, 0x00, 0x00, 0x37); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x47); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR, 0x73, 0x73, 0x50, 0x50, + 0x00, 0x00, 0x12, 0x70, 0x00); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER, 0x25, 0x00, 0x32, + 0x32, 0x77, 0xe1, 0xff, 0xff, 0xcc, 0xcc, 0x77, + 0x77); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETECO, 0x82, 0x00, 0xbf, 0xff, + 0x00, 0xff); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETIO, 0xb8, 0x00, 0x0a, 0x00, + 0x00, 0x00); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCABC, 0x10, 0x40, 0x1e, + 0x02); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0b); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA, 0x00, 0x04, 0x07, + 0x2a, 0x39, 0x3f, 0x36, 0x31, 0x06, 0x0b, 0x0e, + 0x12, 0x14, 0x12, 0x13, 0x0f, 0x17, 0x00, 0x04, + 0x07, 0x2a, 0x39, 0x3f, 0x36, 0x31, 0x06, 0x0b, + 0x0e, 0x12, 0x14, 0x12, 0x13, 0x0f, 0x17); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ, 0x03, 0x03, 0x03, 0x03, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xff, 0x80, + 0xc0, 0x10); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1, 0xc8, 0x10, 0x08, 0x00, + 0x00, 0x41, 0xf8, 0x12, 0x31, 0x23, 0x37, 0x86, + 0x11, 0xc8, 0x37, 0x2a, 0x00, 0x00, 0x0c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x88, 0x20, 0x46, 0x02, 0x88, 0x88, 0x88, 0x88, + 0x88, 0x88, 0xff, 0x88, 0x31, 0x57, 0x13, 0x88, + 0x88, 0x88, 0x88, 0x88, 0x88, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2, 0x00, 0x1a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x8f, 0x13, 0x31, 0x75, 0x88, 0x88, 0x88, 0x88, + 0x88, 0x88, 0xf8, 0x8f, 0x02, 0x20, 0x64, 0x88, + 0x88, 0x88, 0x88, 0x88, 0x88, 0xf8, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00); + mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_EF, 0xff, 0xff, 0x01); + + return 0; +} + +static const struct drm_display_mode rgb10max3_panel_mode = { + .hdisplay = 720, + .hsync_start = 720 + 40, + .hsync_end = 720 + 40 + 10, + .htotal = 720 + 40 + 10 + 40, + .vdisplay = 1280, + .vsync_start = 1280 + 16, + .vsync_end = 1280 + 16 + 4, + .vtotal = 1280 + 16 + 4 + 14, + .clock = 63800, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .width_mm = 62, + .height_mm = 109, +}; + +static const struct st7703_panel_desc rgb10max3_panel_desc = { + .mode = &rgb10max3_panel_mode, + .lanes = 4, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_LPM, + .format = MIPI_DSI_FMT_RGB888, + .init_sequence = 
rgb10max3_panel_init_sequence, +}; + static int st7703_enable(struct drm_panel *panel) { struct st7703 *ctx = panel_to_st7703(panel); @@ -653,12 +744,20 @@ static int st7703_get_modes(struct drm_panel *panel, return 1; } +static enum drm_panel_orientation st7703_get_orientation(struct drm_panel *panel) +{ + struct st7703 *st7703 = panel_to_st7703(panel); + + return st7703->orientation; +} + static const struct drm_panel_funcs st7703_drm_funcs = { .disable = st7703_disable, .unprepare = st7703_unprepare, .prepare = st7703_prepare, .enable = st7703_enable, .get_modes = st7703_get_modes, + .get_orientation = st7703_get_orientation, }; static int allpixelson_set(void *data, u64 val) @@ -727,6 +826,10 @@ static int st7703_probe(struct mipi_dsi_device *dsi) return dev_err_probe(dev, PTR_ERR(ctx->iovcc), "Failed to request iovcc regulator\n"); + ret = of_drm_get_panel_orientation(dsi->dev.of_node, &ctx->orientation); + if (ret < 0) + return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n"); + drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs, DRM_MODE_CONNECTOR_DSI); @@ -784,6 +887,7 @@ static void st7703_remove(struct mipi_dsi_device *dsi) static const struct of_device_id st7703_of_match[] = { { .compatible = "anbernic,rg353v-panel-v2", .data = &rg353v2_desc }, + { .compatible = "powkiddy,rgb10max3-panel", .data = &rgb10max3_panel_desc }, { .compatible = "powkiddy,rgb30-panel", .data = &rgb30panel_desc }, { .compatible = "rocktech,jh057n00900", .data = &jh057n00900_panel_desc }, { .compatible = "xingbangda,xbd599", .data = &xbd599_desc }, diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h index e8fae5c77514..2bfd6d0ff050 100644 --- a/drivers/gpu/drm/radeon/atom-bits.h +++ b/drivers/gpu/drm/radeon/atom-bits.h @@ -33,7 +33,7 @@ static inline uint8_t get_u8(void *bios, int ptr) #define CU8(ptr) get_u8(ctx->bios, (ptr)) static inline uint16_t get_u16(void *bios, int ptr) { - return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8); + return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8); } #define U16(ptr) get_u16(ctx->ctx->bios, (ptr)) #define CU16(ptr) get_u16(ctx->bios, (ptr)) diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index ceb6d772ef94..5bc3e6b41c34 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -60,6 +60,7 @@ typedef struct { struct atom_context *ctx; uint32_t *ps, *ws; + int ps_size, ws_size; int ps_shift; uint16_t start; unsigned last_jump; @@ -68,8 +69,8 @@ typedef struct { } atom_exec_context; int atom_debug = 0; -static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params); -int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); +static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size); +int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size); static uint32_t atom_arg_mask[8] = { 0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000, @@ -221,7 +222,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, (*ptr)++; /* get_unaligned_le32 avoids unaligned accesses from atombios * tables, noticed on a DEC Alpha. 
*/ - val = get_unaligned_le32((u32 *)&ctx->ps[idx]); + if (idx < ctx->ps_size) + val = get_unaligned_le32((u32 *)&ctx->ps[idx]); + else + pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size); if (print) DEBUG("PS[0x%02X,0x%04X]", idx, val); break; @@ -259,7 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, val = gctx->reg_block; break; default: - val = ctx->ws[idx]; + if (idx < ctx->ws_size) + val = ctx->ws[idx]; + else + pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size); } break; case ATOM_ARG_ID: @@ -494,6 +501,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, idx = U8(*ptr); (*ptr)++; DEBUG("PS[0x%02X]", idx); + if (idx >= ctx->ps_size) { + pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size); + return; + } ctx->ps[idx] = cpu_to_le32(val); break; case ATOM_ARG_WS: @@ -526,6 +537,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, gctx->reg_block = val; break; default: + if (idx >= ctx->ws_size) { + pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size); + return; + } ctx->ws[idx] = val; } break; @@ -623,7 +638,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) else SDEBUG(" table: %d\n", idx); if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) - r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); + r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift); if (r) { ctx->abort = true; } @@ -1152,7 +1167,7 @@ static struct { atom_op_shr, ATOM_ARG_MC}, { atom_op_debug, 0},}; -static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params) +static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size) { int base = CU16(ctx->cmd_table + 4 + 2 * index); int len, ws, ps, ptr; @@ -1174,12 +1189,16 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32 ectx.ps_shift = ps / 4; ectx.start = base; ectx.ps = params; + ectx.ps_size = params_size; ectx.abort = false; ectx.last_jump = 0; - if (ws) + if (ws) { ectx.ws = kcalloc(4, ws, GFP_KERNEL); - else + ectx.ws_size = ws; + } else { ectx.ws = NULL; + ectx.ws_size = 0; + } debug_depth++; while (1) { @@ -1212,7 +1231,7 @@ free: return ret; } -int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params) +int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params, int params_size) { int r; @@ -1228,16 +1247,16 @@ int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uin /* reset divmul */ ctx->divmul[0] = 0; ctx->divmul[1] = 0; - r = atom_execute_table_locked(ctx, index, params); + r = atom_execute_table_locked(ctx, index, params, params_size); mutex_unlock(&ctx->mutex); return r; } -int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params) +int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size) { int r; mutex_lock(&ctx->scratch_mutex); - r = atom_execute_table_scratch_unlocked(ctx, index, params); + r = atom_execute_table_scratch_unlocked(ctx, index, params, params_size); mutex_unlock(&ctx->scratch_mutex); return r; } @@ -1335,7 +1354,7 @@ int atom_asic_init(struct atom_context *ctx) if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) return 1; - ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps); + ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16); if (ret) return ret; @@ -1343,7 +1362,7 @@ int 
atom_asic_init(struct atom_context *ctx) if (rdev->family < CHIP_R600) { if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL)) - atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps); + atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps, 16); } return ret; } diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index 5de0563b63d2..5bf06c0bd6ff 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -145,8 +145,8 @@ struct atom_context { extern int atom_debug; struct atom_context *atom_parse(struct card_info *, void *); -int atom_execute_table(struct atom_context *, int, uint32_t *); -int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *); +int atom_execute_table(struct atom_context *, int, uint32_t *, int); +int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *, int); int atom_asic_init(struct atom_context *); void atom_destroy(struct atom_context *); bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index ade13173921b..9b3a3a9d60e2 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -77,7 +77,7 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border); break; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static void atombios_scaler_setup(struct drm_crtc *crtc) @@ -157,7 +157,7 @@ static void atombios_scaler_setup(struct drm_crtc *crtc) break; } } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); if ((is_tv || is_cv) && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) { atom_rv515_force_tv_scaler(rdev, radeon_crtc); @@ -178,7 +178,7 @@ static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) args.ucCRTC = radeon_crtc->crtc_id; args.ucEnable = lock; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static void atombios_enable_crtc(struct drm_crtc *crtc, int state) @@ -194,7 +194,7 @@ static void atombios_enable_crtc(struct drm_crtc *crtc, int state) args.ucCRTC = radeon_crtc->crtc_id; args.ucEnable = state; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state) @@ -210,7 +210,7 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state) args.ucCRTC = radeon_crtc->crtc_id; args.ucEnable = state; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static const u32 vga_control_regs[6] = @@ -242,7 +242,7 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state) args.ucCRTC = radeon_crtc->crtc_id; args.ucBlanking = state; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); if (ASIC_IS_DCE8(rdev)) WREG32(vga_control_regs[radeon_crtc->crtc_id], 
vga_control); @@ -261,7 +261,7 @@ static void atombios_powergate_crtc(struct drm_crtc *crtc, int state) args.ucDispPipeId = radeon_crtc->crtc_id; args.ucEnable = state; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -343,7 +343,7 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static void atombios_crtc_set_timing(struct drm_crtc *crtc, @@ -389,7 +389,7 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static void atombios_disable_ss(struct radeon_device *rdev, int pll_id) @@ -546,7 +546,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev, args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; args.lvds_ss.ucEnable = enable; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } union adjust_pixel_clock { @@ -692,7 +692,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, ADJUST_DISPLAY_CONFIG_SS_ENABLE; atom_execute_table(rdev->mode_info.atom_context, - index, (uint32_t *)&args); + index, (uint32_t *)&args, sizeof(args)); adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; break; case 3: @@ -725,7 +725,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v3.sInput.ucExtTransmitterID = 0; atom_execute_table(rdev->mode_info.atom_context, - index, (uint32_t *)&args); + index, (uint32_t *)&args, sizeof(args)); adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; if (args.v3.sOutput.ucRefDiv) { radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; @@ -809,7 +809,7 @@ static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev, DRM_ERROR("Unknown table version %d %d\n", frev, crev); return; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static void atombios_crtc_program_pll(struct drm_crtc *crtc, @@ -949,7 +949,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, return; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 009333645438..fca8b08535a5 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -112,7 +112,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, if (ASIC_IS_DCE4(rdev)) args.v2.ucHPD_ID = chan->rec.hpd; - atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t 
*)&args, sizeof(args)); *ack = args.v1.ucReplyStatus; @@ -354,7 +354,7 @@ static u8 radeon_dp_encoder_service(struct radeon_device *rdev, args.ucLaneNum = lane_num; args.ucStatus = 0; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); return args.ucStatus; } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 6e537c5bd295..2bff0d9e20f5 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -119,12 +119,12 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); if (dig->backlight_level == 0) { args.ucAction = ATOM_LCD_BLOFF; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } else { args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: @@ -389,7 +389,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) } args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } @@ -445,7 +445,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } @@ -546,7 +546,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action) break; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } union lvds_encoder_control { @@ -664,7 +664,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) break; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } int @@ -979,7 +979,7 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m break; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } @@ -1361,7 +1361,7 @@ atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t break; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void @@ -1397,7 +1397,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action) args.v1.ucAction = action; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, 
sizeof(args)); /* wait for the panel to power up */ if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { @@ -1519,7 +1519,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); return; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } static void @@ -1554,7 +1554,7 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable) args.ucEnable = ATOM_ENABLE; args.ucCRTC = radeon_crtc->crtc_id; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); WREG32(reg, temp); } @@ -1618,10 +1618,10 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); WREG32(RADEON_BIOS_3_SCRATCH, reg); } else - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (rdev->mode_info.bl_encoder) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; @@ -1629,7 +1629,7 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) atombios_set_backlight_level(radeon_encoder, dig->backlight_level); } else { args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } } break; @@ -1637,10 +1637,10 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: args.ucAction = ATOM_DISABLE; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { args.ucAction = ATOM_LCD_BLOFF; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } break; } @@ -1983,7 +1983,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) return; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); /* update scratch regs with new routing */ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); @@ -2311,7 +2311,7 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); return true; } else diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index ab4d21072191..730f0b25312b 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -78,7 +78,7 @@ static int 
radeon_process_i2c_ch(struct radeon_i2c_chan *chan, args.ucSlaveAddr = slave_addr << 1; args.ucLineNumber = chan->rec.i2c_id; - atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); /* error */ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 4e64ed38c439..70931b04bbac 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -53,8 +53,7 @@ extern int ni_mc_load_microcode(struct radeon_device *rdev); //********* BARTS **************// -static const u32 barts_cgcg_cgls_default[] = -{ +static const u32 barts_cgcg_cgls_default[] = { /* Register, Value, Mask bits */ 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, @@ -107,8 +106,7 @@ static const u32 barts_cgcg_cgls_default[] = }; #define BARTS_CGCG_CGLS_DEFAULT_LENGTH sizeof(barts_cgcg_cgls_default) / (3 * sizeof(u32)) -static const u32 barts_cgcg_cgls_disable[] = -{ +static const u32 barts_cgcg_cgls_disable[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, @@ -162,8 +160,7 @@ static const u32 barts_cgcg_cgls_disable[] = }; #define BARTS_CGCG_CGLS_DISABLE_LENGTH sizeof(barts_cgcg_cgls_disable) / (3 * sizeof(u32)) -static const u32 barts_cgcg_cgls_enable[] = -{ +static const u32 barts_cgcg_cgls_enable[] = { /* 0x0000c124, 0x84180000, 0x00180000, */ 0x00000644, 0x000f7892, 0x001f4080, 0x000008f8, 0x00000010, 0xffffffff, @@ -217,8 +214,7 @@ static const u32 barts_cgcg_cgls_enable[] = }; #define BARTS_CGCG_CGLS_ENABLE_LENGTH sizeof(barts_cgcg_cgls_enable) / (3 * sizeof(u32)) -static const u32 barts_mgcg_default[] = -{ +static const u32 barts_mgcg_default[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x00005448, 0x00000100, 0xffffffff, 0x000055e4, 0x00600100, 0xffffffff, @@ -366,8 +362,7 @@ static const u32 barts_mgcg_default[] = }; #define BARTS_MGCG_DEFAULT_LENGTH sizeof(barts_mgcg_default) / (3 * sizeof(u32)) -static const u32 barts_mgcg_disable[] = -{ +static const u32 barts_mgcg_disable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, @@ -381,8 +376,7 @@ static const u32 barts_mgcg_disable[] = }; #define BARTS_MGCG_DISABLE_LENGTH sizeof(barts_mgcg_disable) / (3 * sizeof(u32)) -static const u32 barts_mgcg_enable[] = -{ +static const u32 barts_mgcg_enable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, @@ -397,8 +391,7 @@ static const u32 barts_mgcg_enable[] = #define BARTS_MGCG_ENABLE_LENGTH sizeof(barts_mgcg_enable) / (3 * sizeof(u32)) //********* CAICOS **************// -static const u32 caicos_cgcg_cgls_default[] = -{ +static const u32 caicos_cgcg_cgls_default[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, @@ -450,8 +443,7 @@ static const u32 caicos_cgcg_cgls_default[] = }; #define CAICOS_CGCG_CGLS_DEFAULT_LENGTH sizeof(caicos_cgcg_cgls_default) / (3 * sizeof(u32)) -static const u32 caicos_cgcg_cgls_disable[] = -{ +static const u32 caicos_cgcg_cgls_disable[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, @@ -505,8 +497,7 @@ static const u32 caicos_cgcg_cgls_disable[] = }; #define CAICOS_CGCG_CGLS_DISABLE_LENGTH 
sizeof(caicos_cgcg_cgls_disable) / (3 * sizeof(u32)) -static const u32 caicos_cgcg_cgls_enable[] = -{ +static const u32 caicos_cgcg_cgls_enable[] = { /* 0x0000c124, 0x84180000, 0x00180000, */ 0x00000644, 0x000f7892, 0x001f4080, 0x000008f8, 0x00000010, 0xffffffff, @@ -560,8 +551,7 @@ static const u32 caicos_cgcg_cgls_enable[] = }; #define CAICOS_CGCG_CGLS_ENABLE_LENGTH sizeof(caicos_cgcg_cgls_enable) / (3 * sizeof(u32)) -static const u32 caicos_mgcg_default[] = -{ +static const u32 caicos_mgcg_default[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x00005448, 0x00000100, 0xffffffff, 0x000055e4, 0x00600100, 0xffffffff, @@ -640,8 +630,7 @@ static const u32 caicos_mgcg_default[] = }; #define CAICOS_MGCG_DEFAULT_LENGTH sizeof(caicos_mgcg_default) / (3 * sizeof(u32)) -static const u32 caicos_mgcg_disable[] = -{ +static const u32 caicos_mgcg_disable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, @@ -655,8 +644,7 @@ static const u32 caicos_mgcg_disable[] = }; #define CAICOS_MGCG_DISABLE_LENGTH sizeof(caicos_mgcg_disable) / (3 * sizeof(u32)) -static const u32 caicos_mgcg_enable[] = -{ +static const u32 caicos_mgcg_enable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, @@ -671,8 +659,7 @@ static const u32 caicos_mgcg_enable[] = #define CAICOS_MGCG_ENABLE_LENGTH sizeof(caicos_mgcg_enable) / (3 * sizeof(u32)) //********* TURKS **************// -static const u32 turks_cgcg_cgls_default[] = -{ +static const u32 turks_cgcg_cgls_default[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, @@ -724,8 +711,7 @@ static const u32 turks_cgcg_cgls_default[] = }; #define TURKS_CGCG_CGLS_DEFAULT_LENGTH sizeof(turks_cgcg_cgls_default) / (3 * sizeof(u32)) -static const u32 turks_cgcg_cgls_disable[] = -{ +static const u32 turks_cgcg_cgls_disable[] = { 0x000008f8, 0x00000010, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, 0x000008f8, 0x00000011, 0xffffffff, @@ -779,8 +765,7 @@ static const u32 turks_cgcg_cgls_disable[] = }; #define TURKS_CGCG_CGLS_DISABLE_LENGTH sizeof(turks_cgcg_cgls_disable) / (3 * sizeof(u32)) -static const u32 turks_cgcg_cgls_enable[] = -{ +static const u32 turks_cgcg_cgls_enable[] = { /* 0x0000c124, 0x84180000, 0x00180000, */ 0x00000644, 0x000f7892, 0x001f4080, 0x000008f8, 0x00000010, 0xffffffff, @@ -835,8 +820,7 @@ static const u32 turks_cgcg_cgls_enable[] = #define TURKS_CGCG_CGLS_ENABLE_LENGTH sizeof(turks_cgcg_cgls_enable) / (3 * sizeof(u32)) // These are the sequences for turks_mgcg_shls -static const u32 turks_mgcg_default[] = -{ +static const u32 turks_mgcg_default[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x00005448, 0x00000100, 0xffffffff, 0x000055e4, 0x00600100, 0xffffffff, @@ -935,8 +919,7 @@ static const u32 turks_mgcg_default[] = }; #define TURKS_MGCG_DEFAULT_LENGTH sizeof(turks_mgcg_default) / (3 * sizeof(u32)) -static const u32 turks_mgcg_disable[] = -{ +static const u32 turks_mgcg_disable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0xffffffff, 0xffffffff, @@ -950,8 +933,7 @@ static const u32 turks_mgcg_disable[] = }; #define TURKS_MGCG_DISABLE_LENGTH sizeof(turks_mgcg_disable) / (3 * sizeof(u32)) -static const u32 turks_mgcg_enable[] = -{ +static const u32 turks_mgcg_enable[] = { 0x0000802c, 0xc0000000, 0xffffffff, 0x000008f8, 0x00000000, 0xffffffff, 0x000008fc, 0x00000000, 0xffffffff, @@ -972,8 +954,7 @@ static const u32 turks_mgcg_enable[] = 
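/*
 * Illustrative sketch, not part of the diff: the arrays above are flat
 * (register, value, mask) triplets, which is why every *_LENGTH macro
 * divides the array size by 3 * sizeof(u32).  A consumer walks the table
 * and applies each triplet as a read-modify-write, so only the masked
 * bits of each register change.  The helper name and the rreg()/wreg()
 * accessors are hypothetical stand-ins, not the actual radeon functions;
 * u32 is the kernel type from <linux/types.h>.
 */
static void apply_reg_triplets(const u32 *table, u32 count,
			       u32 (*rreg)(u32 reg),
			       void (*wreg)(u32 reg, u32 val))
{
	u32 i;

	for (i = 0; i < count * 3; i += 3) {
		u32 reg = table[i + 0];
		u32 mask = table[i + 2];
		u32 tmp = rreg(reg);

		tmp &= ~mask;			/* clear the bits the table owns */
		tmp |= table[i + 1] & mask;	/* set the new masked value */
		wreg(reg, tmp);
	}
}
/* e.g.: apply_reg_triplets(turks_mgcg_enable, TURKS_MGCG_ENABLE_LENGTH, rreg, wreg); */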
//********* BARTS **************// -static const u32 barts_sysls_default[] = -{ +static const u32 barts_sysls_default[] = { /* Register, Value, Mask bits */ 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, @@ -993,8 +974,7 @@ static const u32 barts_sysls_default[] = }; #define BARTS_SYSLS_DEFAULT_LENGTH sizeof(barts_sysls_default) / (3 * sizeof(u32)) -static const u32 barts_sysls_disable[] = -{ +static const u32 barts_sysls_disable[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x00041401, 0xffffffff, @@ -1013,8 +993,7 @@ static const u32 barts_sysls_disable[] = }; #define BARTS_SYSLS_DISABLE_LENGTH sizeof(barts_sysls_disable) / (3 * sizeof(u32)) -static const u32 barts_sysls_enable[] = -{ +static const u32 barts_sysls_enable[] = { 0x000055e8, 0x00000001, 0xffffffff, 0x0000d0bc, 0x00000100, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, @@ -1034,8 +1013,7 @@ static const u32 barts_sysls_enable[] = #define BARTS_SYSLS_ENABLE_LENGTH sizeof(barts_sysls_enable) / (3 * sizeof(u32)) //********* CAICOS **************// -static const u32 caicos_sysls_default[] = -{ +static const u32 caicos_sysls_default[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, @@ -1053,8 +1031,7 @@ static const u32 caicos_sysls_default[] = }; #define CAICOS_SYSLS_DEFAULT_LENGTH sizeof(caicos_sysls_default) / (3 * sizeof(u32)) -static const u32 caicos_sysls_disable[] = -{ +static const u32 caicos_sysls_disable[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x00041401, 0xffffffff, @@ -1072,8 +1049,7 @@ static const u32 caicos_sysls_disable[] = }; #define CAICOS_SYSLS_DISABLE_LENGTH sizeof(caicos_sysls_disable) / (3 * sizeof(u32)) -static const u32 caicos_sysls_enable[] = -{ +static const u32 caicos_sysls_enable[] = { 0x000055e8, 0x00000001, 0xffffffff, 0x0000d0bc, 0x00000100, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, @@ -1092,8 +1068,7 @@ static const u32 caicos_sysls_enable[] = #define CAICOS_SYSLS_ENABLE_LENGTH sizeof(caicos_sysls_enable) / (3 * sizeof(u32)) //********* TURKS **************// -static const u32 turks_sysls_default[] = -{ +static const u32 turks_sysls_default[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, @@ -1112,8 +1087,7 @@ static const u32 turks_sysls_default[] = }; #define TURKS_SYSLS_DEFAULT_LENGTH sizeof(turks_sysls_default) / (3 * sizeof(u32)) -static const u32 turks_sysls_disable[] = -{ +static const u32 turks_sysls_disable[] = { 0x000055e8, 0x00000000, 0xffffffff, 0x0000d0bc, 0x00000000, 0xffffffff, 0x000015c0, 0x00041401, 0xffffffff, @@ -1132,8 +1106,7 @@ static const u32 turks_sysls_disable[] = }; #define TURKS_SYSLS_DISABLE_LENGTH sizeof(turks_sysls_disable) / (3 * sizeof(u32)) -static const u32 turks_sysls_enable[] = -{ +static const u32 turks_sysls_enable[] = { 0x000055e8, 0x00000001, 0xffffffff, 0x0000d0bc, 0x00000100, 0xffffffff, 0x000015c0, 0x000c1401, 0xffffffff, @@ -1154,8 +1127,7 @@ static const u32 turks_sysls_enable[] = #endif -u32 btc_valid_sclk[40] = -{ +u32 btc_valid_sclk[40] = { 5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 55000, 60000, 65000, 70000, 75000, 80000, 85000, 90000, 95000, 100000, 105000, 110000, 11500, 120000, 125000, 130000, 135000, 140000, 145000, 150000, @@ -1194,7 +1166,7 @@ void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_t if ((table == NULL) || (table->count == 0)) 
return; - for (i= 0; i < table->count; i++) { + for (i = 0; i < table->count; i++) { if (clock <= table->entries[i].clk) { if (*voltage < table->entries[i].v) *voltage = (u16)((table->entries[i].v < max_voltage) ? @@ -1441,7 +1413,7 @@ void btc_program_mgcg_hw_sequence(struct radeon_device *rdev, u32 i, length = count * 3; u32 tmp; - for (i = 0; i < length; i+=3) { + for (i = 0; i < length; i += 3) { tmp = RREG32(sequence[i]); tmp &= ~sequence[i+2]; tmp |= sequence[i+1] & sequence[i+2]; @@ -2003,7 +1975,7 @@ static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, for (i = 0; i < table->num_entries; i++) { eg_table->mc_reg_table_entry[i].mclk_max = table->mc_reg_table_entry[i].mclk_max; - for(j = 0; j < table->last; j++) + for (j = 0; j < table->last; j++) eg_table->mc_reg_table_entry[i].mc_data[j] = table->mc_reg_table_entry[i].mc_data[j]; } diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index b8f4dac68d85..abe9d65cc460 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -46,36 +46,31 @@ #define VOLTAGE_VID_OFFSET_SCALE1 625 #define VOLTAGE_VID_OFFSET_SCALE2 100 -static const struct ci_pt_defaults defaults_hawaii_xt = -{ +static const struct ci_pt_defaults defaults_hawaii_xt = { 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } }; -static const struct ci_pt_defaults defaults_hawaii_pro = -{ +static const struct ci_pt_defaults defaults_hawaii_pro = { 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } }; -static const struct ci_pt_defaults defaults_bonaire_xt = -{ +static const struct ci_pt_defaults defaults_bonaire_xt = { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }; -static const struct ci_pt_defaults defaults_saturn_xt = -{ +static const struct ci_pt_defaults defaults_saturn_xt = { 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } }; -static const struct ci_pt_config_reg didt_config_ci[] = -{ +static const struct ci_pt_config_reg didt_config_ci[] = { { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, @@ -1216,7 +1211,7 @@ static void ci_thermal_initialize(struct radeon_device *rdev) if (rdev->pm.fan_pulses_per_revolution) { tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; - tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1); + tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1); WREG32_SMC(CG_TACH_CTRL, tmp); } @@ -3333,7 +3328,7 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev) } static void ci_reset_single_dpm_table(struct radeon_device *rdev, - struct ci_single_dpm_table* dpm_table, + struct ci_single_dpm_table *dpm_table, u32 count) { u32 i; @@ -3343,7 +3338,7 @@ static void 
ci_reset_single_dpm_table(struct radeon_device *rdev, dpm_table->dpm_levels[i].enabled = false; } -static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table, +static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table, u32 index, u32 pcie_gen, u32 pcie_lanes) { dpm_table->dpm_levels[index].value = pcie_gen; @@ -3503,7 +3498,7 @@ static int ci_find_boot_level(struct ci_single_dpm_table *table, u32 i; int ret = -EINVAL; - for(i = 0; i < table->count; i++) { + for (i = 0; i < table->count; i++) { if (value == table->dpm_levels[i].value) { *boot_level = i; ret = 0; @@ -4304,7 +4299,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev, for (i = 0, j = table->last; i < table->last; i++) { if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) return -EINVAL; - switch(table->mc_reg_address[i].s1 << 2) { + switch (table->mc_reg_address[i].s1 << 2) { case MC_SEQ_MISC1: temp_reg = RREG32(MC_PMG_CMD_EMRS); table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; @@ -4369,7 +4364,7 @@ static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) { bool result = true; - switch(in_reg) { + switch (in_reg) { case MC_SEQ_RAS_TIMING >> 2: *out_reg = MC_SEQ_RAS_TIMING_LP >> 2; break; @@ -4508,7 +4503,7 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev, for (i = 0; i < table->last; i++) { if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) return -EINVAL; - switch(table->mc_reg_address[i].s1 >> 2) { + switch (table->mc_reg_address[i].s1 >> 2) { case MC_SEQ_MISC1: for (k = 0; k < table->num_entries; k++) { if ((table->mc_reg_table_entry[k].mclk_max == 125000) || @@ -4683,7 +4678,7 @@ static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev, struct ci_power_info *pi = ci_get_pi(rdev); u32 i = 0; - for(i = 0; i < pi->mc_reg_table.num_entries; i++) { + for (i = 0; i < pi->mc_reg_table.num_entries; i++) { if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) break; } diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h index ac12db5f2cf7..74b95c200222 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.h +++ b/drivers/gpu/drm/radeon/ci_dpm.h @@ -87,8 +87,7 @@ struct ci_mc_reg_table { SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; }; -struct ci_ulv_parm -{ +struct ci_ulv_parm { bool supported; u32 cg_ulv_parameter; u32 volt_change_delay; @@ -113,8 +112,7 @@ struct ci_dpm_level_enable_mask { u32 pcie_dpm_enable_mask; }; -struct ci_vbios_boot_state -{ +struct ci_vbios_boot_state { u16 mvdd_bootup_value; u16 vddc_bootup_value; u16 vddci_bootup_value; diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h index 4774e04c4da6..7693fb6624a3 100644 --- a/drivers/gpu/drm/radeon/clearstate_cayman.h +++ b/drivers/gpu/drm/radeon/clearstate_cayman.h @@ -23,8 +23,7 @@ #include "clearstate_defs.h" -static const u32 SECT_CONTEXT_def_1[] = -{ +static const u32 SECT_CONTEXT_def_1[] = { 0x00000000, // DB_RENDER_CONTROL 0x00000000, // DB_COUNT_CONTROL 0x00000000, // DB_DEPTH_VIEW @@ -514,8 +513,7 @@ static const u32 SECT_CONTEXT_def_1[] = 0x00000000, // CB_BLEND6_CONTROL 0x00000000, // CB_BLEND7_CONTROL }; -static const u32 SECT_CONTEXT_def_2[] = -{ +static const u32 SECT_CONTEXT_def_2[] = { 0x00000000, // PA_CL_POINT_X_RAD 0x00000000, // PA_CL_POINT_Y_RAD 0x00000000, // PA_CL_POINT_SIZE @@ -523,8 +521,7 @@ static const u32 SECT_CONTEXT_def_2[] = 0x00000000, // VGT_DMA_BASE_HI 0x00000000, // VGT_DMA_BASE }; -static const u32 
SECT_CONTEXT_def_3[] = -{ +static const u32 SECT_CONTEXT_def_3[] = { 0x00000000, // DB_DEPTH_CONTROL 0x00000000, // DB_EQAA 0x00000000, // CB_COLOR_CONTROL diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h index c1b6c22dbed7..0045d42aa27c 100644 --- a/drivers/gpu/drm/radeon/clearstate_ci.h +++ b/drivers/gpu/drm/radeon/clearstate_ci.h @@ -23,8 +23,7 @@ #include "clearstate_defs.h" -static const unsigned int ci_SECT_CONTEXT_def_1[] = -{ +static const unsigned int ci_SECT_CONTEXT_def_1[] = { 0x00000000, // DB_RENDER_CONTROL 0x00000000, // DB_COUNT_CONTROL 0x00000000, // DB_DEPTH_VIEW diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a424b86008b8..c634dc28e6c3 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2514,8 +2514,7 @@ static void evergreen_agp_enable(struct radeon_device *rdev) WREG32(VM_CONTEXT1_CNTL, 0); } -static const unsigned ni_dig_offsets[] = -{ +static const unsigned ni_dig_offsets[] = { NI_DIG0_REGISTER_OFFSET, NI_DIG1_REGISTER_OFFSET, NI_DIG2_REGISTER_OFFSET, @@ -2524,8 +2523,7 @@ static const unsigned ni_dig_offsets[] = NI_DIG5_REGISTER_OFFSET }; -static const unsigned ni_tx_offsets[] = -{ +static const unsigned ni_tx_offsets[] = { NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1, NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1, NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1, @@ -2534,8 +2532,7 @@ static const unsigned ni_tx_offsets[] = NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 }; -static const unsigned evergreen_dp_offsets[] = -{ +static const unsigned evergreen_dp_offsets[] = { EVERGREEN_DP0_REGISTER_OFFSET, EVERGREEN_DP1_REGISTER_OFFSET, EVERGREEN_DP2_REGISTER_OFFSET, @@ -2544,8 +2541,7 @@ static const unsigned evergreen_dp_offsets[] = EVERGREEN_DP5_REGISTER_OFFSET }; -static const unsigned evergreen_disp_int_status[] = -{ +static const unsigned evergreen_disp_int_status[] = { DISP_INTERRUPT_STATUS, DISP_INTERRUPT_STATUS_CONTINUE, DISP_INTERRUPT_STATUS_CONTINUE2, @@ -2643,7 +2639,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev, return; } - stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE; + stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE; WREG32(EVERGREEN_DP_VID_STREAM_CNTL + evergreen_dp_offsets[dig_fe], stream_ctrl); @@ -2655,7 +2651,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev, stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + evergreen_dp_offsets[dig_fe]); } - if (counter >= 32 ) + if (counter >= 32) DRM_ERROR("counter exceeds %d\n", counter); fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]); @@ -2716,7 +2712,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav /*for now we do it this manually*/ /**/ if (ASIC_IS_DCE5(rdev) && - evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe)) + evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe)) evergreen_blank_dp_output(rdev, dig_fe); /*we could remove 6 lines below*/ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ @@ -3597,7 +3593,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); - sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32); + sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32); sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); sq_gpr_resource_mgmt_2 = 
NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 0de79f3a7e3f..1fe6e0d883c7 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -33,8 +33,8 @@ #include "evergreen_reg_safe.h" #include "cayman_reg_safe.h" -#define MAX(a,b) (((a)>(b))?(a):(b)) -#define MIN(a,b) (((a)<(b))?(a):(b)) +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm) diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index b436badf9efa..3ff9fda54aa3 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -265,8 +265,8 @@ #define NI_DIG_BE_CNTL 0x7140 -# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F) -# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 ) +# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F) +# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7) #define NI_DIG_BE_EN_CNTL 0x7144 # define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0) @@ -284,7 +284,7 @@ #define EVERGREEN_DP_VID_STREAM_CNTL 0x730C # define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0) -# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16) +# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16) #define EVERGREEN_DP_STEER_FIFO 0x7310 # define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0) #define EVERGREEN_DP_SEC_CNTL 0x7280 @@ -302,8 +302,8 @@ # define EVERGREEN_DP_SEC_SS_EN (1 << 28) /*DCIO_UNIPHY block*/ -#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600) -#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600) +#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600) +#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600) #define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600) #define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600) #define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600) diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h index 3a03ba37d043..b34d54b567b7 100644 --- a/drivers/gpu/drm/radeon/evergreen_smc.h +++ b/drivers/gpu/drm/radeon/evergreen_smc.h @@ -29,8 +29,7 @@ #define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16 -struct SMC_Evergreen_MCRegisterAddress -{ +struct SMC_Evergreen_MCRegisterAddress { uint16_t s0; uint16_t s1; }; @@ -38,15 +37,13 @@ struct SMC_Evergreen_MCRegisterAddress typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress; -struct SMC_Evergreen_MCRegisterSet -{ +struct SMC_Evergreen_MCRegisterSet { uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; }; typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet; -struct SMC_Evergreen_MCRegisters -{ +struct SMC_Evergreen_MCRegisters { uint8_t last; uint8_t reserved[3]; SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index f7735da07feb..55dbf450bd9c 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -64,8 +64,7 @@ extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); extern void cik_update_cg(struct radeon_device *rdev, u32 block, bool enable); -static const struct kv_pt_config_reg didt_config_kv[] = -{ +static const struct kv_pt_config_reg didt_config_kv[] = { { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, { 
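/*
 * Editor's note on the MAX()/MIN() macros reformatted in evergreen_cs.c
 * above: the cleanup only fixes spacing, but these function-like macros
 * still evaluate each argument twice, e.g.
 *
 *   MAX(a++, b)   // expands to (((a++) > (b)) ? (a++) : (b))
 *
 * so callers must not pass expressions with side effects. The kernel's
 * generic min()/max() helpers in <linux/minmax.h> evaluate each argument
 * once; these local macros predate those helpers.
 */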
0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, @@ -931,9 +930,9 @@ static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev) pi->graphics_level[i].ClkBypassCntl = 2; else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) pi->graphics_level[i].ClkBypassCntl = 7; - else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) + else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200) pi->graphics_level[i].ClkBypassCntl = 6; - else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) + else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200) pi->graphics_level[i].ClkBypassCntl = 8; else pi->graphics_level[i].ClkBypassCntl = 0; @@ -1577,7 +1576,7 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev, if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].sclk_frequency) > (table->entries[pi->lowest_valid].sclk_frequency - - new_ps->levels[new_ps->num_levels -1].sclk)) + new_ps->levels[new_ps->num_levels - 1].sclk)) pi->highest_valid = pi->lowest_valid; else pi->lowest_valid = pi->highest_valid; diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c index c0a59527e7b8..65831cca6730 100644 --- a/drivers/gpu/drm/radeon/kv_smc.c +++ b/drivers/gpu/drm/radeon/kv_smc.c @@ -189,7 +189,7 @@ int kv_copy_bytes_to_smc(struct radeon_device *rdev, if (ret) return ret; - original_data= RREG32(SMC_IND_DATA_0); + original_data = RREG32(SMC_IND_DATA_0); extra_shift = 8 * (4 - byte_count); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 927e5f42e97d..77aee99e473a 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -66,8 +66,7 @@ void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); } -static const u32 tn_rlc_save_restore_register_list[] = -{ +static const u32 tn_rlc_save_restore_register_list[] = { 0x98fc, 0x98f0, 0x9834, @@ -216,8 +215,7 @@ MODULE_FIRMWARE("radeon/ARUBA_me.bin"); MODULE_FIRMWARE("radeon/ARUBA_rlc.bin"); -static const u32 cayman_golden_registers2[] = -{ +static const u32 cayman_golden_registers2[] = { 0x3e5c, 0xffffffff, 0x00000000, 0x3e48, 0xffffffff, 0x00000000, 0x3e4c, 0xffffffff, 0x00000000, @@ -226,8 +224,7 @@ static const u32 cayman_golden_registers2[] = 0x3e60, 0xffffffff, 0x00000000 }; -static const u32 cayman_golden_registers[] = -{ +static const u32 cayman_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, 0x5e78, 0x8f311ff1, 0x001000f0, 0x3f90, 0xffff0000, 0xff000000, @@ -267,16 +264,14 @@ static const u32 cayman_golden_registers[] = 0x8974, 0xffffffff, 0x00000000 }; -static const u32 dvst_golden_registers2[] = -{ +static const u32 dvst_golden_registers2[] = { 0x8f8, 0xffffffff, 0, 0x8fc, 0x00380000, 0, 0x8f8, 0xffffffff, 1, 0x8fc, 0x0e000000, 0 }; -static const u32 dvst_golden_registers[] = -{ +static const u32 dvst_golden_registers[] = { 0x690, 0x3fff3fff, 0x20c00033, 0x918c, 0x0fff0fff, 0x00010006, 0x91a8, 0x0fff0fff, 0x00010006, @@ -333,8 +328,7 @@ static const u32 dvst_golden_registers[] = 0x8974, 0xffffffff, 0x00000000 }; -static const u32 scrapper_golden_registers[] = -{ +static const u32 scrapper_golden_registers[] = { 0x690, 0x3fff3fff, 0x20c00033, 0x918c, 0x0fff0fff, 0x00010006, 0x918c, 0x0fff0fff, 0x00010006, @@ -624,7 +618,7 @@ static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { int ni_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data; - u32 mem_type, running, blackout = 0; + u32 mem_type, running; 
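/*
 * Editor's note: the declaration above drops `blackout` because the only
 * code that used it -- removed in the hunk just below -- was unreachable.
 * The enclosing branch runs only when (running == 0), so the nested
 * `if (running) { blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); ... }` save,
 * and the matching `if (running) WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);`
 * restore, could never execute. Deleting the dead branches lets the
 * variable go away as well.
 */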
u32 *io_mc_regs; int i, ucode_size, regs_size; @@ -659,11 +653,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) { - if (running) { - blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); - WREG32(MC_SHARED_BLACKOUT_CNTL, 1); - } - /* reset the engine and set to writable */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000010); @@ -689,9 +678,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev) break; udelay(1); } - - if (running) - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -754,7 +740,8 @@ int ni_init_microcode(struct radeon_device *rdev) rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4; mc_req_size = 0; break; - default: BUG(); + default: + BUG(); } DRM_INFO("Loading %s Microcode\n", chip_name); @@ -813,7 +800,7 @@ int ni_init_microcode(struct radeon_device *rdev) err = 0; } else if (rdev->smc_fw->size != smc_req_size) { pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n", - rdev->mc_fw->size, fw_name); + rdev->smc_fw->size, fw_name); err = -EINVAL; } } diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 3e1c1a392fb7..e08559c44a5c 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -3103,9 +3103,6 @@ static int ni_init_simplified_leakage_table(struct radeon_device *rdev, u32 smc_leakage, max_leakage = 0; u32 scaling_factor; - if (!leakage_table) - return -EINVAL; - table_size = leakage_table->count; if (eg_pi->vddc_voltage_table.count != table_size) diff --git a/drivers/gpu/drm/radeon/ni_dpm.h b/drivers/gpu/drm/radeon/ni_dpm.h index 74e301936906..4e3e7303e035 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.h +++ b/drivers/gpu/drm/radeon/ni_dpm.h @@ -59,8 +59,7 @@ struct ni_mc_reg_table { #define NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 2 -enum ni_dc_cac_level -{ +enum ni_dc_cac_level { NISLANDS_DCCAC_LEVEL_0 = 0, NISLANDS_DCCAC_LEVEL_1, NISLANDS_DCCAC_LEVEL_2, @@ -72,8 +71,7 @@ enum ni_dc_cac_level NISLANDS_DCCAC_MAX_LEVELS }; -struct ni_leakage_coeffients -{ +struct ni_leakage_coeffients { u32 at; u32 bt; u32 av; @@ -83,8 +81,7 @@ struct ni_leakage_coeffients u32 t_ref; }; -struct ni_cac_data -{ +struct ni_cac_data { struct ni_leakage_coeffients leakage_coefficients; u32 i_leakage; s32 leakage_minimum_temperature; @@ -100,8 +97,7 @@ struct ni_cac_data u8 lts_truncate_n; }; -struct ni_cac_weights -{ +struct ni_cac_weights { u32 weight_tcp_sig0; u32 weight_tcp_sig1; u32 weight_ta_sig; diff --git a/drivers/gpu/drm/radeon/nislands_smc.h b/drivers/gpu/drm/radeon/nislands_smc.h index 42f3bab0f9ee..097893c38915 100644 --- a/drivers/gpu/drm/radeon/nislands_smc.h +++ b/drivers/gpu/drm/radeon/nislands_smc.h @@ -27,8 +27,7 @@ #define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -struct PP_NIslands_Dpm2PerfLevel -{ +struct PP_NIslands_Dpm2PerfLevel { uint8_t MaxPS; uint8_t TgtAct; uint8_t MaxPS_StepInc; @@ -44,8 +43,7 @@ struct PP_NIslands_Dpm2PerfLevel typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel; -struct PP_NIslands_DPM2Parameters -{ +struct PP_NIslands_DPM2Parameters { uint32_t TDPLimit; uint32_t NearTDPLimit; uint32_t SafePowerLimit; @@ -53,8 +51,7 @@ struct PP_NIslands_DPM2Parameters }; typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters; -struct NISLANDS_SMC_SCLK_VALUE -{ +struct NISLANDS_SMC_SCLK_VALUE { uint32_t vCG_SPLL_FUNC_CNTL; uint32_t vCG_SPLL_FUNC_CNTL_2; uint32_t vCG_SPLL_FUNC_CNTL_3; @@ -66,8 +63,7 @@ struct NISLANDS_SMC_SCLK_VALUE 
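/*
 * Editor's note: the long run of hunks through these SMC and DPM headers is
 * one mechanical cleanup -- moving the opening brace of struct/enum/array
 * definitions onto the declaration line, per
 * Documentation/process/coding-style.rst:
 *
 *   struct NISLANDS_SMC_SCLK_VALUE        struct NISLANDS_SMC_SCLK_VALUE {
 *   {                                -->      ...
 *       ...                                };
 *   };
 *
 * No field, type, or layout changes are involved. Two non-cosmetic bits do
 * hide in this stretch: ni_init_microcode() validated rdev->smc_fw->size but
 * printed rdev->mc_fw->size in its error path, so the "Bogus length" message
 * named the wrong blob's size; and ni_init_simplified_leakage_table() loses
 * a NULL check that, judging by its removal, callers can never trigger.
 */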
typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE; -struct NISLANDS_SMC_MCLK_VALUE -{ +struct NISLANDS_SMC_MCLK_VALUE { uint32_t vMPLL_FUNC_CNTL; uint32_t vMPLL_FUNC_CNTL_1; uint32_t vMPLL_FUNC_CNTL_2; @@ -84,8 +80,7 @@ struct NISLANDS_SMC_MCLK_VALUE typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE; -struct NISLANDS_SMC_VOLTAGE_VALUE -{ +struct NISLANDS_SMC_VOLTAGE_VALUE { uint16_t value; uint8_t index; uint8_t padding; @@ -93,8 +88,7 @@ struct NISLANDS_SMC_VOLTAGE_VALUE typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE; -struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ +struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL { uint8_t arbValue; uint8_t ACIndex; uint8_t displayWatermark; @@ -132,8 +126,7 @@ struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL; -struct NISLANDS_SMC_SWSTATE -{ +struct NISLANDS_SMC_SWSTATE { uint8_t flags; uint8_t levelCount; uint8_t padding2; @@ -156,8 +149,7 @@ struct NISLANDS_SMC_SWSTATE_SINGLE { #define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2 #define NISLANDS_SMC_VOLTAGEMASK_MAX 4 -struct NISLANDS_SMC_VOLTAGEMASKTABLE -{ +struct NISLANDS_SMC_VOLTAGEMASKTABLE { uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; }; @@ -166,8 +158,7 @@ typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE; #define NISLANDS_MAX_NO_VREG_STEPS 32 -struct NISLANDS_SMC_STATETABLE -{ +struct NISLANDS_SMC_STATETABLE { uint8_t thermalProtectType; uint8_t systemFlags; uint8_t maxVDDCIndexInPPTable; @@ -203,8 +194,7 @@ typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE; #define SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16 #define SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES 4 -struct SMC_NISLANDS_MC_TPP_CAC_TABLE -{ +struct SMC_NISLANDS_MC_TPP_CAC_TABLE { uint32_t tpp[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES]; uint32_t cacValue[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES]; }; @@ -212,8 +202,7 @@ struct SMC_NISLANDS_MC_TPP_CAC_TABLE typedef struct SMC_NISLANDS_MC_TPP_CAC_TABLE SMC_NISLANDS_MC_TPP_CAC_TABLE; -struct PP_NIslands_CACTABLES -{ +struct PP_NIslands_CACTABLES { uint32_t cac_bif_lut[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES]; uint32_t cac_lkge_lut[SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES]; @@ -257,8 +246,7 @@ typedef struct PP_NIslands_CACTABLES PP_NIslands_CACTABLES; #define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 #define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 -struct SMC_NIslands_MCRegisterAddress -{ +struct SMC_NIslands_MCRegisterAddress { uint16_t s0; uint16_t s1; }; @@ -266,15 +254,13 @@ struct SMC_NIslands_MCRegisterAddress typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; -struct SMC_NIslands_MCRegisterSet -{ +struct SMC_NIslands_MCRegisterSet { uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; }; typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; -struct SMC_NIslands_MCRegisters -{ +struct SMC_NIslands_MCRegisters { uint8_t last; uint8_t reserved[3]; SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; @@ -283,8 +269,7 @@ struct SMC_NIslands_MCRegisters typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; -struct SMC_NIslands_MCArbDramTimingRegisterSet -{ +struct SMC_NIslands_MCArbDramTimingRegisterSet { uint32_t mc_arb_dram_timing; uint32_t mc_arb_dram_timing2; uint8_t mc_arb_rfsh_rate; @@ -293,8 +278,7 @@ struct SMC_NIslands_MCArbDramTimingRegisterSet typedef struct 
SMC_NIslands_MCArbDramTimingRegisterSet SMC_NIslands_MCArbDramTimingRegisterSet; -struct SMC_NIslands_MCArbDramTimingRegisters -{ +struct SMC_NIslands_MCArbDramTimingRegisters { uint8_t arb_current; uint8_t reserved[3]; SMC_NIslands_MCArbDramTimingRegisterSet data[20]; @@ -302,8 +286,7 @@ struct SMC_NIslands_MCArbDramTimingRegisters typedef struct SMC_NIslands_MCArbDramTimingRegisters SMC_NIslands_MCArbDramTimingRegisters; -struct SMC_NISLANDS_SPLL_DIV_TABLE -{ +struct SMC_NISLANDS_SPLL_DIV_TABLE { uint32_t freq[256]; uint32_t ss[256]; }; diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index cfeca2694d5f..86b8b770af19 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1327,7 +1327,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, return -EINVAL; } track->num_arrays = c; - for (i = 0; i < (c - 1); i+=2, idx+=3) { + for (i = 0; i < (c - 1); i += 2, idx += 3) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { DRM_ERROR("No reloc for packet3 %d\n", diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 9d341cff63ee..d776f929d5c3 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h @@ -825,7 +825,7 @@ # define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13) # define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13) # define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13) -# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) ) +# define R300_TX_MIN_FILTER_MASK ((15 << 11) | (3 << 13)) # define R300_TX_MAX_ANISO_1_TO_1 (0 << 21) # define R300_TX_MAX_ANISO_2_TO_1 (2 << 21) # define R300_TX_MAX_ANISO_4_TO_1 (4 << 21) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index a17b95eec65f..b5e97d95a19f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -99,8 +99,7 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin"); MODULE_FIRMWARE("radeon/SUMO2_pfp.bin"); MODULE_FIRMWARE("radeon/SUMO2_me.bin"); -static const u32 crtc_offsets[2] = -{ +static const u32 crtc_offsets[2] = { 0, AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL }; diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 9d2bcb9551e6..64980a61d38a 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -28,8 +28,7 @@ #include "r600_dpm.h" #include "atom.h" -const u32 r600_utc[R600_PM_NUMBER_OF_TC] = -{ +const u32 r600_utc[R600_PM_NUMBER_OF_TC] = { R600_UTC_DFLT_00, R600_UTC_DFLT_01, R600_UTC_DFLT_02, @@ -47,8 +46,7 @@ const u32 r600_utc[R600_PM_NUMBER_OF_TC] = R600_UTC_DFLT_14, }; -const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = -{ +const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = { R600_DTC_DFLT_00, R600_DTC_DFLT_01, R600_DTC_DFLT_02, diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h index 6e4d22ed2a00..5c2513c84c48 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.h +++ b/drivers/gpu/drm/radeon/r600_dpm.h @@ -119,8 +119,7 @@ enum r600_display_watermark { R600_DISPLAY_WATERMARK_HIGH = 1, }; -enum r600_display_gap -{ +enum r600_display_gap { R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, R600_PM_DISPLAY_GAP_VBLANK = 1, R600_PM_DISPLAY_GAP_WATERMARK = 2, diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3d3d2109dfeb..3e5ff17e3caf 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1355,14 +1355,12 @@ struct radeon_dpm_thermal { bool high_to_low; }; -enum radeon_clk_action -{ +enum radeon_clk_action { 
RADEON_SCLK_UP = 1, RADEON_SCLK_DOWN }; -struct radeon_blacklist_clocks -{ +struct radeon_blacklist_clocks { u32 sclk; u32 mclk; enum radeon_clk_action action; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 802b5af19261..b5a0109b2e2c 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2400,10 +2400,10 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RS880: rdev->asic = &rs780_asic; /* 760G/780V/880V don't have UVD */ - if ((rdev->pdev->device == 0x9616)|| - (rdev->pdev->device == 0x9611)|| - (rdev->pdev->device == 0x9613)|| - (rdev->pdev->device == 0x9711)|| + if ((rdev->pdev->device == 0x9616) || + (rdev->pdev->device == 0x9611) || + (rdev->pdev->device == 0x9613) || + (rdev->pdev->device == 0x9711) || (rdev->pdev->device == 0x9713)) rdev->has_uvd = false; else diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 3596ea4a8b60..bb1f0a3371ab 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2852,7 +2852,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, args.v1.ucAction = clock_type; args.v1.ulClock = cpu_to_le32(clock); /* 10 khz */ - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); dividers->post_div = args.v1.ucPostDiv; dividers->fb_div = args.v1.ucFbDiv; @@ -2866,7 +2866,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, args.v2.ucAction = clock_type; args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */ - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); dividers->post_div = args.v2.ucPostDiv; dividers->fb_div = le16_to_cpu(args.v2.usFbDiv); @@ -2881,7 +2881,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, if (clock_type == COMPUTE_ENGINE_PLL_PARAM) { args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); dividers->post_div = args.v3.ucPostDiv; dividers->enable_post_div = (args.v3.ucCntlFlag & @@ -2901,7 +2901,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, if (strobe_mode) args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); dividers->post_div = args.v5.ucPostDiv; dividers->enable_post_div = (args.v5.ucCntlFlag & @@ -2920,7 +2920,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, /* fusion */ args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */ - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); dividers->post_divider = dividers->post_div = args.v4.ucPostDiv; dividers->real_clock = le32_to_cpu(args.v4.ulClock); @@ -2931,7 +2931,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, args.v6_in.ulClock.ulComputeClockFlag = clock_type; args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */ - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv); dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac); @@ -2972,7 +2972,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev, if (strobe_mode) args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac); mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv); @@ -3005,7 +3005,7 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) args.ucEnable = enable; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) @@ -3013,7 +3013,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) GET_ENGINE_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); return le32_to_cpu(args.ulReturnEngineClock); } @@ -3022,7 +3022,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) GET_MEMORY_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); return le32_to_cpu(args.ulReturnMemoryClock); } @@ -3034,7 +3034,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev, args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */ - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void radeon_atom_set_memory_clock(struct radeon_device *rdev, @@ -3048,7 +3048,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev, @@ -3067,7 +3067,7 @@ void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev, if (mem_clock) args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void radeon_atom_update_memory_dll(struct radeon_device *rdev, @@ -3078,7 +3078,7 @@ void radeon_atom_update_memory_dll(struct radeon_device *rdev, args = cpu_to_le32(mem_clock); /* 10 khz */ - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } void radeon_atom_set_ac_timing(struct radeon_device *rdev, @@ -3090,7 +3090,7 @@ void radeon_atom_set_ac_timing(struct radeon_device *rdev, args.ulTargetMemoryClock = cpu_to_le32(tmp); /* 10 khz */ - 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } union set_voltage { @@ -3134,7 +3134,7 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v return; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); } int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type, @@ -3155,7 +3155,7 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type, args.v2.ucVoltageMode = 0; args.v2.usVoltageLevel = 0; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); *voltage = le16_to_cpu(args.v2.usVoltageLevel); break; @@ -3164,7 +3164,7 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type, args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL; args.v3.usVoltageLevel = cpu_to_le16(voltage_id); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); *voltage = le16_to_cpu(args.v3.usVoltageLevel); break; @@ -3200,7 +3200,7 @@ int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev, args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID; args.v3.usVoltageLevel = 0; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); *leakage_id = le16_to_cpu(args.v3.usVoltageLevel); break; @@ -3327,7 +3327,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev, args.in.ulSCLKFreq = cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); *voltage = le16_to_cpu(args.evv_out.usVoltageLevel); @@ -3353,7 +3353,7 @@ int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev, args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK; args.v2.usVoltageLevel = cpu_to_le16(voltage_level); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); *gpio_mask = le32_to_cpu(*(u32 *)&args.v2); @@ -3361,7 +3361,7 @@ int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev, args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL; args.v2.usVoltageLevel = cpu_to_le16(voltage_level); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); *gpio_value = le32_to_cpu(*(u32 *)&args.v2); break; diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 595354e3ce0b..f557535c1d7b 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -61,19 +61,23 @@ struct atpx_mux { u16 mux; } __packed; -bool radeon_has_atpx(void) { +bool radeon_has_atpx(void) +{ return radeon_atpx_priv.atpx_detected; } -bool radeon_has_atpx_dgpu_power_cntl(void) { +bool radeon_has_atpx_dgpu_power_cntl(void) +{ return radeon_atpx_priv.atpx.functions.power_cntl; } -bool 
radeon_is_atpx_hybrid(void) { +bool radeon_is_atpx_hybrid(void) +{ return radeon_atpx_priv.atpx.is_hybrid; } -bool radeon_atpx_dgpu_req_power_for_displays(void) { +bool radeon_atpx_dgpu_req_power_for_displays(void) +{ return radeon_atpx_priv.atpx.dgpu_req_power_for_displays; } diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index 91b58fbc2be7..74753bb26d33 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -37,15 +37,14 @@ void dce6_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin, u8 enable_mask); -struct r600_audio_pin* r600_audio_get_pin(struct radeon_device *rdev); -struct r600_audio_pin* dce6_audio_get_pin(struct radeon_device *rdev); +struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev); +struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev); static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode); static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode); -static const u32 pin_offsets[7] = -{ +static const u32 pin_offsets[7] = { (0x5e00 - 0x5e00), (0x5e18 - 0x5e00), (0x5e30 - 0x5e00), @@ -361,7 +360,7 @@ static void radeon_audio_write_latency_fields(struct drm_encoder *encoder, radeon_encoder->audio->write_latency_fields(encoder, connector, mode); } -struct r600_audio_pin* radeon_audio_get_pin(struct drm_encoder *encoder) +struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -528,7 +527,7 @@ static void radeon_audio_calc_cts(unsigned int clock, int *CTS, int *N, int freq *N, *CTS, freq); } -static const struct radeon_hdmi_acr* radeon_audio_acr(unsigned int clock) +static const struct radeon_hdmi_acr *radeon_audio_acr(unsigned int clock) { static struct radeon_hdmi_acr res; u8 i; diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h index dacaaa007051..a073dadd0638 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.h +++ b/drivers/gpu/drm/radeon/radeon_audio.h @@ -34,8 +34,7 @@ struct cea_sad; #define WREG32_ENDPOINT(block, reg, v) \ radeon_audio_endpoint_wreg(rdev, (block), (reg), (v)) -struct radeon_audio_basic_funcs -{ +struct radeon_audio_basic_funcs { u32 (*endpoint_rreg)(struct radeon_device *rdev, u32 offset, u32 reg); void (*endpoint_wreg)(struct radeon_device *rdev, u32 offset, u32 reg, u32 v); @@ -43,8 +42,7 @@ struct radeon_audio_basic_funcs struct r600_audio_pin *pin, u8 enable_mask); }; -struct radeon_audio_funcs -{ +struct radeon_audio_funcs { void (*select_pin)(struct drm_encoder *encoder); struct r600_audio_pin* (*get_pin)(struct radeon_device *rdev); void (*write_latency_fields)(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 59c4db13d90a..546381a5c918 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -603,8 +603,7 @@ struct atom_memory_info { #define MAX_AC_TIMING_ENTRIES 16 -struct atom_memory_clock_range_table -{ +struct atom_memory_clock_range_table { u8 num_entries; u8 rsv[3]; u32 mclk[MAX_AC_TIMING_ENTRIES]; @@ -632,14 +631,12 @@ struct atom_mc_reg_table { #define MAX_VOLTAGE_ENTRIES 32 -struct atom_voltage_table_entry -{ +struct atom_voltage_table_entry { u16 value; u32 smio_low; }; -struct atom_voltage_table -{ +struct atom_voltage_table { 
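/*
 * Editor's note: the atom_execute_table() call sites converted above all
 * gain a sizeof(args) argument. The idea -- sketched here under assumption,
 * since the interpreter plumbing lives in atom.c -- is that the execution
 * context records how large the caller's parameter block is, so
 * parameter-space accesses issued by the AtomBIOS bytecode can be
 * bounds-checked instead of silently reading or clobbering stack memory
 * past `args`:
 *
 *   // hypothetical shape of the check inside the interpreter;
 *   // ps_size stands for the newly plumbed-through size
 *   static u32 atom_get_ps(struct atom_context *ctx, int idx)
 *   {
 *       if ((idx + 1) * sizeof(u32) > ctx->ps_size) {
 *           DRM_ERROR("ATOM: ps read out of bounds\n");
 *           return 0;
 *       }
 *       return le32_to_cpu(ctx->ps[idx]);
 *   }
 *
 * Passing sizeof(args) at every call site is what makes such a check
 * possible; the conversion itself does not change behavior at any site.
 */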
u32 count; u32 mask_low; u32 phase_delay; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index b73fd9ab0252..4482c8c5f5ce 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -587,7 +587,7 @@ static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev, int err; int value; - if(!rdev->asic->dpm.fan_ctrl_set_mode) + if (!rdev->asic->dpm.fan_ctrl_set_mode) return -EINVAL; err = kstrtoint(buf, 10, &value); @@ -789,7 +789,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; /* Skip vddc attribute if get_current_vddc is not implemented */ - if(attr == &sensor_dev_attr_in0_input.dev_attr.attr && + if (attr == &sensor_dev_attr_in0_input.dev_attr.attr && !rdev->asic->dpm.get_current_vddc) return 0; diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 922a29e58880..d7f552d441ab 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -86,7 +86,7 @@ int rs400_gart_init(struct radeon_device *rdev) return 0; } /* Check gart size */ - switch(rdev->mc.gtt_size / (1024 * 1024)) { + switch (rdev->mc.gtt_size / (1024 * 1024)) { case 32: case 64: case 128: @@ -116,7 +116,7 @@ int rs400_gart_enable(struct radeon_device *rdev) tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp); /* Check gart size */ - switch(rdev->mc.gtt_size / (1024 * 1024)) { + switch (rdev->mc.gtt_size / (1024 * 1024)) { case 32: size_reg = RS480_VA_SIZE_32MB; break; diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 8cf87a0a2b2a..5c162778899b 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -54,8 +54,7 @@ static void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); -static const u32 crtc_offsets[2] = -{ +static const u32 crtc_offsets[2] = { 0, AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL }; diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 76260fdfbaa7..79709d26d983 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -42,8 +42,7 @@ static void rv515_gpu_init(struct radeon_device *rdev); int rv515_mc_wait_for_idle(struct radeon_device *rdev); -static const u32 crtc_offsets[2] = -{ +static const u32 crtc_offsets[2] = { 0, AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL }; diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.h b/drivers/gpu/drm/radeon/rv6xx_dpm.h index 8035d53ebea6..020c0dc8361d 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.h +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.h @@ -28,8 +28,7 @@ #include "r600_dpm.h" /* Represents a single SCLK step. 
*/ -struct rv6xx_sclk_stepping -{ +struct rv6xx_sclk_stepping { u32 vco_frequency; u32 post_divider; }; diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index ef2f1a048cfe..e3e1f6833f12 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -1010,7 +1010,7 @@ int rv770_populate_initial_mvdd_value(struct radeon_device *rdev, struct rv7xx_power_info *pi = rv770_get_pi(rdev); if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) == - (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low) ) { + (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low)) { voltage->index = MVDD_LOW_INDEX; voltage->value = cpu_to_be16(MVDD_LOW_VALUE); } else { @@ -1260,7 +1260,7 @@ static int rv770_construct_vddc_table(struct radeon_device *rdev) pi->vddc_mask_low = gpio_mask; if (i > 0) { if ((pi->vddc_table[i].low_smio != - pi->vddc_table[i - 1].low_smio ) || + pi->vddc_table[i - 1].low_smio) || (pi->vddc_table[i].high_smio != pi->vddc_table[i - 1].high_smio)) vddc_index++; diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h index 3b2c963c4880..d8e8f70135f2 100644 --- a/drivers/gpu/drm/radeon/rv770_smc.h +++ b/drivers/gpu/drm/radeon/rv770_smc.h @@ -31,8 +31,7 @@ #define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3 -struct RV770_SMC_SCLK_VALUE -{ +struct RV770_SMC_SCLK_VALUE { uint32_t vCG_SPLL_FUNC_CNTL; uint32_t vCG_SPLL_FUNC_CNTL_2; uint32_t vCG_SPLL_FUNC_CNTL_3; @@ -43,8 +42,7 @@ struct RV770_SMC_SCLK_VALUE typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE; -struct RV770_SMC_MCLK_VALUE -{ +struct RV770_SMC_MCLK_VALUE { uint32_t vMPLL_AD_FUNC_CNTL; uint32_t vMPLL_AD_FUNC_CNTL_2; uint32_t vMPLL_DQ_FUNC_CNTL; @@ -59,8 +57,7 @@ struct RV770_SMC_MCLK_VALUE typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE; -struct RV730_SMC_MCLK_VALUE -{ +struct RV730_SMC_MCLK_VALUE { uint32_t vMCLK_PWRMGT_CNTL; uint32_t vDLL_CNTL; uint32_t vMPLL_FUNC_CNTL; @@ -73,8 +70,7 @@ struct RV730_SMC_MCLK_VALUE typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE; -struct RV770_SMC_VOLTAGE_VALUE -{ +struct RV770_SMC_VOLTAGE_VALUE { uint16_t value; uint8_t index; uint8_t padding; @@ -82,16 +78,14 @@ struct RV770_SMC_VOLTAGE_VALUE typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE; -union RV7XX_SMC_MCLK_VALUE -{ +union RV7XX_SMC_MCLK_VALUE { RV770_SMC_MCLK_VALUE mclk770; RV730_SMC_MCLK_VALUE mclk730; }; typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE; -struct RV770_SMC_HW_PERFORMANCE_LEVEL -{ +struct RV770_SMC_HW_PERFORMANCE_LEVEL { uint8_t arbValue; union{ uint8_t seqValue; @@ -126,8 +120,7 @@ struct RV770_SMC_HW_PERFORMANCE_LEVEL typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL; -struct RV770_SMC_SWSTATE -{ +struct RV770_SMC_SWSTATE { uint8_t flags; uint8_t padding1; uint8_t padding2; @@ -142,8 +135,7 @@ typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE; #define RV770_SMC_VOLTAGEMASK_VDDCI 2 #define RV770_SMC_VOLTAGEMASK_MAX 4 -struct RV770_SMC_VOLTAGEMASKTABLE -{ +struct RV770_SMC_VOLTAGEMASKTABLE { uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX]; uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX]; }; @@ -152,8 +144,7 @@ typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE; #define MAX_NO_VREG_STEPS 32 -struct RV770_SMC_STATETABLE -{ +struct RV770_SMC_STATETABLE { uint8_t thermalProtectType; uint8_t systemFlags; uint8_t maxVDDCIndexInPPTable; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 
85e9cba49cec..93f197d96d8f 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -138,8 +138,7 @@ static void si_fini_pg(struct radeon_device *rdev); static void si_fini_cg(struct radeon_device *rdev); static void si_rlc_stop(struct radeon_device *rdev); -static const u32 crtc_offsets[] = -{ +static const u32 crtc_offsets[] = { EVERGREEN_CRTC0_REGISTER_OFFSET, EVERGREEN_CRTC1_REGISTER_OFFSET, EVERGREEN_CRTC2_REGISTER_OFFSET, @@ -148,8 +147,7 @@ static const u32 crtc_offsets[] = EVERGREEN_CRTC5_REGISTER_OFFSET }; -static const u32 si_disp_int_status[] = -{ +static const u32 si_disp_int_status[] = { DISP_INTERRUPT_STATUS, DISP_INTERRUPT_STATUS_CONTINUE, DISP_INTERRUPT_STATUS_CONTINUE2, @@ -162,8 +160,7 @@ static const u32 si_disp_int_status[] = #define DC_HPDx_INT_CONTROL(x) (DC_HPD1_INT_CONTROL + (x * 0xc)) #define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS + (x * 0xc)) -static const u32 verde_rlc_save_restore_register_list[] = -{ +static const u32 verde_rlc_save_restore_register_list[] = { (0x8000 << 16) | (0x98f4 >> 2), 0x00000000, (0x8040 << 16) | (0x98f4 >> 2), @@ -384,8 +381,7 @@ static const u32 verde_rlc_save_restore_register_list[] = 0x00000000 }; -static const u32 tahiti_golden_rlc_registers[] = -{ +static const u32 tahiti_golden_rlc_registers[] = { 0xc424, 0xffffffff, 0x00601005, 0xc47c, 0xffffffff, 0x10104040, 0xc488, 0xffffffff, 0x0100000a, @@ -394,8 +390,7 @@ static const u32 tahiti_golden_rlc_registers[] = 0xf4a8, 0xffffffff, 0x00000000 }; -static const u32 tahiti_golden_registers[] = -{ +static const u32 tahiti_golden_registers[] = { 0x9a10, 0x00010000, 0x00018208, 0x9830, 0xffffffff, 0x00000000, 0x9834, 0xf00fffff, 0x00000400, @@ -429,13 +424,11 @@ static const u32 tahiti_golden_registers[] = 0x15c0, 0x000c0fc0, 0x000c0400 }; -static const u32 tahiti_golden_registers2[] = -{ +static const u32 tahiti_golden_registers2[] = { 0xc64, 0x00000001, 0x00000001 }; -static const u32 pitcairn_golden_rlc_registers[] = -{ +static const u32 pitcairn_golden_rlc_registers[] = { 0xc424, 0xffffffff, 0x00601004, 0xc47c, 0xffffffff, 0x10102020, 0xc488, 0xffffffff, 0x01000020, @@ -443,8 +436,7 @@ static const u32 pitcairn_golden_rlc_registers[] = 0xc30c, 0xffffffff, 0x800000a4 }; -static const u32 pitcairn_golden_registers[] = -{ +static const u32 pitcairn_golden_registers[] = { 0x9a10, 0x00010000, 0x00018208, 0x9830, 0xffffffff, 0x00000000, 0x9834, 0xf00fffff, 0x00000400, @@ -474,8 +466,7 @@ static const u32 pitcairn_golden_registers[] = 0x15c0, 0x000c0fc0, 0x000c0400 }; -static const u32 verde_golden_rlc_registers[] = -{ +static const u32 verde_golden_rlc_registers[] = { 0xc424, 0xffffffff, 0x033f1005, 0xc47c, 0xffffffff, 0x10808020, 0xc488, 0xffffffff, 0x00800008, @@ -483,8 +474,7 @@ static const u32 verde_golden_rlc_registers[] = 0xc30c, 0xffffffff, 0x80010014 }; -static const u32 verde_golden_registers[] = -{ +static const u32 verde_golden_registers[] = { 0x9a10, 0x00010000, 0x00018208, 0x9830, 0xffffffff, 0x00000000, 0x9834, 0xf00fffff, 0x00000400, @@ -539,8 +529,7 @@ static const u32 verde_golden_registers[] = 0x15c0, 0x000c0fc0, 0x000c0400 }; -static const u32 oland_golden_rlc_registers[] = -{ +static const u32 oland_golden_rlc_registers[] = { 0xc424, 0xffffffff, 0x00601005, 0xc47c, 0xffffffff, 0x10104040, 0xc488, 0xffffffff, 0x0100000a, @@ -548,8 +537,7 @@ static const u32 oland_golden_rlc_registers[] = 0xc30c, 0xffffffff, 0x800000f4 }; -static const u32 oland_golden_registers[] = -{ +static const u32 oland_golden_registers[] = { 0x9a10, 0x00010000, 
0x00018208, 0x9830, 0xffffffff, 0x00000000, 0x9834, 0xf00fffff, 0x00000400, @@ -579,8 +567,7 @@ static const u32 oland_golden_registers[] = 0x15c0, 0x000c0fc0, 0x000c0400 }; -static const u32 hainan_golden_registers[] = -{ +static const u32 hainan_golden_registers[] = { 0x9a10, 0x00010000, 0x00018208, 0x9830, 0xffffffff, 0x00000000, 0x9834, 0xf00fffff, 0x00000400, @@ -608,13 +595,11 @@ static const u32 hainan_golden_registers[] = 0x15c0, 0x000c0fc0, 0x000c0400 }; -static const u32 hainan_golden_registers2[] = -{ +static const u32 hainan_golden_registers2[] = { 0x98f8, 0xffffffff, 0x02010001 }; -static const u32 tahiti_mgcg_cgcg_init[] = -{ +static const u32 tahiti_mgcg_cgcg_init[] = { 0xc400, 0xffffffff, 0xfffffffc, 0x802c, 0xffffffff, 0xe0000000, 0x9a60, 0xffffffff, 0x00000100, @@ -743,8 +728,7 @@ static const u32 tahiti_mgcg_cgcg_init[] = 0xd8c0, 0xfffffff0, 0x00000100 }; -static const u32 pitcairn_mgcg_cgcg_init[] = -{ +static const u32 pitcairn_mgcg_cgcg_init[] = { 0xc400, 0xffffffff, 0xfffffffc, 0x802c, 0xffffffff, 0xe0000000, 0x9a60, 0xffffffff, 0x00000100, @@ -841,8 +825,7 @@ static const u32 pitcairn_mgcg_cgcg_init[] = 0xd8c0, 0xfffffff0, 0x00000100 }; -static const u32 verde_mgcg_cgcg_init[] = -{ +static const u32 verde_mgcg_cgcg_init[] = { 0xc400, 0xffffffff, 0xfffffffc, 0x802c, 0xffffffff, 0xe0000000, 0x9a60, 0xffffffff, 0x00000100, @@ -941,8 +924,7 @@ static const u32 verde_mgcg_cgcg_init[] = 0xd8c0, 0xfffffff0, 0x00000100 }; -static const u32 oland_mgcg_cgcg_init[] = -{ +static const u32 oland_mgcg_cgcg_init[] = { 0xc400, 0xffffffff, 0xfffffffc, 0x802c, 0xffffffff, 0xe0000000, 0x9a60, 0xffffffff, 0x00000100, @@ -1021,8 +1003,7 @@ static const u32 oland_mgcg_cgcg_init[] = 0xd8c0, 0xfffffff0, 0x00000100 }; -static const u32 hainan_mgcg_cgcg_init[] = -{ +static const u32 hainan_mgcg_cgcg_init[] = { 0xc400, 0xffffffff, 0xfffffffc, 0x802c, 0xffffffff, 0xe0000000, 0x9a60, 0xffffffff, 0x00000100, @@ -1098,8 +1079,7 @@ static const u32 hainan_mgcg_cgcg_init[] = 0xd8c0, 0xfffffff0, 0x00000100 }; -static u32 verde_pg_init[] = -{ +static u32 verde_pg_init[] = { 0x353c, 0xffffffff, 0x40000, 0x3538, 0xffffffff, 0x200010ff, 0x353c, 0xffffffff, 0x0, @@ -1768,7 +1748,8 @@ static int si_init_microcode(struct radeon_device *rdev) mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4); break; - default: BUG(); + default: + BUG(); } /* this memory configuration requires special firmware */ diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index fbf968e3f6d7..9deb91970d4d 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -46,8 +46,7 @@ #define SCLK_MIN_DEEPSLEEP_FREQ 1350 -static const struct si_cac_config_reg cac_weights_tahiti[] = -{ +static const struct si_cac_config_reg cac_weights_tahiti[] = { { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND }, @@ -111,8 +110,7 @@ static const struct si_cac_config_reg cac_weights_tahiti[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg lcac_tahiti[] = -{ +static const struct si_cac_config_reg lcac_tahiti[] = { { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, @@ -203,13 +201,11 @@ static const struct si_cac_config_reg lcac_tahiti[] = }; -static const struct si_cac_config_reg cac_override_tahiti[] = -{ 
+static const struct si_cac_config_reg cac_override_tahiti[] = { { 0xFFFFFFFF } }; -static const struct si_powertune_data powertune_data_tahiti = -{ +static const struct si_powertune_data powertune_data_tahiti = { ((1 << 16) | 27027), 6, 0, @@ -239,8 +235,7 @@ static const struct si_powertune_data powertune_data_tahiti = true }; -static const struct si_dte_data dte_data_tahiti = -{ +static const struct si_dte_data dte_data_tahiti = { { 1159409, 0, 0, 0, 0 }, { 777, 0, 0, 0, 0 }, 2, @@ -257,8 +252,7 @@ static const struct si_dte_data dte_data_tahiti = false }; -static const struct si_dte_data dte_data_tahiti_pro = -{ +static const struct si_dte_data dte_data_tahiti_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, @@ -275,8 +269,7 @@ static const struct si_dte_data dte_data_tahiti_pro = true }; -static const struct si_dte_data dte_data_new_zealand = -{ +static const struct si_dte_data dte_data_new_zealand = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 }, { 0x29B, 0x3E9, 0x537, 0x7D2, 0 }, 0x5, @@ -293,8 +286,7 @@ static const struct si_dte_data dte_data_new_zealand = true }; -static const struct si_dte_data dte_data_aruba_pro = -{ +static const struct si_dte_data dte_data_aruba_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, @@ -311,8 +303,7 @@ static const struct si_dte_data dte_data_aruba_pro = true }; -static const struct si_dte_data dte_data_malta = -{ +static const struct si_dte_data dte_data_malta = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, @@ -329,8 +320,7 @@ static const struct si_dte_data dte_data_malta = true }; -static struct si_cac_config_reg cac_weights_pitcairn[] = -{ +static struct si_cac_config_reg cac_weights_pitcairn[] = { { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, @@ -394,8 +384,7 @@ static struct si_cac_config_reg cac_weights_pitcairn[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg lcac_pitcairn[] = -{ +static const struct si_cac_config_reg lcac_pitcairn[] = { { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, @@ -485,13 +474,11 @@ static const struct si_cac_config_reg lcac_pitcairn[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_override_pitcairn[] = -{ +static const struct si_cac_config_reg cac_override_pitcairn[] = { { 0xFFFFFFFF } }; -static const struct si_powertune_data powertune_data_pitcairn = -{ +static const struct si_powertune_data powertune_data_pitcairn = { ((1 << 16) | 27027), 5, 0, @@ -521,8 +508,7 @@ static const struct si_powertune_data powertune_data_pitcairn = true }; -static const struct si_dte_data dte_data_pitcairn = -{ +static const struct si_dte_data dte_data_pitcairn = { { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 0, @@ -539,8 +525,7 @@ static const struct si_dte_data dte_data_pitcairn = false }; -static const struct si_dte_data dte_data_curacao_xt = -{ +static const struct si_dte_data dte_data_curacao_xt = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, @@ -557,8 +542,7 @@ static const struct si_dte_data dte_data_curacao_xt = true }; -static const struct si_dte_data dte_data_curacao_pro = -{ +static const struct si_dte_data dte_data_curacao_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 
5, @@ -575,8 +559,7 @@ static const struct si_dte_data dte_data_curacao_pro = true }; -static const struct si_dte_data dte_data_neptune_xt = -{ +static const struct si_dte_data dte_data_neptune_xt = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, @@ -593,8 +576,7 @@ static const struct si_dte_data dte_data_neptune_xt = true }; -static const struct si_cac_config_reg cac_weights_chelsea_pro[] = -{ +static const struct si_cac_config_reg cac_weights_chelsea_pro[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, @@ -658,8 +640,7 @@ static const struct si_cac_config_reg cac_weights_chelsea_pro[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_weights_chelsea_xt[] = -{ +static const struct si_cac_config_reg cac_weights_chelsea_xt[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, @@ -723,8 +704,7 @@ static const struct si_cac_config_reg cac_weights_chelsea_xt[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_weights_heathrow[] = -{ +static const struct si_cac_config_reg cac_weights_heathrow[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, @@ -788,8 +768,7 @@ static const struct si_cac_config_reg cac_weights_heathrow[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = -{ +static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, @@ -853,8 +832,7 @@ static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_weights_cape_verde[] = -{ +static const struct si_cac_config_reg cac_weights_cape_verde[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, @@ -918,8 +896,7 @@ static const struct si_cac_config_reg cac_weights_cape_verde[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg lcac_cape_verde[] = -{ +static const struct si_cac_config_reg lcac_cape_verde[] = { { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, @@ -977,13 +954,11 @@ static const struct si_cac_config_reg lcac_cape_verde[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_override_cape_verde[] = -{ +static const struct si_cac_config_reg cac_override_cape_verde[] = { { 0xFFFFFFFF } }; -static const struct si_powertune_data powertune_data_cape_verde = -{ +static const struct si_powertune_data powertune_data_cape_verde = { ((1 << 16) | 0x6993), 5, 0, @@ -1013,8 +988,7 @@ static const struct si_powertune_data powertune_data_cape_verde = true }; -static const struct si_dte_data dte_data_cape_verde = -{ +static const struct si_dte_data dte_data_cape_verde = { { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 0, @@ -1031,8 +1005,7 @@ static const struct si_dte_data dte_data_cape_verde = false }; -static const struct si_dte_data dte_data_venus_xtx = -{ +static const struct 
si_dte_data dte_data_venus_xtx = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 }, 5, @@ -1049,8 +1022,7 @@ static const struct si_dte_data dte_data_venus_xtx = true }; -static const struct si_dte_data dte_data_venus_xt = -{ +static const struct si_dte_data dte_data_venus_xt = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 }, 5, @@ -1067,8 +1039,7 @@ static const struct si_dte_data dte_data_venus_xt = true }; -static const struct si_dte_data dte_data_venus_pro = -{ +static const struct si_dte_data dte_data_venus_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 }, 5, @@ -1085,8 +1056,7 @@ static const struct si_dte_data dte_data_venus_pro = true }; -static struct si_cac_config_reg cac_weights_oland[] = -{ +static struct si_cac_config_reg cac_weights_oland[] = { { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, @@ -1150,8 +1120,7 @@ static struct si_cac_config_reg cac_weights_oland[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_weights_mars_pro[] = -{ +static const struct si_cac_config_reg cac_weights_mars_pro[] = { { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, @@ -1215,8 +1184,7 @@ static const struct si_cac_config_reg cac_weights_mars_pro[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_weights_mars_xt[] = -{ +static const struct si_cac_config_reg cac_weights_mars_xt[] = { { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, @@ -1280,8 +1248,7 @@ static const struct si_cac_config_reg cac_weights_mars_xt[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_weights_oland_pro[] = -{ +static const struct si_cac_config_reg cac_weights_oland_pro[] = { { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, @@ -1345,8 +1312,7 @@ static const struct si_cac_config_reg cac_weights_oland_pro[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg cac_weights_oland_xt[] = -{ +static const struct si_cac_config_reg cac_weights_oland_xt[] = { { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, @@ -1410,8 +1376,7 @@ static const struct si_cac_config_reg cac_weights_oland_xt[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg lcac_oland[] = -{ +static const struct si_cac_config_reg lcac_oland[] = { { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, @@ -1457,8 +1422,7 @@ static const struct si_cac_config_reg lcac_oland[] = { 0xFFFFFFFF } }; -static const struct si_cac_config_reg lcac_mars_pro[] = -{ +static const struct si_cac_config_reg lcac_mars_pro[] = { { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, @@ -1504,13 +1468,11 @@ static const struct si_cac_config_reg lcac_mars_pro[] = { 0xFFFFFFFF } }; -static 
const struct si_cac_config_reg cac_override_oland[] = -{ +static const struct si_cac_config_reg cac_override_oland[] = { { 0xFFFFFFFF } }; -static const struct si_powertune_data powertune_data_oland = -{ +static const struct si_powertune_data powertune_data_oland = { ((1 << 16) | 0x6993), 5, 0, @@ -1540,8 +1502,7 @@ static const struct si_powertune_data powertune_data_oland = true }; -static const struct si_powertune_data powertune_data_mars_pro = -{ +static const struct si_powertune_data powertune_data_mars_pro = { ((1 << 16) | 0x6993), 5, 0, @@ -1571,8 +1532,7 @@ static const struct si_powertune_data powertune_data_mars_pro = true }; -static const struct si_dte_data dte_data_oland = -{ +static const struct si_dte_data dte_data_oland = { { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 0, @@ -1589,8 +1549,7 @@ static const struct si_dte_data dte_data_oland = false }; -static const struct si_dte_data dte_data_mars_pro = -{ +static const struct si_dte_data dte_data_mars_pro = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, @@ -1607,8 +1566,7 @@ static const struct si_dte_data dte_data_mars_pro = true }; -static const struct si_dte_data dte_data_sun_xt = -{ +static const struct si_dte_data dte_data_sun_xt = { { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, { 0x0, 0x0, 0x0, 0x0, 0x0 }, 5, @@ -1626,8 +1584,7 @@ static const struct si_dte_data dte_data_sun_xt = }; -static const struct si_cac_config_reg cac_weights_hainan[] = -{ +static const struct si_cac_config_reg cac_weights_hainan[] = { { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND }, { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND }, { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND }, @@ -1691,8 +1648,7 @@ static const struct si_cac_config_reg cac_weights_hainan[] = { 0xFFFFFFFF } }; -static const struct si_powertune_data powertune_data_hainan = -{ +static const struct si_powertune_data powertune_data_hainan = { ((1 << 16) | 0x6993), 5, 0, diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h index aa857906ef93..4887edebd348 100644 --- a/drivers/gpu/drm/radeon/si_dpm.h +++ b/drivers/gpu/drm/radeon/si_dpm.h @@ -26,15 +26,13 @@ #include "ni_dpm.h" #include "sislands_smc.h" -enum si_cac_config_reg_type -{ +enum si_cac_config_reg_type { SISLANDS_CACCONFIG_MMR = 0, SISLANDS_CACCONFIG_CGIND, SISLANDS_CACCONFIG_MAX }; -struct si_cac_config_reg -{ +struct si_cac_config_reg { u32 offset; u32 mask; u32 shift; @@ -42,8 +40,7 @@ struct si_cac_config_reg enum si_cac_config_reg_type type; }; -struct si_powertune_data -{ +struct si_powertune_data { u32 cac_window; u32 l2_lta_window_size_default; u8 lts_truncate_default; @@ -56,8 +53,7 @@ struct si_powertune_data bool enable_powertune_by_default; }; -struct si_dyn_powertune_data -{ +struct si_dyn_powertune_data { u32 cac_leakage; s32 leakage_minimum_temperature; u32 wintime; @@ -68,8 +64,7 @@ struct si_dyn_powertune_data bool disable_uvd_powertune; }; -struct si_dte_data -{ +struct si_dte_data { u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; u32 k; @@ -122,8 +117,7 @@ struct si_mc_reg_table { #define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2 #define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3 -struct si_leakage_voltage_entry -{ +struct si_leakage_voltage_entry { u16 voltage; u16 leakage_index; }; @@ -131,8 +125,7 @@ struct si_leakage_voltage_entry #define SISLANDS_LEAKAGE_INDEX0 0xff01 #define SISLANDS_MAX_LEAKAGE_COUNT 4 -struct si_leakage_voltage -{ +struct si_leakage_voltage { u16 count; 
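The si_cac_config_reg tables running through this file are sentinel-terminated rather than counted: an entry whose offset field is 0xFFFFFFFF marks the end, which is why every table above closes with { 0xFFFFFFFF }. A hedged sketch of the walk (the direct RREG32/WREG32 access below is a simplification; CGIND-typed entries really go through an indirect config window, omitted here):

static void program_cac_config(struct radeon_device *rdev,
			       const struct si_cac_config_reg *regs)
{
	while (regs->offset != 0xFFFFFFFF) {
		u32 data = RREG32(regs->offset);

		/* Shift the new value into the field selected by the mask. */
		data &= ~regs->mask;
		data |= regs->value << regs->shift;
		WREG32(regs->offset, data);
		regs++;
	}
}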
struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT]; }; diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h index 75a380a15292..985d720dbc0d 100644 --- a/drivers/gpu/drm/radeon/smu7.h +++ b/drivers/gpu/drm/radeon/smu7.h @@ -82,8 +82,7 @@ #define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT) -struct SMU7_PIDController -{ +struct SMU7_PIDController { uint32_t Ki; int32_t LFWindupUL; int32_t LFWindupLL; @@ -117,8 +116,7 @@ typedef struct SMU7_PIDController SMU7_PIDController; #define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000 #define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000 -struct SMU7_Firmware_Header -{ +struct SMU7_Firmware_Header { uint32_t Digest[5]; uint32_t Version; uint32_t HeaderSize; diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h index 0b0b404ff091..1f63cbbd6515 100644 --- a/drivers/gpu/drm/radeon/smu7_discrete.h +++ b/drivers/gpu/drm/radeon/smu7_discrete.h @@ -35,8 +35,7 @@ #define SMU7_NUM_GPU_TES 1 #define SMU7_NUM_NON_TES 2 -struct SMU7_SoftRegisters -{ +struct SMU7_SoftRegisters { uint32_t RefClockFrequency; uint32_t PmTimerP; uint32_t FeatureEnables; @@ -89,8 +88,7 @@ struct SMU7_SoftRegisters typedef struct SMU7_SoftRegisters SMU7_SoftRegisters; -struct SMU7_Discrete_VoltageLevel -{ +struct SMU7_Discrete_VoltageLevel { uint16_t Voltage; uint16_t StdVoltageHiSidd; uint16_t StdVoltageLoSidd; @@ -100,8 +98,7 @@ struct SMU7_Discrete_VoltageLevel typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel; -struct SMU7_Discrete_GraphicsLevel -{ +struct SMU7_Discrete_GraphicsLevel { uint32_t Flags; uint32_t MinVddc; uint32_t MinVddcPhases; @@ -131,8 +128,7 @@ struct SMU7_Discrete_GraphicsLevel typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel; -struct SMU7_Discrete_ACPILevel -{ +struct SMU7_Discrete_ACPILevel { uint32_t Flags; uint32_t MinVddc; uint32_t MinVddcPhases; @@ -153,8 +149,7 @@ struct SMU7_Discrete_ACPILevel typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel; -struct SMU7_Discrete_Ulv -{ +struct SMU7_Discrete_Ulv { uint32_t CcPwrDynRm; uint32_t CcPwrDynRm1; uint16_t VddcOffset; @@ -165,8 +160,7 @@ struct SMU7_Discrete_Ulv typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv; -struct SMU7_Discrete_MemoryLevel -{ +struct SMU7_Discrete_MemoryLevel { uint32_t MinVddc; uint32_t MinVddcPhases; uint32_t MinVddci; @@ -206,8 +200,7 @@ struct SMU7_Discrete_MemoryLevel typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel; -struct SMU7_Discrete_LinkLevel -{ +struct SMU7_Discrete_LinkLevel { uint8_t PcieGenSpeed; uint8_t PcieLaneCount; uint8_t EnabledForActivity; @@ -220,8 +213,7 @@ struct SMU7_Discrete_LinkLevel typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel; -struct SMU7_Discrete_MCArbDramTimingTableEntry -{ +struct SMU7_Discrete_MCArbDramTimingTableEntry { uint32_t McArbDramTiming; uint32_t McArbDramTiming2; uint8_t McArbBurstTime; @@ -230,15 +222,13 @@ struct SMU7_Discrete_MCArbDramTimingTableEntry typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry; -struct SMU7_Discrete_MCArbDramTimingTable -{ +struct SMU7_Discrete_MCArbDramTimingTable { SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; }; typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable; -struct SMU7_Discrete_UvdLevel -{ +struct SMU7_Discrete_UvdLevel { uint32_t VclkFrequency; uint32_t DclkFrequency; uint16_t MinVddc; @@ 
-250,8 +240,7 @@ struct SMU7_Discrete_UvdLevel typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel; -struct SMU7_Discrete_ExtClkLevel -{ +struct SMU7_Discrete_ExtClkLevel { uint32_t Frequency; uint16_t MinVoltage; uint8_t MinPhases; @@ -260,8 +249,7 @@ struct SMU7_Discrete_ExtClkLevel typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel; -struct SMU7_Discrete_StateInfo -{ +struct SMU7_Discrete_StateInfo { uint32_t SclkFrequency; uint32_t MclkFrequency; uint32_t VclkFrequency; @@ -285,8 +273,7 @@ struct SMU7_Discrete_StateInfo typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo; -struct SMU7_Discrete_DpmTable -{ +struct SMU7_Discrete_DpmTable { SMU7_PIDController GraphicsPIDController; SMU7_PIDController MemoryPIDController; SMU7_PIDController LinkPIDController; @@ -406,23 +393,20 @@ typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable; #define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16 #define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY -struct SMU7_Discrete_MCRegisterAddress -{ +struct SMU7_Discrete_MCRegisterAddress { uint16_t s0; uint16_t s1; }; typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress; -struct SMU7_Discrete_MCRegisterSet -{ +struct SMU7_Discrete_MCRegisterSet { uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; }; typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet; -struct SMU7_Discrete_MCRegisters -{ +struct SMU7_Discrete_MCRegisters { uint8_t last; uint8_t reserved[3]; SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; @@ -431,8 +415,7 @@ struct SMU7_Discrete_MCRegisters typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters; -struct SMU7_Discrete_FanTable -{ +struct SMU7_Discrete_FanTable { uint16_t FdoMode; int16_t TempMin; int16_t TempMed; diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h index 78ada9ffd508..e130f52fe8d6 100644 --- a/drivers/gpu/drm/radeon/smu7_fusion.h +++ b/drivers/gpu/drm/radeon/smu7_fusion.h @@ -36,8 +36,7 @@ #define SMU7_NUM_NON_TES 2 // All 'soft registers' should be uint32_t. -struct SMU7_SoftRegisters -{ +struct SMU7_SoftRegisters { uint32_t RefClockFrequency; uint32_t PmTimerP; uint32_t FeatureEnables; @@ -80,8 +79,7 @@ struct SMU7_SoftRegisters typedef struct SMU7_SoftRegisters SMU7_SoftRegisters; -struct SMU7_Fusion_GraphicsLevel -{ +struct SMU7_Fusion_GraphicsLevel { uint32_t MinVddNb; uint32_t SclkFrequency; @@ -111,8 +109,7 @@ struct SMU7_Fusion_GraphicsLevel typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel; -struct SMU7_Fusion_GIOLevel -{ +struct SMU7_Fusion_GIOLevel { uint8_t EnabledForActivity; uint8_t LclkDid; uint8_t Vid; @@ -137,8 +134,7 @@ struct SMU7_Fusion_GIOLevel typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel; // UVD VCLK/DCLK state (level) definition. -struct SMU7_Fusion_UvdLevel -{ +struct SMU7_Fusion_UvdLevel { uint32_t VclkFrequency; uint32_t DclkFrequency; uint16_t MinVddNb; @@ -155,8 +151,7 @@ struct SMU7_Fusion_UvdLevel typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel; // Clocks for other external blocks (VCE, ACP, SAMU). 
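A note on the SMU7_* headers being restyled here: these structures describe tables shared with the SMU firmware, so their layout is a fixed ABI and must not change while braces move. A build-time guard such as the following is one defensive pattern; it is a hypothetical addition, not part of the patch, and the expected size simply follows from the two uint16_t members of SMU7_Discrete_MCRegisterAddress shown above:

#include <linux/build_bug.h>

/* Two uint16_t members, so the firmware-visible size must stay 4 bytes. */
static_assert(sizeof(SMU7_Discrete_MCRegisterAddress) == 4,
	      "SMU7 MC register address layout must match the SMU firmware");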
-struct SMU7_Fusion_ExtClkLevel -{ +struct SMU7_Fusion_ExtClkLevel { uint32_t Frequency; uint16_t MinVoltage; uint8_t Divider; @@ -166,8 +161,7 @@ struct SMU7_Fusion_ExtClkLevel }; typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel; -struct SMU7_Fusion_ACPILevel -{ +struct SMU7_Fusion_ACPILevel { uint32_t Flags; uint32_t MinVddNb; uint32_t SclkFrequency; @@ -181,8 +175,7 @@ struct SMU7_Fusion_ACPILevel typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel; -struct SMU7_Fusion_NbDpm -{ +struct SMU7_Fusion_NbDpm { uint8_t DpmXNbPsHi; uint8_t DpmXNbPsLo; uint8_t Dpm0PgNbPsHi; @@ -197,8 +190,7 @@ struct SMU7_Fusion_NbDpm typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm; -struct SMU7_Fusion_StateInfo -{ +struct SMU7_Fusion_StateInfo { uint32_t SclkFrequency; uint32_t LclkFrequency; uint32_t VclkFrequency; @@ -214,8 +206,7 @@ struct SMU7_Fusion_StateInfo typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo; -struct SMU7_Fusion_DpmTable -{ +struct SMU7_Fusion_DpmTable { uint32_t SystemFlags; SMU7_PIDController GraphicsPIDController; @@ -230,12 +221,12 @@ struct SMU7_Fusion_DpmTable uint8_t SamuLevelCount; uint16_t FpsHighT; - SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE]; + SMU7_Fusion_GraphicsLevel GraphicsLevel[SMU__NUM_SCLK_DPM_STATE]; SMU7_Fusion_ACPILevel ACPILevel; - SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD]; - SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE]; - SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP]; - SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU]; + SMU7_Fusion_UvdLevel UvdLevel[SMU7_MAX_LEVELS_UVD]; + SMU7_Fusion_ExtClkLevel VceLevel[SMU7_MAX_LEVELS_VCE]; + SMU7_Fusion_ExtClkLevel AcpLevel[SMU7_MAX_LEVELS_ACP]; + SMU7_Fusion_ExtClkLevel SamuLevel[SMU7_MAX_LEVELS_SAMU]; uint8_t UvdBootLevel; uint8_t VceBootLevel; @@ -266,10 +257,9 @@ struct SMU7_Fusion_DpmTable }; -struct SMU7_Fusion_GIODpmTable -{ +struct SMU7_Fusion_GIODpmTable { - SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO]; + SMU7_Fusion_GIOLevel GIOLevel[SMU7_MAX_LEVELS_GIO]; SMU7_PIDController GioPIDController; diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index d49c145db437..21d27e6235f3 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -33,8 +33,7 @@ #define SUMO_MINIMUM_ENGINE_CLOCK 800 #define BOOST_DPM_LEVEL 7 -static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] = -{ +static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] = { SUMO_UTC_DFLT_00, SUMO_UTC_DFLT_01, SUMO_UTC_DFLT_02, @@ -52,8 +51,7 @@ static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] = SUMO_UTC_DFLT_14, }; -static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = -{ +static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = { SUMO_DTC_DFLT_00, SUMO_DTC_DFLT_01, SUMO_DTC_DFLT_02, @@ -109,11 +107,11 @@ static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable) local1 = RREG32(CG_CGTT_LOCAL_1); if (enable) { - WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) ); - WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) ); + WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK)); + WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK)); } else { - WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) ); - WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) ); + WREG32(CG_CGTT_LOCAL_0, 
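/*
 * The (value & MASK) | (old & ~MASK) expressions in these clock-gating
 * helpers are plain field merges: bits covered by the mask take the new
 * value, every other bit keeps its current contents. Equivalently, as a
 * sketch (merge_masked() is a hypothetical helper, not in the driver):
 *
 *	static inline u32 merge_masked(u32 old, u32 val, u32 mask)
 *	{
 *		return (val & mask) | (old & ~mask);
 *	}
 */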
(0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK)); + WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK)); } } @@ -702,9 +700,9 @@ static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev, u32 nbps1_new = 0; if (old_ps != NULL) - nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0; + nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0; - nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0; + nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0; if (nbps1_old == 0 && nbps1_new == 1) sumo_smu_notify_alt_vddnb_change(rdev, 1, 1); diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index ef1cc7bad20a..b9a2c7ccc881 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -39,8 +39,7 @@ #ifndef TRINITY_MGCG_SEQUENCE #define TRINITY_MGCG_SEQUENCE 100 -static const u32 trinity_mgcg_shls_default[] = -{ +static const u32 trinity_mgcg_shls_default[] = { /* Register, Value, Mask */ 0x0000802c, 0xc0000000, 0xffffffff, 0x00003fc4, 0xc0000000, 0xffffffff, @@ -122,8 +121,7 @@ static const u32 trinity_mgcg_shls_default[] = #ifndef TRINITY_SYSLS_SEQUENCE #define TRINITY_SYSLS_SEQUENCE 100 -static const u32 trinity_sysls_disable[] = -{ +static const u32 trinity_sysls_disable[] = { /* Register, Value, Mask */ 0x0000d0c0, 0x00000000, 0xffffffff, 0x0000d8c0, 0x00000000, 0xffffffff, @@ -146,8 +144,7 @@ static const u32 trinity_sysls_disable[] = 0x00006dfc, 0x0000007f, 0xffffffff }; -static const u32 trinity_sysls_enable[] = -{ +static const u32 trinity_sysls_enable[] = { /* Register, Value, Mask */ 0x000055e8, 0x00000001, 0xffffffff, 0x0000d0bc, 0x00000100, 0xffffffff, @@ -169,8 +166,7 @@ static const u32 trinity_sysls_enable[] = }; #endif -static const u32 trinity_override_mgpg_sequences[] = -{ +static const u32 trinity_override_mgpg_sequences[] = { /* Register, Value */ 0x00000200, 0xE030032C, 0x00000204, 0x00000FFF, @@ -366,9 +362,9 @@ static void trinity_mg_clockgating_enable(struct radeon_device *rdev, local1 = RREG32_CG(CG_CGTT_LOCAL_1); WREG32_CG(CG_CGTT_LOCAL_0, - (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) ); + (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK)); WREG32_CG(CG_CGTT_LOCAL_1, - (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) ); + (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK)); WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_ENABLE); } else { @@ -378,9 +374,9 @@ static void trinity_mg_clockgating_enable(struct radeon_device *rdev, local1 = RREG32_CG(CG_CGTT_LOCAL_1); WREG32_CG(CG_CGTT_LOCAL_0, - CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK) ); + CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK)); WREG32_CG(CG_CGTT_LOCAL_1, - CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK) ); + CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK)); } } @@ -1434,7 +1430,7 @@ static void trinity_adjust_uvd_state(struct radeon_device *rdev, if (pi->uvd_dpm && r600_is_uvd_state(rps->class, rps->class2)) { high_index = trinity_get_uvd_clock_index(rdev, rps); - switch(high_index) { + switch (high_index) { case 3: case 2: low_index = 1; diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h index c261657750ca..431e2b68d21e 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.h +++ 
b/drivers/gpu/drm/radeon/trinity_dpm.h @@ -64,8 +64,7 @@ struct trinity_ps { #define TRINITY_NUM_NBPSTATES 4 -struct trinity_uvd_clock_table_entry -{ +struct trinity_uvd_clock_table_entry { u32 vclk; u32 dclk; u8 vclk_did; diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 58557c2263a7..5684639d20a6 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -142,7 +142,7 @@ int uvd_v1_0_resume(struct radeon_device *rdev) addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); - WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr)); + WREG32(UVD_FW_START, *((uint32_t *)rdev->uvd.cpu_addr)); return 0; } diff --git a/drivers/gpu/drm/renesas/Kconfig b/drivers/gpu/drm/renesas/Kconfig index 3777dad17f81..21862a8ef710 100644 --- a/drivers/gpu/drm/renesas/Kconfig +++ b/drivers/gpu/drm/renesas/Kconfig @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only source "drivers/gpu/drm/renesas/rcar-du/Kconfig" +source "drivers/gpu/drm/renesas/rz-du/Kconfig" source "drivers/gpu/drm/renesas/shmobile/Kconfig" diff --git a/drivers/gpu/drm/renesas/Makefile b/drivers/gpu/drm/renesas/Makefile index ec0e89e7a592..b8d8bc53967f 100644 --- a/drivers/gpu/drm/renesas/Makefile +++ b/drivers/gpu/drm/renesas/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += rcar-du/ +obj-y += rz-du/ obj-$(CONFIG_DRM_SHMOBILE) += shmobile/ diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig new file mode 100644 index 000000000000..5f0db2c5fee6 --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +config DRM_RZG2L_DU + tristate "DRM Support for RZ/G2L Display Unit" + depends on ARCH_RZG2L || COMPILE_TEST + depends on DRM && OF + depends on VIDEO_RENESAS_VSP1 + select DRM_GEM_DMA_HELPER + select DRM_KMS_HELPER + select VIDEOMODE_HELPERS + help + Choose this option if you have an RZ/G2L-family chipset. + If M is selected, the module will be called rzg2l-du-drm.
diff --git a/drivers/gpu/drm/renesas/rz-du/Makefile b/drivers/gpu/drm/renesas/rz-du/Makefile new file mode 100644 index 000000000000..663b82a2577f --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +rzg2l-du-drm-y := rzg2l_du_crtc.o \ + rzg2l_du_drv.o \ + rzg2l_du_encoder.o \ + rzg2l_du_kms.o \ + +rzg2l-du-drm-$(CONFIG_VIDEO_RENESAS_VSP1) += rzg2l_du_vsp.o +obj-$(CONFIG_DRM_RZG2L_DU) += rzg2l-du-drm.o diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c new file mode 100644 index 000000000000..6e7aac6219be --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c @@ -0,0 +1,422 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * RZ/G2L Display Unit CRTCs + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_crtc.c + */ + +#include <linux/clk.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_dma_helper.h> +#include <drm/drm_vblank.h> + +#include "rzg2l_du_crtc.h" +#include "rzg2l_du_drv.h" +#include "rzg2l_du_encoder.h" +#include "rzg2l_du_kms.h" +#include "rzg2l_du_vsp.h" + +#define DU_MCR0 0x00 +#define DU_MCR0_DI_EN BIT(8) + +#define DU_DITR0 0x10 +#define DU_DITR0_DEMD_HIGH (BIT(8) | BIT(9)) +#define DU_DITR0_VSPOL BIT(16) +#define DU_DITR0_HSPOL BIT(17) + +#define DU_DITR1 0x14 +#define DU_DITR1_VSA(x) ((x) << 0) +#define DU_DITR1_VACTIVE(x) ((x) << 16) + +#define DU_DITR2 0x18 +#define DU_DITR2_VBP(x) ((x) << 0) +#define DU_DITR2_VFP(x) ((x) << 16) + +#define DU_DITR3 0x1c +#define DU_DITR3_HSA(x) ((x) << 0) +#define DU_DITR3_HACTIVE(x) ((x) << 16) + +#define DU_DITR4 0x20 +#define DU_DITR4_HBP(x) ((x) << 0) +#define DU_DITR4_HFP(x) ((x) << 16) + +#define DU_MCR1 0x40 +#define DU_MCR1_PB_AUTOCLR BIT(16) + +#define DU_PBCR0 0x4c +#define DU_PBCR0_PB_DEP(x) ((x) << 0) + +/* ----------------------------------------------------------------------------- + * Hardware Setup + */ + +static void rzg2l_du_crtc_set_display_timing(struct rzg2l_du_crtc *rcrtc) +{ + const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; + unsigned long mode_clock = mode->clock * 1000; + u32 ditr0, ditr1, ditr2, ditr3, ditr4, pbcr0; + struct rzg2l_du_device *rcdu = rcrtc->dev; + + clk_prepare_enable(rcrtc->rzg2l_clocks.dclk); + clk_set_rate(rcrtc->rzg2l_clocks.dclk, mode_clock); + + ditr0 = (DU_DITR0_DEMD_HIGH + | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DU_DITR0_VSPOL : 0) + | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 
DU_DITR0_HSPOL : 0)); + + ditr1 = DU_DITR1_VSA(mode->vsync_end - mode->vsync_start) + | DU_DITR1_VACTIVE(mode->vdisplay); + + ditr2 = DU_DITR2_VBP(mode->vtotal - mode->vsync_end) + | DU_DITR2_VFP(mode->vsync_start - mode->vdisplay); + + ditr3 = DU_DITR3_HSA(mode->hsync_end - mode->hsync_start) + | DU_DITR3_HACTIVE(mode->hdisplay); + + ditr4 = DU_DITR4_HBP(mode->htotal - mode->hsync_end) + | DU_DITR4_HFP(mode->hsync_start - mode->hdisplay); + + pbcr0 = DU_PBCR0_PB_DEP(0x1f); + + writel(ditr0, rcdu->mmio + DU_DITR0); + writel(ditr1, rcdu->mmio + DU_DITR1); + writel(ditr2, rcdu->mmio + DU_DITR2); + writel(ditr3, rcdu->mmio + DU_DITR3); + writel(ditr4, rcdu->mmio + DU_DITR4); + writel(pbcr0, rcdu->mmio + DU_PBCR0); + + /* Enable auto clear */ + writel(DU_MCR1_PB_AUTOCLR, rcdu->mmio + DU_MCR1); +} + +/* ----------------------------------------------------------------------------- + * Page Flip + */ + +void rzg2l_du_crtc_finish_page_flip(struct rzg2l_du_crtc *rcrtc) +{ + struct drm_pending_vblank_event *event; + struct drm_device *dev = rcrtc->crtc.dev; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + event = rcrtc->event; + rcrtc->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (!event) + return; + + spin_lock_irqsave(&dev->event_lock, flags); + drm_crtc_send_vblank_event(&rcrtc->crtc, event); + wake_up(&rcrtc->flip_wait); + spin_unlock_irqrestore(&dev->event_lock, flags); + + drm_crtc_vblank_put(&rcrtc->crtc); +} + +static bool rzg2l_du_crtc_page_flip_pending(struct rzg2l_du_crtc *rcrtc) +{ + struct drm_device *dev = rcrtc->crtc.dev; + unsigned long flags; + bool pending; + + spin_lock_irqsave(&dev->event_lock, flags); + pending = rcrtc->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + + return pending; +} + +static void rzg2l_du_crtc_wait_page_flip(struct rzg2l_du_crtc *rcrtc) +{ + struct rzg2l_du_device *rcdu = rcrtc->dev; + + if (wait_event_timeout(rcrtc->flip_wait, + !rzg2l_du_crtc_page_flip_pending(rcrtc), + msecs_to_jiffies(50))) + return; + + dev_warn(rcdu->dev, "page flip timeout\n"); + + rzg2l_du_crtc_finish_page_flip(rcrtc); +} + +/* ----------------------------------------------------------------------------- + * Start/Stop and Suspend/Resume + */ + +static void rzg2l_du_crtc_setup(struct rzg2l_du_crtc *rcrtc) +{ + /* Configure display timings and output routing */ + rzg2l_du_crtc_set_display_timing(rcrtc); + + /* Enable the VSP compositor. */ + rzg2l_du_vsp_enable(rcrtc); + + /* Turn vertical blanking interrupt reporting on. */ + drm_crtc_vblank_on(&rcrtc->crtc); +} + +static int rzg2l_du_crtc_get(struct rzg2l_du_crtc *rcrtc) +{ + int ret; + + /* + * Guard against double-get, as the function is called from both the + * .atomic_enable() and .atomic_flush() handlers. 
+ */ + if (rcrtc->initialized) + return 0; + + ret = clk_prepare_enable(rcrtc->rzg2l_clocks.aclk); + if (ret < 0) + return ret; + + ret = clk_prepare_enable(rcrtc->rzg2l_clocks.pclk); + if (ret < 0) + goto error_bus_clock; + + ret = reset_control_deassert(rcrtc->rstc); + if (ret < 0) + goto error_peri_clock; + + rzg2l_du_crtc_setup(rcrtc); + rcrtc->initialized = true; + + return 0; + +error_peri_clock: + clk_disable_unprepare(rcrtc->rzg2l_clocks.pclk); +error_bus_clock: + clk_disable_unprepare(rcrtc->rzg2l_clocks.aclk); + return ret; +} + +static void rzg2l_du_crtc_put(struct rzg2l_du_crtc *rcrtc) +{ + clk_disable_unprepare(rcrtc->rzg2l_clocks.dclk); + reset_control_assert(rcrtc->rstc); + clk_disable_unprepare(rcrtc->rzg2l_clocks.pclk); + clk_disable_unprepare(rcrtc->rzg2l_clocks.aclk); + + rcrtc->initialized = false; +} + +static void rzg2l_du_start_stop(struct rzg2l_du_crtc *rcrtc, bool start) +{ + struct rzg2l_du_device *rcdu = rcrtc->dev; + + writel(start ? DU_MCR0_DI_EN : 0, rcdu->mmio + DU_MCR0); +} + +static void rzg2l_du_crtc_start(struct rzg2l_du_crtc *rcrtc) +{ + rzg2l_du_start_stop(rcrtc, true); +} + +static void rzg2l_du_crtc_stop(struct rzg2l_du_crtc *rcrtc) +{ + struct drm_crtc *crtc = &rcrtc->crtc; + + /* + * Disable vertical blanking interrupt reporting. We first need to wait + * for page flip completion before stopping the CRTC as userspace + * expects page flips to eventually complete. + */ + rzg2l_du_crtc_wait_page_flip(rcrtc); + drm_crtc_vblank_off(crtc); + + /* Disable the VSP compositor. */ + rzg2l_du_vsp_disable(rcrtc); + + rzg2l_du_start_stop(rcrtc, false); +} + +/* ----------------------------------------------------------------------------- + * CRTC Functions + */ + +static void rzg2l_du_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc); + + rzg2l_du_crtc_get(rcrtc); + + rzg2l_du_crtc_start(rcrtc); +} + +static void rzg2l_du_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc); + + rzg2l_du_crtc_stop(rcrtc); + rzg2l_du_crtc_put(rcrtc); + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); +} + +static void rzg2l_du_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc); + struct drm_device *dev = rcrtc->crtc.dev; + unsigned long flags; + + WARN_ON(!crtc->state->enable); + + if (crtc->state->event) { + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&dev->event_lock, flags); + rcrtc->event = crtc->state->event; + crtc->state->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + } + + rzg2l_du_vsp_atomic_flush(rcrtc); +} + +static const struct drm_crtc_helper_funcs crtc_helper_funcs = { + .atomic_flush = rzg2l_du_crtc_atomic_flush, + .atomic_enable = rzg2l_du_crtc_atomic_enable, + .atomic_disable = rzg2l_du_crtc_atomic_disable, +}; + +static struct drm_crtc_state * +rzg2l_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct rzg2l_du_crtc_state *state; + struct rzg2l_du_crtc_state *copy; + + if (WARN_ON(!crtc->state)) + return NULL; + + state = to_rzg2l_crtc_state(crtc->state); + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state); + + return &copy->state; +} +
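To make the porch arithmetic in rzg2l_du_crtc_set_display_timing() above concrete, take the standard CEA 1280x720@60 timing as a worked example (the mode values are assumed for illustration: hdisplay 1280, hsync_start 1390, hsync_end 1430, htotal 1650, vdisplay 720, vsync_start 725, vsync_end 730, vtotal 750). The function would then program HSA = 1430 - 1390 = 40, HFP = 1390 - 1280 = 110, HBP = 1650 - 1430 = 220, VSA = 730 - 725 = 5, VFP = 725 - 720 = 5 and VBP = 750 - 730 = 20 through the DU_DITR1..DU_DITR4 field macros, and clk_set_rate() would request 74.25 MHz computed from mode->clock.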
+static void rzg2l_du_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + __drm_atomic_helper_crtc_destroy_state(state); + kfree(to_rzg2l_crtc_state(state)); +} + +static void rzg2l_du_crtc_reset(struct drm_crtc *crtc) +{ + struct rzg2l_du_crtc_state *state; + + if (crtc->state) { + rzg2l_du_crtc_atomic_destroy_state(crtc, crtc->state); + crtc->state = NULL; + } + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return; + + __drm_atomic_helper_crtc_reset(crtc, &state->state); +} + +static int rzg2l_du_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc); + + rcrtc->vblank_enable = true; + + return 0; +} + +static void rzg2l_du_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct rzg2l_du_crtc *rcrtc = to_rzg2l_crtc(crtc); + + rcrtc->vblank_enable = false; +} + +static const struct drm_crtc_funcs crtc_funcs_rz = { + .reset = rzg2l_du_crtc_reset, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = rzg2l_du_crtc_atomic_duplicate_state, + .atomic_destroy_state = rzg2l_du_crtc_atomic_destroy_state, + .enable_vblank = rzg2l_du_crtc_enable_vblank, + .disable_vblank = rzg2l_du_crtc_disable_vblank, +}; + +/* ----------------------------------------------------------------------------- + * Initialization + */ + +int rzg2l_du_crtc_create(struct rzg2l_du_device *rcdu) +{ + struct rzg2l_du_crtc *rcrtc = &rcdu->crtcs[0]; + struct drm_crtc *crtc = &rcrtc->crtc; + struct drm_plane *primary; + int ret; + + rcrtc->rstc = devm_reset_control_get_shared(rcdu->dev, NULL); + if (IS_ERR(rcrtc->rstc)) { + dev_err(rcdu->dev, "can't get cpg reset\n"); + return PTR_ERR(rcrtc->rstc); + } + + rcrtc->rzg2l_clocks.aclk = devm_clk_get(rcdu->dev, "aclk"); + if (IS_ERR(rcrtc->rzg2l_clocks.aclk)) { + dev_err(rcdu->dev, "no axi clock for DU\n"); + return PTR_ERR(rcrtc->rzg2l_clocks.aclk); + } + + rcrtc->rzg2l_clocks.pclk = devm_clk_get(rcdu->dev, "pclk"); + if (IS_ERR(rcrtc->rzg2l_clocks.pclk)) { + dev_err(rcdu->dev, "no peripheral clock for DU\n"); + return PTR_ERR(rcrtc->rzg2l_clocks.pclk); + } + + rcrtc->rzg2l_clocks.dclk = devm_clk_get(rcdu->dev, "vclk"); + if (IS_ERR(rcrtc->rzg2l_clocks.dclk)) { + dev_err(rcdu->dev, "no video clock for DU\n"); + return PTR_ERR(rcrtc->rzg2l_clocks.dclk); + } + + init_waitqueue_head(&rcrtc->flip_wait); + rcrtc->dev = rcdu; + + primary = rzg2l_du_vsp_get_drm_plane(rcrtc, rcrtc->vsp_pipe); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + ret = drmm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL, + &crtc_funcs_rz, NULL); + if (ret < 0) + return ret; + + drm_crtc_helper_add(crtc, &crtc_helper_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h new file mode 100644 index 000000000000..cbba38acc377 --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * RZ/G2L Display Unit CRTCs + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_crtc.h + */ + +#ifndef __RZG2L_DU_CRTC_H__ +#define __RZG2L_DU_CRTC_H__ + +#include <linux/container_of.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/wait.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_writeback.h> + +#include <media/vsp1.h> + +struct clk; +struct reset_control; +struct rzg2l_du_vsp; +struct rzg2l_du_format_info; + +/** + * struct rzg2l_du_crtc - the CRTC, 
representing a DU superposition processor + * @crtc: base DRM CRTC + * @dev: the DU device + * @initialized: whether the CRTC has been initialized and clocks enabled + * @vblank_enable: whether vblank events are enabled on this CRTC + * @event: event to post when the pending page flip completes + * @flip_wait: wait queue used to signal page flip completion + * @vsp: VSP feeding video to this CRTC + * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC + * @rstc: reset controller + * @rzg2l_clocks: the bus, main and video clock + */ +struct rzg2l_du_crtc { + struct drm_crtc crtc; + + struct rzg2l_du_device *dev; + bool initialized; + + bool vblank_enable; + struct drm_pending_vblank_event *event; + wait_queue_head_t flip_wait; + + struct rzg2l_du_vsp *vsp; + unsigned int vsp_pipe; + + const char *const *sources; + unsigned int sources_count; + + struct reset_control *rstc; + struct { + struct clk *aclk; + struct clk *pclk; + struct clk *dclk; + } rzg2l_clocks; +}; + +static inline struct rzg2l_du_crtc *to_rzg2l_crtc(struct drm_crtc *c) +{ + return container_of(c, struct rzg2l_du_crtc, crtc); +} + +/** + * struct rzg2l_du_crtc_state - Driver-specific CRTC state + * @state: base DRM CRTC state + * @outputs: bitmask of the outputs (enum rzg2l_du_output) driven by this CRTC + */ +struct rzg2l_du_crtc_state { + struct drm_crtc_state state; + unsigned int outputs; +}; + +static inline struct rzg2l_du_crtc_state *to_rzg2l_crtc_state(struct drm_crtc_state *s) +{ + return container_of(s, struct rzg2l_du_crtc_state, state); +} + +int rzg2l_du_crtc_create(struct rzg2l_du_device *rcdu); + +void rzg2l_du_crtc_finish_page_flip(struct rzg2l_du_crtc *rcrtc); + +#endif /* __RZG2L_DU_CRTC_H__ */ diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c new file mode 100644 index 000000000000..470d34da1d6c --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * RZ/G2L Display Unit DRM driver + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_drv.c + */ + +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fbdev_generic.h> +#include <drm/drm_gem_dma_helper.h> +#include <drm/drm_probe_helper.h> + +#include "rzg2l_du_drv.h" +#include "rzg2l_du_kms.h" + +/* ----------------------------------------------------------------------------- + * Device Information + */ + +static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = { + .channels_mask = BIT(0), + .routes = { + [RZG2L_DU_OUTPUT_DSI0] = { + .possible_outputs = BIT(0), + .port = 0, + }, + [RZG2L_DU_OUTPUT_DPAD0] = { + .possible_outputs = BIT(0), + .port = 1, + } + } +}; + +static const struct of_device_id rzg2l_du_of_table[] = { + { .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, rzg2l_du_of_table); + +const char *rzg2l_du_output_name(enum rzg2l_du_output output) +{ + static const char * const names[] = { + [RZG2L_DU_OUTPUT_DSI0] = "DSI0", + [RZG2L_DU_OUTPUT_DPAD0] = "DPAD0" + }; + + if (output >= ARRAY_SIZE(names)) + return "UNKNOWN"; + + return names[output]; +} + +/* ----------------------------------------------------------------------------- + * DRM operations + */ + +DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops); + +static const struct drm_driver rzg2l_du_driver = { + 
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .dumb_create = rzg2l_du_dumb_create, + .fops = &rzg2l_du_fops, + .name = "rzg2l-du", + .desc = "Renesas RZ/G2L Display Unit", + .date = "20230410", + .major = 1, + .minor = 0, +}; + +/* ----------------------------------------------------------------------------- + * Platform driver + */ + +static void rzg2l_du_remove(struct platform_device *pdev) +{ + struct rzg2l_du_device *rcdu = platform_get_drvdata(pdev); + struct drm_device *ddev = &rcdu->ddev; + + drm_dev_unregister(ddev); + drm_atomic_helper_shutdown(ddev); + + drm_kms_helper_poll_fini(ddev); +} + +static void rzg2l_du_shutdown(struct platform_device *pdev) +{ + struct rzg2l_du_device *rcdu = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(&rcdu->ddev); +} + +static int rzg2l_du_probe(struct platform_device *pdev) +{ + struct rzg2l_du_device *rcdu; + int ret; + + if (drm_firmware_drivers_only()) + return -ENODEV; + + /* Allocate and initialize the RZ/G2L device structure. */ + rcdu = devm_drm_dev_alloc(&pdev->dev, &rzg2l_du_driver, + struct rzg2l_du_device, ddev); + if (IS_ERR(rcdu)) + return PTR_ERR(rcdu); + + rcdu->dev = &pdev->dev; + rcdu->info = of_device_get_match_data(rcdu->dev); + + platform_set_drvdata(pdev, rcdu); + + /* I/O resources */ + rcdu->mmio = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rcdu->mmio)) + return PTR_ERR(rcdu->mmio); + + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + /* DRM/KMS objects */ + ret = rzg2l_du_modeset_init(rcdu); + if (ret < 0) { + /* + * Don't use dev_err_probe(), as it would overwrite the probe + * deferral reason recorded in rzg2l_du_modeset_init(). + */ + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to initialize DRM/KMS (%d)\n", ret); + goto error; + } + + /* + * Register the DRM device with the core and the connectors with + * sysfs. 
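On the dev_err_probe() remark in rzg2l_du_probe() above: dev_err_probe(dev, err, ...) records its formatted message as the deferral reason when err is -EPROBE_DEFER, which would overwrite the more specific reason already recorded deeper inside rzg2l_du_modeset_init(); hence the manual check. A side-by-side sketch of the two patterns (illustrative only, message text borrowed from the probe path):

	/* Would clobber the deferral reason recorded further down the chain: */
	return dev_err_probe(&pdev->dev, ret, "failed to initialize DRM/KMS\n");

	/* What the driver does instead: log real errors, stay quiet on deferral. */
	if (ret != -EPROBE_DEFER)
		dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);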
+ */ + ret = drm_dev_register(&rcdu->ddev, 0); + if (ret) + goto error; + + drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev)); + + drm_fbdev_generic_setup(&rcdu->ddev, 32); + + return 0; + +error: + drm_kms_helper_poll_fini(&rcdu->ddev); + return ret; +} + +static struct platform_driver rzg2l_du_platform_driver = { + .probe = rzg2l_du_probe, + .remove_new = rzg2l_du_remove, + .shutdown = rzg2l_du_shutdown, + .driver = { + .name = "rzg2l-du", + .of_match_table = rzg2l_du_of_table, + }, +}; + +module_platform_driver(rzg2l_du_platform_driver); + +MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/G2L Display Unit DRM Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h new file mode 100644 index 000000000000..58806c2a8f2b --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * RZ/G2L Display Unit DRM driver + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_drv.h + */ + +#ifndef __RZG2L_DU_DRV_H__ +#define __RZG2L_DU_DRV_H__ + +#include <linux/kernel.h> + +#include <drm/drm_device.h> + +#include "rzg2l_du_crtc.h" +#include "rzg2l_du_vsp.h" + +struct device; +struct drm_property; + +enum rzg2l_du_output { + RZG2L_DU_OUTPUT_DSI0, + RZG2L_DU_OUTPUT_DPAD0, + RZG2L_DU_OUTPUT_MAX, +}; + +/* + * struct rzg2l_du_output_routing - Output routing specification + * @possible_outputs: bitmask of possible outputs + * @port: device tree port number corresponding to this output route + * + * The DU has 2 possible outputs (DPAD0, DSI0). Output routing data + * specify the valid SoC outputs, which CRTC can drive the output, and the type + * of in-SoC encoder for the output. 
+ */ +struct rzg2l_du_output_routing { + unsigned int possible_outputs; + unsigned int port; +}; + +/* + * struct rzg2l_du_device_info - DU model-specific information + * @channels_mask: bit mask of available DU channels + * @routes: array of CRTC to output routes, indexed by output (RZG2L_DU_OUTPUT_*) + */ +struct rzg2l_du_device_info { + unsigned int channels_mask; + struct rzg2l_du_output_routing routes[RZG2L_DU_OUTPUT_MAX]; +}; + +#define RZG2L_DU_MAX_CRTCS 1 +#define RZG2L_DU_MAX_VSPS 1 +#define RZG2L_DU_MAX_DSI 1 + +struct rzg2l_du_device { + struct device *dev; + const struct rzg2l_du_device_info *info; + + void __iomem *mmio; + + struct drm_device ddev; + + struct rzg2l_du_crtc crtcs[RZG2L_DU_MAX_CRTCS]; + unsigned int num_crtcs; + + struct rzg2l_du_vsp vsps[RZG2L_DU_MAX_VSPS]; +}; + +static inline struct rzg2l_du_device *to_rzg2l_du_device(struct drm_device *dev) +{ + return container_of(dev, struct rzg2l_du_device, ddev); +} + +const char *rzg2l_du_output_name(enum rzg2l_du_output output); + +#endif /* __RZG2L_DU_DRV_H__ */ diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c new file mode 100644 index 000000000000..339cbaaea0b5 --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * RZ/G2L Display Unit Encoder + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_encoder.c + */ + +#include <linux/export.h> +#include <linux/of.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_panel.h> + +#include "rzg2l_du_drv.h" +#include "rzg2l_du_encoder.h" + +/* ----------------------------------------------------------------------------- + * Encoder + */ + +static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = { +}; + +int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu, + enum rzg2l_du_output output, + struct device_node *enc_node) +{ + struct rzg2l_du_encoder *renc; + struct drm_connector *connector; + struct drm_bridge *bridge; + int ret; + + /* Locate the DRM bridge from the DT node. */ + bridge = of_drm_find_bridge(enc_node); + if (!bridge) + return -EPROBE_DEFER; + + dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n", + enc_node, rzg2l_du_output_name(output)); + + renc = drmm_encoder_alloc(&rcdu->ddev, struct rzg2l_du_encoder, base, + &rzg2l_du_encoder_funcs, DRM_MODE_ENCODER_NONE, + NULL); + if (IS_ERR(renc)) + return PTR_ERR(renc); + + renc->output = output; + + /* Attach the bridge to the encoder. */ + ret = drm_bridge_attach(&renc->base, bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + dev_err(rcdu->dev, + "failed to attach bridge %pOF for output %s (%d)\n", + bridge->of_node, rzg2l_du_output_name(output), ret); + return ret; + } + + /* Create the connector for the chain of bridges. 
*/ + connector = drm_bridge_connector_init(&rcdu->ddev, &renc->base); + if (IS_ERR(connector)) { + dev_err(rcdu->dev, + "failed to create connector for output %s (%ld)\n", + rzg2l_du_output_name(output), PTR_ERR(connector)); + return PTR_ERR(connector); + } + + return drm_connector_attach_encoder(connector, &renc->base); +} diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h new file mode 100644 index 000000000000..3e430c1f6132 --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * RZ/G2L Display Unit Encoder + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_encoder.h + */ + +#ifndef __RZG2L_DU_ENCODER_H__ +#define __RZG2L_DU_ENCODER_H__ + +#include <drm/drm_encoder.h> +#include <linux/container_of.h> + +struct rzg2l_du_device; + +struct rzg2l_du_encoder { + struct drm_encoder base; + enum rzg2l_du_output output; +}; + +static inline struct rzg2l_du_encoder *to_rzg2l_encoder(struct drm_encoder *e) +{ + return container_of(e, struct rzg2l_du_encoder, base); +} + +int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu, + enum rzg2l_du_output output, + struct device_node *enc_node); + +#endif /* __RZG2L_DU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c new file mode 100644 index 000000000000..07b312b6f81e --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * RZ/G2L Display Unit Mode Setting + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_kms.c + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_dma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include <linux/device.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#include "rzg2l_du_crtc.h" +#include "rzg2l_du_drv.h" +#include "rzg2l_du_encoder.h" +#include "rzg2l_du_kms.h" +#include "rzg2l_du_vsp.h" + +/* ----------------------------------------------------------------------------- + * Format helpers + */ + +static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = { + { + .fourcc = DRM_FORMAT_XRGB8888, + .v4l2 = V4L2_PIX_FMT_XBGR32, + .bpp = 32, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_ARGB8888, + .v4l2 = V4L2_PIX_FMT_ABGR32, + .bpp = 32, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_RGB888, + .v4l2 = V4L2_PIX_FMT_BGR24, + .bpp = 24, + .planes = 1, + .hsub = 1, + } +}; + +const struct rzg2l_du_format_info *rzg2l_du_format_info(u32 fourcc) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(rzg2l_du_format_infos); ++i) { + if (rzg2l_du_format_infos[i].fourcc == fourcc) + return &rzg2l_du_format_infos[i]; + } + + return NULL; +} + +/* ----------------------------------------------------------------------------- + * Frame buffer + */ + +int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + unsigned int align = 16 * args->bpp / 8; + + args->pitch = roundup(min_pitch, align); + + return
drm_gem_dma_dumb_create_internal(file, dev, args); +} + +static struct drm_framebuffer * +rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + const struct rzg2l_du_format_info *format; + unsigned int max_pitch; + + format = rzg2l_du_format_info(mode_cmd->pixel_format); + if (!format) { + dev_dbg(dev->dev, "unsupported pixel format %p4cc\n", + &mode_cmd->pixel_format); + return ERR_PTR(-EINVAL); + } + + /* + * On RZ/G2L the memory interface is handled by the VSP, which limits + * the pitch to 65535 bytes. + */ + max_pitch = 65535; + if (mode_cmd->pitches[0] > max_pitch) { + dev_dbg(dev->dev, "invalid pitch value %u\n", + mode_cmd->pitches[0]); + return ERR_PTR(-EINVAL); + } + + return drm_gem_fb_create(dev, file_priv, mode_cmd); +} + +/* ----------------------------------------------------------------------------- + * Initialization + */ + +static const struct drm_mode_config_helper_funcs rzg2l_du_mode_config_helper = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + +static const struct drm_mode_config_funcs rzg2l_du_mode_config_funcs = { + .fb_create = rzg2l_du_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int rzg2l_du_encoders_init_one(struct rzg2l_du_device *rcdu, + enum rzg2l_du_output output, + struct of_endpoint *ep) +{ + struct device_node *entity; + int ret; + + /* Locate the connected entity and initialize the encoder. */ + entity = of_graph_get_remote_port_parent(ep->local_node); + if (!entity) { + dev_dbg(rcdu->dev, "unconnected endpoint %pOF, skipping\n", + ep->local_node); + return -ENODEV; + } + + if (!of_device_is_available(entity)) { + dev_dbg(rcdu->dev, + "connected entity %pOF is disabled, skipping\n", + entity); + of_node_put(entity); + return -ENODEV; + } + + ret = rzg2l_du_encoder_init(rcdu, output, entity); + if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK) + dev_warn(rcdu->dev, + "failed to initialize encoder %pOF on output %s (%d), skipping\n", + entity, rzg2l_du_output_name(output), ret); + + of_node_put(entity); + + return ret; +} + +static int rzg2l_du_encoders_init(struct rzg2l_du_device *rcdu) +{ + struct device_node *np = rcdu->dev->of_node; + struct device_node *ep_node; + unsigned int num_encoders = 0; + + /* + * Iterate over the endpoints and create one encoder for each output + * pipeline. + */ + for_each_endpoint_of_node(np, ep_node) { + enum rzg2l_du_output output; + struct of_endpoint ep; + unsigned int i; + int ret; + + ret = of_graph_parse_endpoint(ep_node, &ep); + if (ret < 0) { + of_node_put(ep_node); + return ret; + } + + /* Find the output route corresponding to the port number. */ + for (i = 0; i < RZG2L_DU_OUTPUT_MAX; ++i) { + if (rcdu->info->routes[i].port == ep.port) { + output = i; + break; + } + } + + if (i == RZG2L_DU_OUTPUT_MAX) { + dev_warn(rcdu->dev, + "port %u references nonexistent output, skipping\n", + ep.port); + continue; + } + + /* Process the output pipeline.
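As a worked example of the rzg2l_du_dumb_create() pitch arithmetic shown a little earlier: for a hypothetical 1366-pixel-wide XRGB8888 dumb buffer (bpp = 32), min_pitch = DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes and align = 16 * 32 / 8 = 64 bytes, so the reported pitch becomes roundup(5464, 64) = 5504 bytes; in other words, each scanline is padded to a 16-pixel boundary before the request is handed to drm_gem_dma_dumb_create_internal().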
*/ + ret = rzg2l_du_encoders_init_one(rcdu, output, &ep); + if (ret < 0) { + if (ret == -EPROBE_DEFER) { + of_node_put(ep_node); + return ret; + } + + continue; + } + + num_encoders++; + } + + return num_encoders; +} + +static int rzg2l_du_vsps_init(struct rzg2l_du_device *rcdu) +{ + const struct device_node *np = rcdu->dev->of_node; + const char *vsps_prop_name = "renesas,vsps"; + struct of_phandle_args args; + struct { + struct device_node *np; + unsigned int crtcs_mask; + } vsps[RZG2L_DU_MAX_VSPS] = { { NULL, }, }; + unsigned int vsps_count = 0; + unsigned int cells; + unsigned int i; + int ret; + + /* + * First parse the DT vsps property to populate the list of VSPs. Each + * entry contains a pointer to the VSP DT node and a bitmask of the + * connected DU CRTCs. + */ + ret = of_property_count_u32_elems(np, vsps_prop_name); + cells = ret / rcdu->num_crtcs - 1; + if (cells != 1) + return -EINVAL; + + for (i = 0; i < rcdu->num_crtcs; ++i) { + unsigned int j; + + ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name, + cells, i, &args); + if (ret < 0) + goto done; + + /* + * Add the VSP to the list or update the corresponding existing + * entry if the VSP has already been added. + */ + for (j = 0; j < vsps_count; ++j) { + if (vsps[j].np == args.np) + break; + } + + if (j < vsps_count) + of_node_put(args.np); + else + vsps[vsps_count++].np = args.np; + + vsps[j].crtcs_mask |= BIT(i); + + /* + * Store the VSP pointer and pipe index in the CRTC. If the + * second cell of the 'renesas,vsps' specifier isn't present, + * default to 0. + */ + rcdu->crtcs[i].vsp = &rcdu->vsps[j]; + rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0; + } + + /* + * Then initialize all the VSPs from the node pointers and CRTCs bitmask + * computed previously. + */ + for (i = 0; i < vsps_count; ++i) { + struct rzg2l_du_vsp *vsp = &rcdu->vsps[i]; + + vsp->index = i; + vsp->dev = rcdu; + + ret = rzg2l_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask); + if (ret) + goto done; + } + +done: + for (i = 0; i < ARRAY_SIZE(vsps); ++i) + of_node_put(vsps[i].np); + + return ret; +} + +int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu) +{ + struct drm_device *dev = &rcdu->ddev; + struct drm_encoder *encoder; + unsigned int num_encoders; + int ret; + + ret = drmm_mode_config_init(dev); + if (ret) + return ret; + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.normalize_zpos = true; + dev->mode_config.funcs = &rzg2l_du_mode_config_funcs; + dev->mode_config.helper_private = &rzg2l_du_mode_config_helper; + + /* + * The RZ DU uses the VSP1 for memory access, and is limited + * to frame sizes of 1920x1080. + */ + dev->mode_config.max_width = 1920; + dev->mode_config.max_height = 1080; + + rcdu->num_crtcs = hweight8(rcdu->info->channels_mask); + + /* + * Initialize vertical blanking interrupts handling. Start with vblank + * disabled for all CRTCs. + */ + ret = drm_vblank_init(dev, rcdu->num_crtcs); + if (ret < 0) + return ret; + + /* Initialize the compositors. */ + ret = rzg2l_du_vsps_init(rcdu); + if (ret < 0) + return ret; + + /* Create the CRTCs. */ + ret = rzg2l_du_crtc_create(rcdu); + if (ret < 0) + return ret; + + /* Initialize the encoders. */ + ret = rzg2l_du_encoders_init(rcdu); + if (ret < 0) + return dev_err_probe(rcdu->dev, ret, + "failed to initialize encoders\n"); + + if (ret == 0) { + dev_err(rcdu->dev, "error: no encoder could be initialized\n"); + return -EINVAL; + } + + num_encoders = ret; + + /* + * Set the possible CRTCs and possible clones. 
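
A worked illustration of the 'renesas,vsps' parsing in rzg2l_du_vsps_init() above, for a hypothetical two-CRTC SoC sharing a single VSP; the DT values are invented for the example:

/*
 * Hypothetical DT fragment:
 *
 *	du: display@10890000 {
 *		renesas,vsps = <&vspd0 0>, <&vspd0 1>;
 *	};
 *
 * of_property_count_u32_elems() returns 4, so cells = 4 / 2 - 1 = 1 as
 * required. Both loop iterations resolve args.np to &vspd0: the first
 * stores the node in vsps[0] with crtcs_mask = BIT(0); the second finds
 * the existing entry, drops the duplicate reference with of_node_put(),
 * and ORs in BIT(1). The two CRTCs end up on the same rzg2l_du_vsp,
 * with vsp_pipe 0 and 1 taken from the second specifier cell.
 */
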
There's always at least + * one way for all encoders to clone each other, set all bits in the + * possible clones field. + */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct rzg2l_du_encoder *renc = to_rzg2l_encoder(encoder); + const struct rzg2l_du_output_routing *route = + &rcdu->info->routes[renc->output]; + + encoder->possible_crtcs = route->possible_outputs; + encoder->possible_clones = (1 << num_encoders) - 1; + } + + drm_mode_config_reset(dev); + + drm_kms_helper_poll_init(dev); + + return 0; +} diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h new file mode 100644 index 000000000000..876e97cfbf45 --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * RZ/G2L Display Unit Mode Setting + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_kms.h + */ + +#ifndef __RZG2L_DU_KMS_H__ +#define __RZG2L_DU_KMS_H__ + +#include <linux/types.h> + +struct dma_buf_attachment; +struct drm_file; +struct drm_device; +struct drm_gem_object; +struct drm_mode_create_dumb; +struct rzg2l_du_device; +struct sg_table; + +struct rzg2l_du_format_info { + u32 fourcc; + u32 v4l2; + unsigned int bpp; + unsigned int planes; + unsigned int hsub; +}; + +const struct rzg2l_du_format_info *rzg2l_du_format_info(u32 fourcc); + +int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu); + +int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); + +struct drm_gem_object * +rzg2l_du_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +#endif /* __RZG2L_DU_KMS_H__ */ diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c new file mode 100644 index 000000000000..0ae6331d6430 --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * RZ/G2L Display Unit VSP-Based Compositor + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_vsp.c + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_blend.h> +#include <drm/drm_crtc.h> +#include <drm/drm_fb_dma_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_dma_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_vblank.h> + +#include <linux/bitops.h> +#include <linux/dma-mapping.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> + +#include <media/vsp1.h> + +#include "rzg2l_du_drv.h" +#include "rzg2l_du_kms.h" +#include "rzg2l_du_vsp.h" + +static void rzg2l_du_vsp_complete(void *private, unsigned int status, u32 crc) +{ + struct rzg2l_du_crtc *crtc = private; + + if (crtc->vblank_enable) + drm_crtc_handle_vblank(&crtc->crtc); + + if (status & VSP1_DU_STATUS_COMPLETE) + rzg2l_du_crtc_finish_page_flip(crtc); + + drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc); +} + +void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc) +{ + const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; + struct vsp1_du_lif_config cfg = { + .width = mode->hdisplay, + .height = mode->vdisplay, + .interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE, + .callback = rzg2l_du_vsp_complete, + .callback_data = crtc, + }; + + vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, 
&cfg); +} + +void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc) +{ + vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, NULL); +} + +void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc) +{ + struct vsp1_du_atomic_pipe_config cfg = { { 0, } }; + struct rzg2l_du_crtc_state *state; + + state = to_rzg2l_crtc_state(crtc->crtc.state); + + vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg); +} + +struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc, + unsigned int pipe_index) +{ + struct rzg2l_du_device *rcdu = crtc->vsp->dev; + struct drm_plane *plane = NULL; + + drm_for_each_plane(plane, &rcdu->ddev) { + struct rzg2l_du_vsp_plane *vsp_plane = to_rzg2l_vsp_plane(plane); + + if (vsp_plane->index == pipe_index) + break; + } + + return plane ? plane : ERR_PTR(-EINVAL); +} + +static const u32 rzg2l_du_vsp_formats[] = { + DRM_FORMAT_RGB332, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, + DRM_FORMAT_YUV422, + DRM_FORMAT_YVU422, + DRM_FORMAT_YUV444, + DRM_FORMAT_YVU444, +}; + +static void rzg2l_du_vsp_plane_setup(struct rzg2l_du_vsp_plane *plane) +{ + struct rzg2l_du_vsp_plane_state *state = + to_rzg2l_vsp_plane_state(plane->plane.state); + struct rzg2l_du_crtc *crtc = to_rzg2l_crtc(state->state.crtc); + struct drm_framebuffer *fb = plane->plane.state->fb; + const struct rzg2l_du_format_info *format; + struct vsp1_du_atomic_config cfg = { + .pixelformat = 0, + .pitch = fb->pitches[0], + .alpha = state->state.alpha >> 8, + .zpos = state->state.zpos, + }; + u32 fourcc = state->format->fourcc; + unsigned int i; + + cfg.src.left = state->state.src.x1 >> 16; + cfg.src.top = state->state.src.y1 >> 16; + cfg.src.width = drm_rect_width(&state->state.src) >> 16; + cfg.src.height = drm_rect_height(&state->state.src) >> 16; + + cfg.dst.left = state->state.dst.x1; + cfg.dst.top = state->state.dst.y1; + cfg.dst.width = drm_rect_width(&state->state.dst); + cfg.dst.height = drm_rect_height(&state->state.dst); + + for (i = 0; i < state->format->planes; ++i) { + struct drm_gem_dma_object *gem; + + gem = drm_fb_dma_get_gem_obj(fb, i); + cfg.mem[i] = gem->dma_addr + fb->offsets[i]; + } + + if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) { + switch (fourcc) { + case DRM_FORMAT_ARGB1555: + fourcc = DRM_FORMAT_XRGB1555; + break; + + case DRM_FORMAT_ARGB4444: + fourcc = DRM_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB8888: + fourcc = DRM_FORMAT_XRGB8888; + break; + } + } + + format = rzg2l_du_format_info(fourcc); + cfg.pixelformat = format->v4l2; + + cfg.premult = state->state.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI; + + vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe, + plane->index, &cfg); +} + +static int __rzg2l_du_vsp_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state, + const struct rzg2l_du_format_info **format) +{ + struct drm_crtc_state *crtc_state; + int ret; + + if (!state->crtc) { + /* + * The visible field is not reset by the DRM core but only + * updated by drm_atomic_helper_check_plane_state, set it + * manually. 
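
The shifts in rzg2l_du_vsp_plane_setup() above convert between DRM and VSP number formats: plane source coordinates are 16.16 fixed point, and DRM alpha is 16 bits wide while the VSP takes 8. A hand-checked sketch, with an invented helper name and illustrative values:

#include <linux/types.h>

/* DRM plane src rectangles are 16.16 fixed point; drop the fraction. */
static inline u32 example_fixed16_to_int(u32 coord)
{
	return coord >> 16;
}

/*
 * src.x1 = 0x00140000 (20.0 in 16.16) -> 20 pixels.
 * alpha = 0xffff (fully opaque in DRM's 16 bits) >> 8 = 0xff, the 8-bit
 * maximum handed to the VSP in vsp1_du_atomic_config.alpha.
 */
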
+	 */
+		state->visible = false;
+		*format = NULL;
+		return 0;
+	}
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+						  DRM_PLANE_NO_SCALING,
+						  DRM_PLANE_NO_SCALING,
+						  true, true);
+	if (ret < 0)
+		return ret;
+
+	if (!state->visible) {
+		*format = NULL;
+		return 0;
+	}
+
+	*format = rzg2l_du_format_info(state->fb->format->format);
+
+	return 0;
+}
+
+static int rzg2l_du_vsp_plane_atomic_check(struct drm_plane *plane,
+					   struct drm_atomic_state *state)
+{
+	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+										 plane);
+	struct rzg2l_du_vsp_plane_state *rstate = to_rzg2l_vsp_plane_state(new_plane_state);
+
+	return __rzg2l_du_vsp_plane_atomic_check(plane, new_plane_state, &rstate->format);
+}
+
+static void rzg2l_du_vsp_plane_atomic_update(struct drm_plane *plane,
+					     struct drm_atomic_state *state)
+{
+	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+	struct rzg2l_du_vsp_plane *rplane = to_rzg2l_vsp_plane(plane);
+	struct rzg2l_du_crtc *crtc = to_rzg2l_crtc(old_state->crtc);
+
+	if (new_state->visible)
+		rzg2l_du_vsp_plane_setup(rplane);
+	else if (old_state->crtc)
+		vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
+				      rplane->index, NULL);
+}
+
+static const struct drm_plane_helper_funcs rzg2l_du_vsp_plane_helper_funcs = {
+	.atomic_check = rzg2l_du_vsp_plane_atomic_check,
+	.atomic_update = rzg2l_du_vsp_plane_atomic_update,
+};
+
+static struct drm_plane_state *
+rzg2l_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+	struct rzg2l_du_vsp_plane_state *copy;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+	if (!copy)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
+
+	return &copy->state;
+}
+
+static void rzg2l_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane,
+						    struct drm_plane_state *state)
+{
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(to_rzg2l_vsp_plane_state(state));
+}
+
+static void rzg2l_du_vsp_plane_reset(struct drm_plane *plane)
+{
+	struct rzg2l_du_vsp_plane_state *state;
+
+	if (plane->state) {
+		rzg2l_du_vsp_plane_atomic_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return;
+
+	__drm_atomic_helper_plane_reset(plane, &state->state);
}
+
+static const struct drm_plane_funcs rzg2l_du_vsp_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.reset = rzg2l_du_vsp_plane_reset,
+	.atomic_duplicate_state = rzg2l_du_vsp_plane_atomic_duplicate_state,
+	.atomic_destroy_state = rzg2l_du_vsp_plane_atomic_destroy_state,
+};
+
+static void rzg2l_du_vsp_cleanup(struct drm_device *dev, void *res)
+{
+	struct rzg2l_du_vsp *vsp = res;
+
+	put_device(vsp->vsp);
+}
+
+int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
+		      unsigned int crtcs)
+{
+	struct rzg2l_du_device *rcdu = vsp->dev;
+	struct platform_device *pdev;
+	unsigned int num_crtcs = hweight32(crtcs);
+	unsigned int num_planes = 2;
+	unsigned int i;
+	int ret;
+
+	/* Find the VSP device and initialize it.
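
rzg2l_du_vsp_init() pairs of_find_device_by_node(), which returns the platform device with a reference held, with a DRM-managed action so the matching put_device() runs when the drm_device goes away. A minimal sketch of that ownership pattern, with invented example_* names:

#include <drm/drm_managed.h>
#include <linux/of_platform.h>

static void example_put_device(struct drm_device *drm, void *res)
{
	put_device(res);	/* runs when the drm_device is released */
}

static int example_bind_vsp(struct drm_device *drm, struct device_node *np)
{
	/* Takes a reference on the underlying struct device. */
	struct platform_device *pdev = of_find_device_by_node(np);

	if (!pdev)
		return -ENXIO;

	/* On failure the action runs immediately, so nothing leaks. */
	return drmm_add_action_or_reset(drm, example_put_device, &pdev->dev);
}
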
*/ + pdev = of_find_device_by_node(np); + if (!pdev) + return -ENXIO; + + vsp->vsp = &pdev->dev; + + ret = drmm_add_action_or_reset(&rcdu->ddev, rzg2l_du_vsp_cleanup, vsp); + if (ret < 0) + return ret; + + ret = vsp1_du_init(vsp->vsp); + if (ret < 0) + return ret; + + for (i = 0; i < num_planes; ++i) { + enum drm_plane_type type = i < num_crtcs + ? DRM_PLANE_TYPE_PRIMARY + : DRM_PLANE_TYPE_OVERLAY; + struct rzg2l_du_vsp_plane *plane; + + plane = drmm_universal_plane_alloc(&rcdu->ddev, struct rzg2l_du_vsp_plane, + plane, crtcs, &rzg2l_du_vsp_plane_funcs, + rzg2l_du_vsp_formats, + ARRAY_SIZE(rzg2l_du_vsp_formats), + NULL, type, NULL); + if (IS_ERR(plane)) + return PTR_ERR(plane); + + plane->vsp = vsp; + plane->index = i; + + drm_plane_helper_add(&plane->plane, + &rzg2l_du_vsp_plane_helper_funcs); + } + + return 0; +} diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h new file mode 100644 index 000000000000..322eb80dcbaf --- /dev/null +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * RZ/G2L Display Unit VSP-Based Compositor + * + * Copyright (C) 2023 Renesas Electronics Corporation + * + * Based on rcar_du_vsp.h + */ + +#ifndef __RZG2L_DU_VSP_H__ +#define __RZG2L_DU_VSP_H__ + +#include <drm/drm_plane.h> +#include <linux/container_of.h> +#include <linux/scatterlist.h> + +struct device; +struct drm_framebuffer; +struct rzg2l_du_device; +struct rzg2l_du_format_info; +struct rzg2l_du_vsp; + +struct rzg2l_du_vsp_plane { + struct drm_plane plane; + struct rzg2l_du_vsp *vsp; + unsigned int index; +}; + +struct rzg2l_du_vsp { + unsigned int index; + struct device *vsp; + struct rzg2l_du_device *dev; +}; + +static inline struct rzg2l_du_vsp_plane *to_rzg2l_vsp_plane(struct drm_plane *p) +{ + return container_of(p, struct rzg2l_du_vsp_plane, plane); +} + +/** + * struct rzg2l_du_vsp_plane_state - Driver-specific plane state + * @state: base DRM plane state + * @format: information about the pixel format used by the plane + */ +struct rzg2l_du_vsp_plane_state { + struct drm_plane_state state; + + const struct rzg2l_du_format_info *format; +}; + +static inline struct rzg2l_du_vsp_plane_state * +to_rzg2l_vsp_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct rzg2l_du_vsp_plane_state, state); +} + +#if IS_ENABLED(CONFIG_VIDEO_RENESAS_VSP1) +int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np, + unsigned int crtcs); +void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc); +void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc); +void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc); +struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc, + unsigned int pipe_index); +#else +static inline int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np, + unsigned int crtcs) +{ + return -ENXIO; +} + +static inline void rzg2l_du_vsp_enable(struct rzg2l_du_crtc *crtc) { }; +static inline void rzg2l_du_vsp_disable(struct rzg2l_du_crtc *crtc) { }; +static inline void rzg2l_du_vsp_atomic_flush(struct rzg2l_du_crtc *crtc) { }; +static inline struct drm_plane *rzg2l_du_vsp_get_drm_plane(struct rzg2l_du_crtc *crtc, + unsigned int pipe_index) +{ + return ERR_PTR(-ENXIO); +} +#endif + +#endif /* __RZG2L_DU_VSP_H__ */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 85b3b4871a1d..fdd768bbd487 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ 
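
The Rockchip hunk that follows fixes an early return that skipped vop2_unlock(), leaving the lock held on the error path. A minimal, self-contained sketch of the shape of the bug and the single-exit pattern that avoids it; the example_* names are invented:

#include <linux/mutex.h>

struct example_vp {
	struct mutex lock;
	int clock;
};

static void example_atomic_enable(struct example_vp *vp)
{
	mutex_lock(&vp->lock);

	if (!vp->clock)		/* setup failed... */
		goto out;	/* ...but the lock must still be dropped */

	/* program the video port here */

out:
	mutex_unlock(&vp->lock);
}
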
b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1985,8 +1985,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc, clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags); } - if (!clock) + if (!clock) { + vop2_unlock(vop2); return; + } if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA && !(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT)) diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 06cedfe4b486..0f35f009b9d3 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -33,9 +33,7 @@ static struct kmem_cache *sched_fence_slab; static int __init drm_sched_fence_slab_init(void) { - sched_fence_slab = kmem_cache_create( - "drm_sched_fence", sizeof(struct drm_sched_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); + sched_fence_slab = KMEM_CACHE(drm_sched_fence, SLAB_HWCACHE_ALIGN); if (!sched_fence_slab) return -ENOMEM; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 8acbef7ae53d..7e90c9f95611 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1178,21 +1178,24 @@ static void drm_sched_run_job_work(struct work_struct *w) struct drm_sched_entity *entity; struct dma_fence *fence; struct drm_sched_fence *s_fence; - struct drm_sched_job *sched_job = NULL; + struct drm_sched_job *sched_job; int r; if (READ_ONCE(sched->pause_submit)) return; /* Find entity with a ready job */ - while (!sched_job && (entity = drm_sched_select_entity(sched))) { - sched_job = drm_sched_entity_pop_job(entity); - if (!sched_job) - complete_all(&entity->entity_idle); - } + entity = drm_sched_select_entity(sched); if (!entity) return; /* No more work */ + sched_job = drm_sched_entity_pop_job(entity); + if (!sched_job) { + complete_all(&entity->entity_idle); + drm_sched_run_job_queue(sched); + return; + } + s_fence = sched_job->s_fence; atomic_add(sched_job->credits, &sched->credit_count); diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 152375f3de2e..b7cf369b1906 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -30,19 +30,11 @@ #include "sun4i_drv.h" #include "sun4i_hdmi.h" -static inline struct sun4i_hdmi * -drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder) -{ - return container_of(encoder, struct sun4i_hdmi, - encoder); -} +#define drm_encoder_to_sun4i_hdmi(e) \ + container_of_const(e, struct sun4i_hdmi, encoder) -static inline struct sun4i_hdmi * -drm_connector_to_sun4i_hdmi(struct drm_connector *connector) -{ - return container_of(connector, struct sun4i_hdmi, - connector); -} +#define drm_connector_to_sun4i_hdmi(c) \ + container_of_const(c, struct sun4i_hdmi, connector) static int sun4i_hdmi_setup_avi_infoframes(struct sun4i_hdmi *hdmi, struct drm_display_mode *mode) @@ -70,19 +62,8 @@ static int sun4i_hdmi_setup_avi_infoframes(struct sun4i_hdmi *hdmi, return 0; } -static int sun4i_hdmi_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct drm_display_mode *mode = &crtc_state->mode; - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - return -EINVAL; - - return 0; -} - -static void sun4i_hdmi_disable(struct drm_encoder *encoder) +static void sun4i_hdmi_disable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); u32 val; @@ -96,37 +77,17 @@ static void 
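
The sched_fence conversion above is essentially cosmetic: KMEM_CACHE() from <linux/slab.h> stringifies the type for the cache name and fills in the size itself. A sketch of the equivalence; note the macro also derives the alignment via __alignof__() where the open-coded call passed 0:

#include <drm/gpu_scheduler.h>
#include <linux/slab.h>

static struct kmem_cache *example_fence_slab;

static int example_slab_init(void)
{
	/*
	 * Expands to kmem_cache_create("drm_sched_fence",
	 * sizeof(struct drm_sched_fence),
	 * __alignof__(struct drm_sched_fence), SLAB_HWCACHE_ALIGN, NULL).
	 */
	example_fence_slab = KMEM_CACHE(drm_sched_fence, SLAB_HWCACHE_ALIGN);

	return example_fence_slab ? 0 : -ENOMEM;
}
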
sun4i_hdmi_disable(struct drm_encoder *encoder) clk_disable_unprepare(hdmi->tmds_clk); } -static void sun4i_hdmi_enable(struct drm_encoder *encoder) +static void sun4i_hdmi_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); struct drm_display_info *display = &hdmi->connector.display_info; + unsigned int x, y; u32 val = 0; DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); - clk_prepare_enable(hdmi->tmds_clk); - - sun4i_hdmi_setup_avi_infoframes(hdmi, mode); - val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); - val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); - writel(val, hdmi->base + SUN4I_HDMI_PKT_CTRL_REG(0)); - - val = SUN4I_HDMI_VID_CTRL_ENABLE; - if (display->is_hdmi) - val |= SUN4I_HDMI_VID_CTRL_HDMI_MODE; - - writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); -} - -static void sun4i_hdmi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); - unsigned int x, y; - u32 val; - clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000); clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000); @@ -178,34 +139,76 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder, val |= SUN4I_HDMI_VID_TIMING_POL_VSYNC; writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG); + + clk_prepare_enable(hdmi->tmds_clk); + + sun4i_hdmi_setup_avi_infoframes(hdmi, mode); + val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); + val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); + writel(val, hdmi->base + SUN4I_HDMI_PKT_CTRL_REG(0)); + + val = SUN4I_HDMI_VID_CTRL_ENABLE; + if (display->is_hdmi) + val |= SUN4I_HDMI_VID_CTRL_HDMI_MODE; + + writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); } -static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder, - const struct drm_display_mode *mode) +static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = { + .atomic_disable = sun4i_hdmi_disable, + .atomic_enable = sun4i_hdmi_enable, +}; + +static enum drm_mode_status +sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector, + const struct drm_display_mode *mode, + unsigned long long clock) { - struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); - unsigned long rate = mode->clock * 1000; - unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */ + const struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); + unsigned long diff = clock / 200; /* +-0.5% allowed by HDMI spec */ long rounded_rate; + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_BAD; + /* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */ - if (rate > 165000000) + if (clock > 165000000) return MODE_CLOCK_HIGH; - rounded_rate = clk_round_rate(hdmi->tmds_clk, rate); + + rounded_rate = clk_round_rate(hdmi->tmds_clk, clock); if (rounded_rate > 0 && - max_t(unsigned long, rounded_rate, rate) - - min_t(unsigned long, rounded_rate, rate) < diff) + max_t(unsigned long, rounded_rate, clock) - + min_t(unsigned long, rounded_rate, clock) < diff) return MODE_OK; + return MODE_NOCLOCK; } -static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = { - .atomic_check = sun4i_hdmi_atomic_check, - .disable = sun4i_hdmi_disable, - .enable = sun4i_hdmi_enable, - .mode_set = sun4i_hdmi_mode_set, - .mode_valid = sun4i_hdmi_mode_valid, -}; +static int 
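
Worked numbers for the ±0.5% tolerance in sun4i_hdmi_connector_clock_valid() above, as a sketch with an invented helper name and an illustrative 74.25 MHz target:

#include <linux/minmax.h>

static bool example_rate_within_tolerance(unsigned long rounded,
					  unsigned long target)
{
	unsigned long diff = target / 200;	/* +-0.5%, per the HDMI spec */

	return max(rounded, target) - min(rounded, target) < diff;
}

/*
 * For target = 74250000 Hz: diff = 371250 Hz, so any clk_round_rate()
 * result in [73878751, 74621249] Hz is accepted as MODE_OK.
 */
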
sun4i_hdmi_connector_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + struct drm_connector_state *conn_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_crtc *crtc = conn_state->crtc; + struct drm_crtc_state *crtc_state = crtc->state; + struct drm_display_mode *mode = &crtc_state->adjusted_mode; + enum drm_mode_status status; + + status = sun4i_hdmi_connector_clock_valid(connector, mode, + mode->clock * 1000); + if (status != MODE_OK) + return -EINVAL; + + return 0; +} + +static enum drm_mode_status +sun4i_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return sun4i_hdmi_connector_clock_valid(connector, mode, + mode->clock * 1000); +} static int sun4i_hdmi_get_modes(struct drm_connector *connector) { @@ -251,6 +254,8 @@ static struct i2c_adapter *sun4i_hdmi_get_ddc(struct device *dev) } static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = { + .atomic_check = sun4i_hdmi_connector_atomic_check, + .mode_valid = sun4i_hdmi_connector_mode_valid, .get_modes = sun4i_hdmi_get_modes, }; diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index a719af1dc9a5..46170753699d 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -159,6 +159,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, if (gem->size < size) { err = -EINVAL; + drm_gem_object_put(gem); goto unreference; } diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c index ea2af6bd9abe..2f32fb2f12e7 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/drm/tests/drm_buddy_test.c @@ -8,6 +8,7 @@ #include <linux/prime_numbers.h> #include <linux/sched/signal.h> +#include <linux/sizes.h> #include <drm/drm_buddy.h> @@ -18,6 +19,92 @@ static inline u64 get_size(int order, u64 chunk_size) return (1 << order) * chunk_size; } +static void drm_test_buddy_alloc_contiguous(struct kunit *test) +{ + const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K; + unsigned long i, n_pages, total; + struct drm_buddy_block *block; + struct drm_buddy mm; + LIST_HEAD(left); + LIST_HEAD(middle); + LIST_HEAD(right); + LIST_HEAD(allocated); + + KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + + /* + * Idea is to fragment the address space by alternating block + * allocations between three different lists; one for left, middle and + * right. We can then free a list to simulate fragmentation. In + * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION, + * including the try_harder path. 
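
The tegra_fb_create() fix earlier in this hunk sequence plugs a reference leak: the object just looked up had not yet been stored in the array that the unreference label walks, so the error path must drop it explicitly. A condensed sketch of the pattern, with invented names and sizes:

#include <drm/drm_gem.h>

static int example_lookup_planes(struct drm_file *file, u32 *handles,
				 u64 *min_size, struct drm_gem_object **planes,
				 unsigned int num)
{
	unsigned int i;
	int err;

	for (i = 0; i < num; i++) {
		struct drm_gem_object *gem = drm_gem_object_lookup(file, handles[i]);

		if (!gem) {
			err = -ENXIO;
			goto unreference;
		}

		if (gem->size < min_size[i]) {
			err = -EINVAL;
			/* Not yet in planes[], so the label below won't put it. */
			drm_gem_object_put(gem);
			goto unreference;
		}

		planes[i] = gem;
	}

	return 0;

unreference:
	while (i--)
		drm_gem_object_put(planes[i]);

	return err;
}
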
+ */ + + i = 0; + n_pages = mm_size / ps; + do { + struct list_head *list; + int slot = i % 3; + + if (slot == 0) + list = &left; + else if (slot == 1) + list = &middle; + else + list = &right; + KUNIT_ASSERT_FALSE_MSG(test, + drm_buddy_alloc_blocks(&mm, 0, mm_size, + ps, ps, list, 0), + "buddy_alloc hit an error size=%u\n", + ps); + } while (++i < n_pages); + + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%u\n", 3 * ps); + + drm_buddy_free_list(&mm, &middle); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%u\n", 3 * ps); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 2 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%u\n", 2 * ps); + + drm_buddy_free_list(&mm, &right); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%u\n", 3 * ps); + /* + * At this point we should have enough contiguous space for 2 blocks, + * however they are never buddies (since we freed middle and right) so + * will require the try_harder logic to find them. + */ + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 2 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc hit an error size=%u\n", 2 * ps); + + drm_buddy_free_list(&mm, &left); + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc hit an error size=%u\n", 3 * ps); + + total = 0; + list_for_each_entry(block, &allocated, link) + total += drm_buddy_block_size(&mm, block); + + KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3); + + drm_buddy_free_list(&mm, &allocated); + drm_buddy_fini(&mm); +} + static void drm_test_buddy_alloc_pathological(struct kunit *test) { u64 mm_size, size, start = 0; @@ -280,6 +367,7 @@ static struct kunit_case drm_buddy_tests[] = { KUNIT_CASE(drm_test_buddy_alloc_optimistic), KUNIT_CASE(drm_test_buddy_alloc_pessimistic), KUNIT_CASE(drm_test_buddy_alloc_pathological), + KUNIT_CASE(drm_test_buddy_alloc_contiguous), {} }; diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c index c66aa2dc8d9d..44f82ed2a958 100644 --- a/drivers/gpu/drm/tests/drm_connector_test.c +++ b/drivers/gpu/drm/tests/drm_connector_test.c @@ -3,10 +3,175 @@ * Kunit test for drm_modes functions */ +#include <linux/i2c.h> + +#include <drm/drm_atomic_state_helper.h> #include <drm/drm_connector.h> +#include <drm/drm_drv.h> +#include <drm/drm_kunit_helpers.h> #include <kunit/test.h> +struct drm_connector_init_priv { + struct drm_device drm; + struct drm_connector connector; + struct i2c_adapter ddc; +}; + +static const struct drm_connector_funcs dummy_funcs = { + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .reset = drm_atomic_helper_connector_reset, +}; + +static int dummy_ddc_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, int num) +{ + return num; +} + +static u32 dummy_ddc_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm dummy_ddc_algorithm = { + .master_xfer = dummy_ddc_xfer, + .functionality = 
dummy_ddc_func, +}; + +static void i2c_del_adapter_wrapper(void *ptr) +{ + struct i2c_adapter *adap = ptr; + + i2c_del_adapter(adap); +} + +static int drm_test_connector_init(struct kunit *test) +{ + struct drm_connector_init_priv *priv; + struct device *dev; + int ret; + + dev = drm_kunit_helper_alloc_device(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); + + priv = drm_kunit_helper_alloc_drm_device(test, dev, + struct drm_connector_init_priv, drm, + DRIVER_MODESET | DRIVER_ATOMIC); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + strscpy(priv->ddc.name, "dummy-connector-ddc", sizeof(priv->ddc.name)); + priv->ddc.owner = THIS_MODULE; + priv->ddc.algo = &dummy_ddc_algorithm; + priv->ddc.dev.parent = dev; + + ret = i2c_add_adapter(&priv->ddc); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = kunit_add_action_or_reset(test, i2c_del_adapter_wrapper, &priv->ddc); + KUNIT_ASSERT_EQ(test, ret, 0); + + test->priv = priv; + return 0; +} + +/* + * Test that the registration of a bog standard connector works as + * expected and doesn't report any error. + */ +static void drm_test_drmm_connector_init(struct kunit *test) +{ + struct drm_connector_init_priv *priv = test->priv; + int ret; + + ret = drmm_connector_init(&priv->drm, &priv->connector, + &dummy_funcs, + DRM_MODE_CONNECTOR_HDMIA, + &priv->ddc); + KUNIT_EXPECT_EQ(test, ret, 0); +} + +/* + * Test that the registration of a connector without a DDC adapter + * doesn't report any error. + */ +static void drm_test_drmm_connector_init_null_ddc(struct kunit *test) +{ + struct drm_connector_init_priv *priv = test->priv; + int ret; + + ret = drmm_connector_init(&priv->drm, &priv->connector, + &dummy_funcs, + DRM_MODE_CONNECTOR_HDMIA, + NULL); + KUNIT_EXPECT_EQ(test, ret, 0); +} + +/* + * Test that the registration of a connector succeeds for all possible + * connector types. 
+ */ +static void drm_test_drmm_connector_init_type_valid(struct kunit *test) +{ + struct drm_connector_init_priv *priv = test->priv; + unsigned int connector_type = *(unsigned int *)test->param_value; + int ret; + + ret = drmm_connector_init(&priv->drm, &priv->connector, + &dummy_funcs, + connector_type, + &priv->ddc); + KUNIT_EXPECT_EQ(test, ret, 0); +} + +static const unsigned int drm_connector_init_type_valid_tests[] = { + DRM_MODE_CONNECTOR_Unknown, + DRM_MODE_CONNECTOR_VGA, + DRM_MODE_CONNECTOR_DVII, + DRM_MODE_CONNECTOR_DVID, + DRM_MODE_CONNECTOR_DVIA, + DRM_MODE_CONNECTOR_Composite, + DRM_MODE_CONNECTOR_SVIDEO, + DRM_MODE_CONNECTOR_LVDS, + DRM_MODE_CONNECTOR_Component, + DRM_MODE_CONNECTOR_9PinDIN, + DRM_MODE_CONNECTOR_DisplayPort, + DRM_MODE_CONNECTOR_HDMIA, + DRM_MODE_CONNECTOR_HDMIB, + DRM_MODE_CONNECTOR_TV, + DRM_MODE_CONNECTOR_eDP, + DRM_MODE_CONNECTOR_VIRTUAL, + DRM_MODE_CONNECTOR_DSI, + DRM_MODE_CONNECTOR_DPI, + DRM_MODE_CONNECTOR_WRITEBACK, + DRM_MODE_CONNECTOR_SPI, + DRM_MODE_CONNECTOR_USB, +}; + +static void drm_connector_init_type_desc(const unsigned int *type, char *desc) +{ + sprintf(desc, "%s", drm_get_connector_type_name(*type)); +} + +KUNIT_ARRAY_PARAM(drm_connector_init_type_valid, + drm_connector_init_type_valid_tests, + drm_connector_init_type_desc); + +static struct kunit_case drmm_connector_init_tests[] = { + KUNIT_CASE(drm_test_drmm_connector_init), + KUNIT_CASE(drm_test_drmm_connector_init_null_ddc), + KUNIT_CASE_PARAM(drm_test_drmm_connector_init_type_valid, + drm_connector_init_type_valid_gen_params), + { } +}; + +static struct kunit_suite drmm_connector_init_test_suite = { + .name = "drmm_connector_init", + .init = drm_test_connector_init, + .test_cases = drmm_connector_init_tests, +}; + struct drm_get_tv_mode_from_name_test { const char *name; enum drm_connector_tv_mode expected_mode; @@ -70,7 +235,10 @@ static struct kunit_suite drm_get_tv_mode_from_name_test_suite = { .test_cases = drm_get_tv_mode_from_name_tests, }; -kunit_test_suite(drm_get_tv_mode_from_name_test_suite); +kunit_test_suites( + &drmm_connector_init_test_suite, + &drm_get_tv_mode_from_name_test_suite +); MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index ca4f8e4c5d5d..d5317d13d3fc 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fourcc.h> #include <drm/drm_kunit_helpers.h> #include <drm/drm_managed.h> @@ -14,6 +16,8 @@ #define KUNIT_DEVICE_NAME "drm-kunit-mock-device" static const struct drm_mode_config_funcs drm_mode_config_funcs = { + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, }; /** @@ -161,5 +165,151 @@ drm_kunit_helper_atomic_state_alloc(struct kunit *test, } EXPORT_SYMBOL_GPL(drm_kunit_helper_atomic_state_alloc); +static const uint32_t default_plane_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static const uint64_t default_plane_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const struct drm_plane_helper_funcs default_plane_helper_funcs = { +}; + +static const struct drm_plane_funcs default_plane_funcs = { + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .reset = 
drm_atomic_helper_plane_reset,
+};
+
+/**
+ * drm_kunit_helper_create_primary_plane - Creates a mock primary plane for a KUnit test
+ * @test: The test context object
+ * @drm: The device to alloc the plane for
+ * @funcs: Callbacks for the new plane. Optional.
+ * @helper_funcs: Helpers callbacks for the new plane. Optional.
+ * @formats: array of supported formats (DRM_FORMAT\_\*). Optional.
+ * @num_formats: number of elements in @formats
+ * @modifiers: array of format modifiers terminated by
+ *             DRM_FORMAT_MOD_INVALID. Optional.
+ *
+ * This allocates and initializes a mock struct &drm_plane meant to be
+ * part of a mock device for a KUnit test.
+ *
+ * Resources will be cleaned up automatically.
+ *
+ * @funcs will default to the default helper implementations.
+ * @helper_funcs will default to an empty implementation. @formats will
+ * default to XRGB8888 only. @modifiers will default to a linear
+ * modifier only.
+ *
+ * Returns:
+ * A pointer to the new plane, or an ERR_PTR() otherwise.
+ */
+struct drm_plane *
+drm_kunit_helper_create_primary_plane(struct kunit *test,
+				      struct drm_device *drm,
+				      const struct drm_plane_funcs *funcs,
+				      const struct drm_plane_helper_funcs *helper_funcs,
+				      const uint32_t *formats,
+				      unsigned int num_formats,
+				      const uint64_t *modifiers)
+{
+	struct drm_plane *plane;
+
+	if (!funcs)
+		funcs = &default_plane_funcs;
+
+	if (!helper_funcs)
+		helper_funcs = &default_plane_helper_funcs;
+
+	if (!formats || !num_formats) {
+		formats = default_plane_formats;
+		num_formats = ARRAY_SIZE(default_plane_formats);
+	}
+
+	if (!modifiers)
+		modifiers = default_plane_modifiers;
+
+	plane = __drmm_universal_plane_alloc(drm,
+					     sizeof(struct drm_plane), 0,
+					     0,
+					     funcs,
+					     formats,
+					     num_formats,
+					     modifiers,
+					     DRM_PLANE_TYPE_PRIMARY,
+					     NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane);
+
+	drm_plane_helper_add(plane, helper_funcs);
+
+	return plane;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_create_primary_plane);
+
+static const struct drm_crtc_helper_funcs default_crtc_helper_funcs = {
+};
+
+static const struct drm_crtc_funcs default_crtc_funcs = {
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.reset = drm_atomic_helper_crtc_reset,
+};
+
+/**
+ * drm_kunit_helper_create_crtc - Creates a mock CRTC for a KUnit test
+ * @test: The test context object
+ * @drm: The device to alloc the CRTC for
+ * @primary: Primary plane for the CRTC
+ * @cursor: Cursor plane for the CRTC. Optional.
+ * @funcs: Callbacks for the new CRTC. Optional.
+ * @helper_funcs: Helpers callbacks for the new CRTC. Optional.
+ *
+ * This allocates and initializes a mock struct &drm_crtc meant to be
+ * part of a mock device for a KUnit test.
+ *
+ * Resources will be cleaned up automatically.
+ *
+ * @funcs will default to the default helper implementations.
+ * @helper_funcs will default to an empty implementation.
+ *
+ * Returns:
+ * A pointer to the new CRTC, or an ERR_PTR() otherwise.
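
A sketch of how the two helpers compose in a test, relying on the documented defaults (NULL funcs, formats, and modifiers); the test name is hypothetical, and the drm_device is assumed to have been stashed in test->priv by the suite's init:

static void example_crtc_test(struct kunit *test)
{
	struct drm_device *drm = test->priv;
	struct drm_plane *primary;
	struct drm_crtc *crtc;

	/* XRGB8888-only primary plane with a linear modifier, per the defaults. */
	primary = drm_kunit_helper_create_primary_plane(test, drm,
							NULL, NULL,
							NULL, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, primary);

	crtc = drm_kunit_helper_create_crtc(test, drm, primary, NULL,
					    NULL, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
}
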
+ */ +struct drm_crtc * +drm_kunit_helper_create_crtc(struct kunit *test, + struct drm_device *drm, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const struct drm_crtc_helper_funcs *helper_funcs) +{ + struct drm_crtc *crtc; + int ret; + + if (!funcs) + funcs = &default_crtc_funcs; + + if (!helper_funcs) + helper_funcs = &default_crtc_helper_funcs; + + crtc = drmm_kzalloc(drm, sizeof(*crtc), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, crtc); + + ret = drmm_crtc_init_with_planes(drm, crtc, + primary, + cursor, + funcs, + NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_crtc_helper_add(crtc, helper_funcs); + + return crtc; +} +EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc); + MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c index 1eb0c304f960..3488d930e3a3 100644 --- a/drivers/gpu/drm/tests/drm_mm_test.c +++ b/drivers/gpu/drm/tests/drm_mm_test.c @@ -188,7 +188,7 @@ out: static void drm_test_mm_debug(struct kunit *test) { - struct drm_printer p = drm_debug_printer(test->name); + struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, test->name); struct drm_mm mm; struct drm_mm_node nodes[2]; diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index 5f838980c7a1..94f8e3178df5 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -265,6 +265,16 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc, reinit_completion(&tcrtc->framedone_completion); + /* + * If a layer is left enabled when the videoport is disabled, and the + * vid pipeline that was used for the layer is taken into use on + * another videoport, the DSS will report sync lost issues. Disable all + * the layers here as a work-around. 
+ */ + for (u32 layer = 0; layer < tidss->feat->num_planes; layer++) + dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer, + false); + dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport); if (!wait_for_completion_timeout(&tcrtc->framedone_completion, diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c index e1c0ef0c3894..68fed531f6a7 100644 --- a/drivers/gpu/drm/tidss/tidss_plane.c +++ b/drivers/gpu/drm/tidss/tidss_plane.c @@ -213,7 +213,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss, drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs); - drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0, + drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0, num_planes - 1); ret = drm_plane_create_color_properties(&tplane->plane, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index ba3f09e2d7e6..96a724e8f3ff 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -49,7 +49,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { - struct drm_printer p = drm_debug_printer(TTM_PFX); + struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX); struct ttm_resource_manager *man; int i, mem_type; diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index b62f420a9f96..112438d965ff 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -387,7 +387,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt, enum ttm_caching caching, pgoff_t start_page, pgoff_t end_page) { - struct page **pages = tt->pages; + struct page **pages = &tt->pages[start_page]; unsigned int order; pgoff_t i, nr; diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 1bdfac8beafd..a07ede668cc1 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -40,7 +40,7 @@ void v3d_free_object(struct drm_gem_object *obj) mutex_lock(&v3d->bo_lock); v3d->bo_stats.num_allocated--; - v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT; + v3d->bo_stats.pages_allocated -= obj->size >> V3D_MMU_PAGE_SHIFT; mutex_unlock(&v3d->bo_lock); spin_lock(&v3d->mm_lock); @@ -109,8 +109,8 @@ v3d_bo_create_finish(struct drm_gem_object *obj) * lifetime of the BO. */ ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, - obj->size >> PAGE_SHIFT, - GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); + obj->size >> V3D_MMU_PAGE_SHIFT, + GMP_GRANULARITY >> V3D_MMU_PAGE_SHIFT, 0, 0); spin_unlock(&v3d->mm_lock); if (ret) return ret; @@ -118,7 +118,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) /* Track stats for /debug/dri/n/bo_stats. 
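
The v3d conversion in this hunk sequence matters on kernels built with CPU pages larger than 4 KiB: the GPU MMU always uses 4 KiB pages, so BO sizes and drm_mm offsets must be scaled by a fixed shift of 12 rather than by PAGE_SHIFT. Worked numbers, as a sketch with an invented helper name:

/* The V3D MMU page size is fixed at 4 KiB (shift of 12). */
static inline unsigned long example_bo_mmu_pages(size_t size)
{
	return size >> 12;
}

/*
 * A 1 MiB BO spans 256 MMU pages. On an arm64 kernel with 64 KiB CPU
 * pages, size >> PAGE_SHIFT would yield 16 and misplace every node
 * offset by a factor of 16.
 */
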
*/ mutex_lock(&v3d->bo_lock); v3d->bo_stats.num_allocated++; - v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; + v3d->bo_stats.pages_allocated += obj->size >> V3D_MMU_PAGE_SHIFT; mutex_unlock(&v3d->bo_lock); v3d_mmu_insert_ptes(bo); @@ -201,7 +201,7 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data, if (IS_ERR(bo)) return PTR_ERR(bo); - args->offset = bo->node.start << PAGE_SHIFT; + args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT; ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); drm_gem_object_put(&bo->base.base); @@ -246,7 +246,7 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, } bo = to_v3d_bo(gem_obj); - args->offset = bo->node.start << PAGE_SHIFT; + args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT; drm_gem_object_put(gem_obj); return 0; diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index dc3cf708d02e..19e3ee7ac897 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -219,7 +219,7 @@ static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused) seq_printf(m, "allocated bos: %d\n", v3d->bo_stats.num_allocated); seq_printf(m, "allocated bo size (kb): %ld\n", - (long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10)); + (long)v3d->bo_stats.pages_allocated << (V3D_MMU_PAGE_SHIFT - 10)); mutex_unlock(&v3d->bo_lock); return 0; diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 3c7d58866570..1950c723dde1 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -19,6 +19,8 @@ struct reset_control; #define GMP_GRANULARITY (128 * 1024) +#define V3D_MMU_PAGE_SHIFT 12 + #define V3D_MAX_QUEUES (V3D_CPU + 1) static inline char *v3d_queue_to_string(enum v3d_queue queue) diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index afc76390a197..2e04f6cb661e 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -70,7 +70,7 @@ v3d_overflow_mem_work(struct work_struct *work) list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list); spin_unlock_irqrestore(&v3d->job_lock, irqflags); - V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); + V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT); V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size); out: diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 5a453532901f..14f3af40d6f6 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -21,8 +21,6 @@ #include "v3d_drv.h" #include "v3d_regs.h" -#define V3D_MMU_PAGE_SHIFT 12 - /* Note: All PTEs for the 1MB superpage must be filled with the * superpage bit set. 
*/ diff --git a/drivers/gpu/drm/xe/.kunitconfig b/drivers/gpu/drm/xe/.kunitconfig index 9590eac91af3..ad4b9b4a9f55 100644 --- a/drivers/gpu/drm/xe/.kunitconfig +++ b/drivers/gpu/drm/xe/.kunitconfig @@ -11,3 +11,8 @@ CONFIG_DRM_XE_DISPLAY=n CONFIG_EXPERT=y CONFIG_FB=y CONFIG_DRM_XE_KUNIT_TEST=y +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_LOCKDEP=y +CONFIG_DEBUG_LOCKDEP=y diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index e36ae1f0d885..0e31dfb8989e 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_XE tristate "Intel Xe Graphics" - depends on DRM && PCI && MMU && (m || (y && KUNIT=y)) && 64BIT + depends on DRM && PCI && MMU && (m || (y && KUNIT=y)) select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular # the shmem_readpage() which depends upon tmpfs diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index efcf0ab7a1a6..c531210695db 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -76,6 +76,7 @@ xe-y += xe_bb.o \ xe_ggtt.o \ xe_gpu_scheduler.o \ xe_gsc.o \ + xe_gsc_proxy.o \ xe_gsc_submit.o \ xe_gt.o \ xe_gt_ccs_mode.o \ @@ -92,6 +93,7 @@ xe-y += xe_bb.o \ xe_guc.o \ xe_guc_ads.o \ xe_guc_ct.o \ + xe_guc_db_mgr.o \ xe_guc_debugfs.o \ xe_guc_hwconfig.o \ xe_guc_log.o \ @@ -137,6 +139,7 @@ xe-y += xe_bb.o \ xe_uc_debugfs.o \ xe_uc_fw.o \ xe_vm.o \ + xe_vram_freq.o \ xe_wait_user_fence.o \ xe_wa.o \ xe_wopcm.o @@ -145,18 +148,23 @@ xe-y += xe_bb.o \ xe-$(CONFIG_HWMON) += xe_hwmon.o # graphics virtualization (SR-IOV) support -xe-y += xe_sriov.o +xe-y += \ + xe_guc_relay.o \ + xe_memirq.o \ + xe_sriov.o xe-$(CONFIG_PCI_IOV) += \ xe_lmtt.o \ xe_lmtt_2l.o \ xe_lmtt_ml.o +xe-$(CONFIG_DRM_XE_KUNIT_TEST) += \ + tests/xe_kunit_helpers.o + # i915 Display compat #defines and #includes subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \ -I$(srctree)/$(src)/display/ext \ -I$(srctree)/$(src)/compat-i915-headers \ - -I$(srctree)/drivers/gpu/drm/xe/display/ \ -I$(srctree)/drivers/gpu/drm/i915/display/ \ -Ddrm_i915_gem_object=xe_bo \ -Ddrm_i915_private=xe_device @@ -176,17 +184,17 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE # Display code specific to xe xe-$(CONFIG_DRM_XE_DISPLAY) += \ - xe_display.o \ - display/xe_fb_pin.o \ - display/xe_hdcp_gsc.o \ - display/xe_plane_initial.o \ - display/xe_display_rps.o \ + display/ext/i915_irq.o \ + display/ext/i915_utils.o \ + display/intel_fb_bo.o \ + display/intel_fbdev_fb.o \ + display/xe_display.o \ display/xe_display_misc.o \ + display/xe_display_rps.o \ display/xe_dsb_buffer.o \ - display/intel_fbdev_fb.o \ - display/intel_fb_bo.o \ - display/ext/i915_irq.o \ - display/ext/i915_utils.o + display/xe_fb_pin.o \ + display/xe_hdcp_gsc.o \ + display/xe_plane_initial.o # SOC code shared with i915 xe-$(CONFIG_DRM_XE_DISPLAY) += \ @@ -213,8 +221,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_ddi.o \ i915-display/intel_ddi_buf_trans.o \ i915-display/intel_display.o \ - i915-display/intel_display_debugfs.o \ - i915-display/intel_display_debugfs_params.o \ i915-display/intel_display_device.o \ i915-display/intel_display_driver.o \ i915-display/intel_display_irq.o \ @@ -258,7 +264,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_modeset_setup.o \ i915-display/intel_modeset_verify.o \ i915-display/intel_panel.o \ - i915-display/intel_pipe_crc.o \ i915-display/intel_pmdemand.o \ 
i915-display/intel_pps.o \ i915-display/intel_psr.o \ @@ -285,6 +290,13 @@ ifeq ($(CONFIG_DRM_FBDEV_EMULATION),y) xe-$(CONFIG_DRM_XE_DISPLAY) += i915-display/intel_fbdev.o endif +ifeq ($(CONFIG_DEBUG_FS),y) + xe-$(CONFIG_DRM_XE_DISPLAY) += \ + i915-display/intel_display_debugfs.o \ + i915-display/intel_display_debugfs_params.o \ + i915-display/intel_pipe_crc.o +endif + obj-$(CONFIG_DRM_XE) += xe.o obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h b/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h new file mode 100644 index 000000000000..80bbf06a3eb8 --- /dev/null +++ b/drivers/gpu/drm/xe/abi/gsc_proxy_commands_abi.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _ABI_GSC_PROXY_COMMANDS_ABI_H +#define _ABI_GSC_PROXY_COMMANDS_ABI_H + +#include <linux/types.h> + +/* Heci client ID for proxy commands */ +#define HECI_MEADDRESS_PROXY 10 + +/* FW-defined proxy header */ +struct xe_gsc_proxy_header { + /* + * hdr: + * Bits 0-7: type of the proxy message (see enum xe_gsc_proxy_type) + * Bits 8-15: rsvd + * Bits 16-31: length in bytes of the payload following the proxy header + */ + u32 hdr; +#define GSC_PROXY_TYPE GENMASK(7, 0) +#define GSC_PROXY_PAYLOAD_LENGTH GENMASK(31, 16) + + u32 source; /* Source of the Proxy message */ + u32 destination; /* Destination of the Proxy message */ +#define GSC_PROXY_ADDRESSING_KMD 0x10000 +#define GSC_PROXY_ADDRESSING_GSC 0x20000 +#define GSC_PROXY_ADDRESSING_CSME 0x30000 + + u32 status; /* Command status */ +} __packed; + +/* FW-defined proxy types */ +enum xe_gsc_proxy_type { + GSC_PROXY_MSG_TYPE_PROXY_INVALID = 0, + GSC_PROXY_MSG_TYPE_PROXY_QUERY = 1, + GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD = 2, + GSC_PROXY_MSG_TYPE_PROXY_END = 3, + GSC_PROXY_MSG_TYPE_PROXY_NOTIFICATION = 4, +}; + +#endif diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h new file mode 100644 index 000000000000..5496a5890847 --- /dev/null +++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _GUC_ACTIONS_PF_ABI_H +#define _GUC_ACTIONS_PF_ABI_H + +#include "guc_communication_ctb_abi.h" + +/** + * DOC: GUC2PF_RELAY_FROM_VF + * + * This message is used by the GuC firmware to forward a VF2PF `Relay Message`_ + * received from the Virtual Function (VF) driver to this Physical Function (PF) + * driver. + * + * This message is always sent as `CTB HXG Message`_. 
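
A sketch of filling the xe_gsc_proxy_header defined above with the usual bitfield helpers, using only the masks and constants from this header; the function name, payload length, and routing choice are invented for the example:

#include <linux/bitfield.h>

static void example_fill_proxy_header(struct xe_gsc_proxy_header *hdr,
				      u32 payload_bytes)
{
	hdr->hdr = FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
		   FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, payload_bytes);
	hdr->source = GSC_PROXY_ADDRESSING_KMD;
	hdr->destination = GSC_PROXY_ADDRESSING_CSME;
	hdr->status = 0;
}
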
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF` = 0x5100 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VFID** - source VF identifier | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **RELAY_ID** - VF/PF message ID | + * +---+-------+-----------------+--------------------------------------------+ + * | 3 | 31:0 | **RELAY_DATA1** | | + * +---+-------+-----------------+ | + * |...| | | [Embedded `Relay Message`_] | + * +---+-------+-----------------+ | + * | n | 31:0 | **RELAY_DATAx** | | + * +---+-------+-----------------+--------------------------------------------+ + */ +#define XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF 0x5100 + +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u) +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN \ + (GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN) +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0 +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_3_RELAY_DATA1 GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_n_RELAY_DATAx GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN + +/** + * DOC: PF2GUC_RELAY_TO_VF + * + * This H2G message is used by the Physical Function (PF) driver to send embedded + * VF2PF `Relay Message`_ to the VF. + * + * This action message must be sent over CTB as `CTB HXG Message`_. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = `GUC_HXG_TYPE_FAST_REQUEST`_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`XE_GUC_ACTION_PF2GUC_RELAY_TO_VF` = 0x5101 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VFID** - target VF identifier | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **RELAY_ID** - VF/PF message ID | + * +---+-------+-----------------+--------------------------------------------+ + * | 3 | 31:0 | **RELAY_DATA1** | | + * +---+-------+-----------------+ | + * |...| | | [Embedded `Relay Message`_] | + * +---+-------+-----------------+ | + * | n | 31:0 | **RELAY_DATAx** | | + * +---+-------+-----------------+--------------------------------------------+ + */ +#define XE_GUC_ACTION_PF2GUC_RELAY_TO_VF 0x5101 + +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 2u) +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_MAX_LEN \ + (PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN) +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_3_RELAY_DATA1 GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN + +/** + * DOC: GUC2VF_RELAY_FROM_PF + * + * This message is used by the GuC firmware to deliver `Relay Message`_ from the + * Physical Function (PF) driver to this Virtual Function (VF) driver. + * See `GuC Relay Communication`_ for details. + * + * This message is always sent over CTB. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF` = 0x5102 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **RELAY_ID** - VF/PF message ID | + * +---+-------+-----------------+--------------------------------------------+ + * | 2 | 31:0 | **RELAY_DATA1** | | + * +---+-------+-----------------+ | + * |...| | | [Embedded `Relay Message`_] | + * +---+-------+-----------------+ | + * | n | 31:0 | **RELAY_DATAx** | | + * +---+-------+-----------------+--------------------------------------------+ + */ +#define XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF 0x5102 + +#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 1u) +#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN \ + (GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN) +#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0 +#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_n_RELAY_DATAx GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN + +/** + * DOC: VF2GUC_RELAY_TO_PF + * + * This message is used by the Virtual Function (VF) drivers to communicate with + * the Physical Function (PF) driver and send `Relay Message`_ to the PF driver. + * See `GuC Relay Communication`_ for details. + * + * This message must be sent over CTB. 
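+ *
+ * A VF-side header could be packed analogously (again only a sketch with
+ * hypothetical locals; GUC_HXG_TYPE_FAST_REQUEST_ would presumably be used
+ * instead when no individual reply from the GuC is expected)::
+ *
+ *	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ *		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ *		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
+ *			    XE_GUC_ACTION_VF2GUC_RELAY_TO_PF);
+ *	msg[1] = FIELD_PREP(VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID, rid);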
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ or GUC_HXG_TYPE_FAST_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`XE_GUC_ACTION_VF2GUC_RELAY_TO_PF` = 0x5103 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **RELAY_ID** - VF/PF message ID | + * +---+-------+-----------------+--------------------------------------------+ + * | 2 | 31:0 | **RELAY_DATA1** | | + * +---+-------+-----------------+ | + * |...| | | [Embedded `Relay Message`_] | + * +---+-------+-----------------+ | + * | n | 31:0 | **RELAY_DATAx** | | + * +---+-------+-----------------+--------------------------------------------+ + */ +#define XE_GUC_ACTION_VF2GUC_RELAY_TO_PF 0x5103 + +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u) +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_MAX_LEN \ + (VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN + GUC_RELAY_MSG_MAX_LEN) +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID GUC_HXG_REQUEST_MSG_n_DATAn +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_NUM_RELAY_DATA GUC_RELAY_MSG_MAX_LEN + +#endif diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h index 0b1146d0c997..8f86a16dc577 100644 --- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h @@ -81,12 +81,13 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64); #define GUC_CTB_HDR_LEN 1u #define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN -#define GUC_CTB_MSG_MAX_LEN 256u +#define GUC_CTB_MSG_MAX_LEN (GUC_CTB_MSG_MIN_LEN + GUC_CTB_MAX_DWORDS) #define GUC_CTB_MSG_0_FENCE (0xffffu << 16) #define GUC_CTB_MSG_0_FORMAT (0xfu << 12) #define GUC_CTB_FORMAT_HXG 0u #define GUC_CTB_MSG_0_RESERVED (0xfu << 8) #define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0) +#define GUC_CTB_MAX_DWORDS 255 /** * DOC: CTB HXG Message diff --git a/drivers/gpu/drm/xe/abi/guc_messages_abi.h b/drivers/gpu/drm/xe/abi/guc_messages_abi.h index 29e414c82d56..534a39db7772 100644 --- a/drivers/gpu/drm/xe/abi/guc_messages_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_messages_abi.h @@ -24,6 +24,7 @@ * | | 30:28 | **TYPE** - message type | * | | | - _`GUC_HXG_TYPE_REQUEST` = 0 | * | | | - _`GUC_HXG_TYPE_EVENT` = 1 | + * | | | - _`GUC_HXG_TYPE_FAST_REQUEST` = 2 | * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_BUSY` = 3 | * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_RETRY` = 5 | * | | | - _`GUC_HXG_TYPE_RESPONSE_FAILURE` = 6 | @@ -46,6 +47,7 @@ #define GUC_HXG_MSG_0_TYPE (0x7u << 28) #define GUC_HXG_TYPE_REQUEST 0u #define GUC_HXG_TYPE_EVENT 1u +#define GUC_HXG_TYPE_FAST_REQUEST 2u #define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u #define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u #define GUC_HXG_TYPE_RESPONSE_FAILURE 6u diff --git a/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h new file mode 100644 index 000000000000..747e428de421 --- /dev/null +++ 
b/drivers/gpu/drm/xe/abi/guc_relay_actions_abi.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_RELAY_ACTIONS_ABI_H_
+#define _ABI_GUC_RELAY_ACTIONS_ABI_H_
+
+/**
+ * DOC: GuC Relay Debug Actions
+ *
+ * This range of action codes is reserved for debugging purposes only and should
+ * be used only on debug builds. These actions may not be supported by the
+ * production drivers. Their definitions could be changed in the future.
+ *
+ * _`GUC_RELAY_ACTION_DEBUG_ONLY_START` = 0xDEB0
+ * _`GUC_RELAY_ACTION_DEBUG_ONLY_END` = 0xDEFF
+ */
+
+#define GUC_RELAY_ACTION_DEBUG_ONLY_START	0xDEB0
+#define GUC_RELAY_ACTION_DEBUG_ONLY_END		0xDEFF
+
+/**
+ * DOC: VFXPF_TESTLOOP
+ *
+ * This `Relay Message`_ is used to selftest the `GuC Relay Communication`_.
+ *
+ * The following opcodes are defined:
+ * VFXPF_TESTLOOP_OPCODE_NOP_ will return no data.
+ * VFXPF_TESTLOOP_OPCODE_BUSY_ will reply with BUSY response first.
+ * VFXPF_TESTLOOP_OPCODE_RETRY_ will reply with RETRY response instead of SUCCESS.
+ * VFXPF_TESTLOOP_OPCODE_ECHO_ will return same data as received.
+ * VFXPF_TESTLOOP_OPCODE_FAIL_ will always fail with error.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * |   | Bits  | Description                                                  |
+ * +===+=======+==============================================================+
+ * | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_HOST_                                |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ or GUC_HXG_TYPE_FAST_REQUEST_   |
+ * |   |       |        or GUC_HXG_TYPE_EVENT_                                |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 27:16 | **OPCODE**                                                   |
+ * |   |       |   - _`VFXPF_TESTLOOP_OPCODE_NOP` = 0x0                       |
+ * |   |       |   - _`VFXPF_TESTLOOP_OPCODE_BUSY` = 0xB                      |
+ * |   |       |   - _`VFXPF_TESTLOOP_OPCODE_RETRY` = 0xD                     |
+ * |   |       |   - _`VFXPF_TESTLOOP_OPCODE_ECHO` = 0xE                      |
+ * |   |       |   - _`VFXPF_TESTLOOP_OPCODE_FAIL` = 0xF                      |
+ * |   +-------+--------------------------------------------------------------+
+ * |   |  15:0 | ACTION = _`GUC_RELAY_ACTION_VFXPF_TESTLOOP`                  |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 |  31:0 | **DATA1** = optional, depends on **OPCODE**:                 |
+ * |   |       | for VFXPF_TESTLOOP_OPCODE_BUSY_: time in ms for reply        |
+ * |   |       | for VFXPF_TESTLOOP_OPCODE_FAIL_: expected error              |
+ * |   |       | for VFXPF_TESTLOOP_OPCODE_ECHO_: payload                     |
+ * +---+-------+--------------------------------------------------------------+
+ * |...|  31:0 | **DATAn** = only for **OPCODE** VFXPF_TESTLOOP_OPCODE_ECHO_  |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * |   | Bits  | Description                                                  |
+ * +===+=======+==============================================================+
+ * | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_HOST_                                |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_                        |
+ * |   +-------+--------------------------------------------------------------+
+ * |   |  27:0 | DATA0 = MBZ                                                  |
+ * +---+-------+--------------------------------------------------------------+
+ * |...|  31:0 | DATAn = only for **OPCODE** VFXPF_TESTLOOP_OPCODE_ECHO_      |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_RELAY_ACTION_VFXPF_TESTLOOP		(GUC_RELAY_ACTION_DEBUG_ONLY_START + 1)
+#define   VFXPF_TESTLOOP_OPCODE_NOP		0x0
+#define   VFXPF_TESTLOOP_OPCODE_BUSY		0xB
+#define   VFXPF_TESTLOOP_OPCODE_RETRY		0xD
+#define   VFXPF_TESTLOOP_OPCODE_ECHO		0xE
+#define   VFXPF_TESTLOOP_OPCODE_FAIL		0xF
+
+#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h b/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h
new file mode 100644
index 000000000000..f92625f04796
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_relay_communication_abi.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_RELAY_COMMUNICATION_ABI_H
+#define _ABI_GUC_RELAY_COMMUNICATION_ABI_H
+
+#include <linux/build_bug.h>
+
+#include "guc_actions_sriov_abi.h"
+#include "guc_communication_ctb_abi.h"
+#include "guc_messages_abi.h"
+
+/**
+ * DOC: GuC Relay Communication
+ *
+ * The communication between Virtual Function (VF) drivers and Physical Function
+ * (PF) drivers is based on the GuC firmware acting as a proxy (relay) agent.
+ *
+ * To communicate with the PF driver, VF drivers use the `VF2GUC_RELAY_TO_PF`_
+ * action that takes the `Relay Message`_ as opaque payload and requires the
+ * relay message identifier (RID) as additional parameter.
+ *
+ * This identifier is used by the drivers to match related messages.
+ *
+ * The GuC forwards this `Relay Message`_ and its identifier to the PF driver
+ * in the `GUC2PF_RELAY_FROM_VF`_ action. This event message additionally
+ * contains the identifier of the origin VF (VFID).
+ *
+ * Likewise, to communicate with the VF drivers, the PF driver uses the
+ * `PF2GUC_RELAY_TO_VF`_ action that in addition to the `Relay Message`_
+ * and the relay message identifier (RID) also takes the target VF identifier.
+ *
+ * The GuC uses this target VFID from the message to select where to send the
+ * `GUC2VF_RELAY_FROM_PF`_ with the embedded `Relay Message`_ carrying the
+ * response::
+ *
+ *      VF                             GuC                             PF
+ *      |                               |                               |
+ *     [ ] VF2GUC_RELAY_TO_PF           |                               |
+ *     [ ]---------------------------> [ ]                              |
+ *     [ ]      { rid, msg }           [ ]                              |
+ *     [ ]                             [ ] GUC2PF_RELAY_FROM_VF         |
+ *     [ ]                             [ ]---------------------------> [ ]
+ *     [ ]                              |      { VFID, rid, msg }      [ ]
+ *     [ ]                              |                              [ ]
+ *     [ ]                              |      PF2GUC_RELAY_TO_VF      [ ]
+ *     [ ]                             [ ] <---------------------------[ ]
+ *     [ ]                             [ ]    { VFID, rid, reply }      |
+ *     [ ]       GUC2VF_RELAY_FROM_PF  [ ]                              |
+ *     [ ] <---------------------------[ ]                              |
+ *      |        { rid, reply }         |                               |
+ *      |                               |                               |
+ *
+ * It is also possible that the PF driver will initiate communication with the
+ * selected VF driver. The same GuC action messages will be used::
+ *
+ *      VF                             GuC                             PF
+ *      |                               |                               |
+ *      |                               |      PF2GUC_RELAY_TO_VF      [ ]
+ *      |                              [ ] <---------------------------[ ]
+ *      |                              [ ]     { VFID, rid, msg }      [ ]
+ *      |        GUC2VF_RELAY_FROM_PF  [ ]                             [ ]
+ *     [ ] <---------------------------[ ]                             [ ]
+ *     [ ]        { rid, msg }          |                              [ ]
+ *     [ ]                              |                              [ ]
+ *     [ ] VF2GUC_RELAY_TO_PF           |                              [ ]
+ *     [ ]---------------------------> [ ]                             [ ]
+ *      |       { rid, reply }         [ ]                             [ ]
+ *      |                              [ ] GUC2PF_RELAY_FROM_VF        [ ]
+ *      |                              [ ]--------------------------->[ ]
+ *      |                               |     { VFID, rid, reply }      |
+ *      |                               |                               |
+ */
+
+/**
+ * DOC: Relay Message
+ *
+ * The `Relay Message`_ is used by Physical Function (PF) driver and Virtual
+ * Function (VF) drivers to communicate using `GuC Relay Communication`_.
+ *
+ * Format of the `Relay Message`_ follows format of the generic `HXG Message`_.
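+ *
+ * For sizing intuition (a derived number, assuming the customary 1-dword
+ * minimum for an HXG event): with GUC_CTB_MAX_DWORDS = 255 and the 3-dword
+ * `GUC2PF_RELAY_FROM_VF`_ header (HXG dword + VFID + RELAY_ID), the
+ * GUC_RELAY_MSG_MAX_LEN defined below works out to 255 - 3 = 252 dwords.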
+ * + * +--------------------------------------------------------------------------+ + * | `Relay Message`_ | + * +==========================================================================+ + * | `HXG Message`_ | + * +--------------------------------------------------------------------------+ + * + * Maximum length of the `Relay Message`_ is limited by the maximum length of + * the `CTB HXG Message`_ and format of the `GUC2PF_RELAY_FROM_VF`_ message. + */ + +#define GUC_RELAY_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN +#define GUC_RELAY_MSG_MAX_LEN \ + (GUC_CTB_MAX_DWORDS - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN) + +static_assert(PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN > + VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN); + +/** + * DOC: Relay Error Codes + * + * The `GuC Relay Communication`_ can be used to pass `Relay Message`_ between + * drivers that run on different Operating Systems. To help in troubleshooting, + * `GuC Relay Communication`_ uses error codes that mostly match errno values. + */ + +#define GUC_RELAY_ERROR_UNDISCLOSED 0 +#define GUC_RELAY_ERROR_OPERATION_NOT_PERMITTED 1 /* EPERM */ +#define GUC_RELAY_ERROR_PERMISSION_DENIED 13 /* EACCES */ +#define GUC_RELAY_ERROR_INVALID_ARGUMENT 22 /* EINVAL */ +#define GUC_RELAY_ERROR_INVALID_REQUEST_CODE 56 /* EBADRQC */ +#define GUC_RELAY_ERROR_NO_DATA_AVAILABLE 61 /* ENODATA */ +#define GUC_RELAY_ERROR_PROTOCOL_ERROR 71 /* EPROTO */ +#define GUC_RELAY_ERROR_MESSAGE_SIZE 90 /* EMSGSIZE */ + +#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h index 68d9f6116bdf..777c20ceabab 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h @@ -10,7 +10,7 @@ #include "xe_bo.h" -#define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT) +#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */ static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n) { diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h index 5d2a77b52db4..420eba0e4be0 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h @@ -162,18 +162,18 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) #include "intel_wakeref.h" -static inline bool intel_runtime_pm_get(struct xe_runtime_pm *pm) +static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm) { struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); if (xe_pm_runtime_get(xe) < 0) { xe_pm_runtime_put(xe); - return false; + return 0; } - return true; + return 1; } -static inline bool intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm) +static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm) { struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); @@ -187,7 +187,7 @@ static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm) xe_pm_runtime_put(xe); } -static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, bool wakeref) +static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref) { if (wakeref) intel_runtime_pm_put_unchecked(pm); diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h index 888e7a87a925..bd233007c1b7 100644 --- 
a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h @@ -19,6 +19,9 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, int err; u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT; + if (align) + size = ALIGN(size, align); + bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe), NULL, size, start, end, ttm_bo_type_kernel, flags); diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 74391d9b11ae..e4db069f0db3 100644 --- a/drivers/gpu/drm/xe/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy) int xe_display_init_nommio(struct xe_device *xe) { - int err; - if (!xe->info.enable_display) return 0; @@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe) /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(xe); - err = intel_power_domains_init(xe); - if (err) - return err; - return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe); } diff --git a/drivers/gpu/drm/xe/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h index 710e56180b52..710e56180b52 100644 --- a/drivers/gpu/drm/xe/xe_display.h +++ b/drivers/gpu/drm/xe/display/xe_display.h diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index ccf83c12b545..866d1dd6eeb4 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -10,6 +10,7 @@ #include "i915_drv.h" #include "intel_atomic_plane.h" +#include "intel_crtc.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_fb.h" @@ -18,19 +19,20 @@ #include "intel_plane_initial.h" static bool -intel_reuse_initial_plane_obj(struct drm_i915_private *i915, - const struct intel_initial_plane_config *plane_config, +intel_reuse_initial_plane_obj(struct intel_crtc *this, + const struct intel_initial_plane_config plane_configs[], struct drm_framebuffer **fb) { + struct drm_i915_private *i915 = to_i915(this->base.dev); struct intel_crtc *crtc; for_each_intel_crtc(&i915->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct intel_plane_state *plane_state = + const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); if (!crtc_state->uapi.active) continue; @@ -38,7 +40,7 @@ intel_reuse_initial_plane_obj(struct drm_i915_private *i915, if (!plane_state->ggtt_vma) continue; - if (intel_plane_ggtt_offset(plane_state) == plane_config->base) { + if (plane_configs[this->pipe].base == plane_configs[crtc->pipe].base) { *fb = plane_state->hw.fb; return true; } @@ -178,10 +180,10 @@ err_bo: static void intel_find_initial_plane_obj(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) + struct intel_initial_plane_config plane_configs[]) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_initial_plane_config *plane_config = + &plane_configs[crtc->pipe]; struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = @@ -201,7 +203,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, if (intel_alloc_initial_plane_obj(crtc, plane_config)) fb 
= &plane_config->fb->base; - else if (!intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb)) + else if (!intel_reuse_initial_plane_obj(crtc, plane_configs, &fb)) goto nofb; plane_state->uapi.rotation = plane_config->rotation; @@ -267,25 +269,36 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config) } } -void intel_crtc_initial_plane_config(struct intel_crtc *crtc) +void intel_initial_plane_config(struct drm_i915_private *i915) { - struct xe_device *xe = to_xe_device(crtc->base.dev); - struct intel_initial_plane_config plane_config = {}; + struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; - /* - * Note that reserving the BIOS fb up front prevents us - * from stuffing other stolen allocations like the ring - * on top. This prevents some ugliness at boot time, and - * can even allow for smooth boot transitions if the BIOS - * fb is large enough for the active pipe configuration. - */ - xe->display.funcs.display->get_initial_plane_config(crtc, &plane_config); + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_initial_plane_config *plane_config = + &plane_configs[crtc->pipe]; - /* - * If the fb is shared between multiple heads, we'll - * just get the first one. - */ - intel_find_initial_plane_obj(crtc, &plane_config); + if (!to_intel_crtc_state(crtc->base.state)->uapi.active) + continue; - plane_config_fini(&plane_config); + /* + * Note that reserving the BIOS fb up front prevents us + * from stuffing other stolen allocations like the ring + * on top. This prevents some ugliness at boot time, and + * can even allow for smooth boot transitions if the BIOS + * fb is large enough for the active pipe configuration. + */ + i915->display.funcs.display->get_initial_plane_config(crtc, plane_config); + + /* + * If the fb is shared between multiple heads, we'll + * just get the first one. 
+ */ + intel_find_initial_plane_obj(crtc, plane_configs); + + if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config)) + intel_crtc_wait_for_next_vblank(crtc); + + plane_config_fini(plane_config); + } } diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h index 1cfa96167fde..c74ceb550dce 100644 --- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h @@ -56,6 +56,9 @@ #define MI_FLUSH_IMM_QW REG_FIELD_PREP(MI_FLUSH_DW_LEN_DW, 5 - 2) #define MI_FLUSH_DW_USE_GTT REG_BIT(2) +#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4)) +#define MI_LRM_USE_GGTT REG_BIT(22) + #define MI_BATCH_BUFFER_START __MI_INSTR(0x31) #endif diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index 5592774fc690..0b1266c88a6a 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -75,12 +75,17 @@ #define FF_THREAD_MODE(base) XE_REG((base) + 0xa0) #define FF_TESSELATION_DOP_GATE_DISABLE BIT(19) +#define RING_INT_SRC_RPT_PTR(base) XE_REG((base) + 0xa4) #define RING_IMR(base) XE_REG((base) + 0xa8) +#define RING_INT_STATUS_RPT_PTR(base) XE_REG((base) + 0xac) #define RING_EIR(base) XE_REG((base) + 0xb0) #define RING_EMR(base) XE_REG((base) + 0xb4) #define RING_ESR(base) XE_REG((base) + 0xb8) +#define INSTPM(base) XE_REG((base) + 0xc0, XE_REG_OPTION_MASKED) +#define ENABLE_SEMAPHORE_POLL_BIT REG_BIT(13) + #define RING_CMD_CCTL(base) XE_REG((base) + 0xc4, XE_REG_OPTION_MASKED) /* * CMD_CCTL read/write fields take a MOCS value and _not_ a table index. @@ -136,6 +141,7 @@ #define TAIL_ADDR 0x001FFFF8 #define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8) +#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc) #define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4) #define RING_FORCE_TO_NONPRIV_DENY REG_BIT(30) diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 1dd361046b5d..15ac2d284d48 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -144,8 +144,12 @@ #define GSCPSMI_BASE XE_REG(0x880c) +#define CCCHKNREG1 XE_REG_MCR(0x8828) +#define ENCOMPPERFFIX REG_BIT(18) + /* Fuse readout registers for GT */ #define XEHP_FUSE4 XE_REG(0x9114) +#define CFEG_WMTP_DISABLE REG_BIT(20) #define CCS_EN_MASK REG_GENMASK(19, 16) #define GT_L3_EXC_MASK REG_GENMASK(6, 4) @@ -288,6 +292,9 @@ #define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4) #define XEHP_LNESPARE REG_BIT(19) +#define L3SQCREG3 XE_REG_MCR(0xb108) +#define COMPPWOVERFETCHEN REG_BIT(28) + #define XEHP_L3SQCREG5 XE_REG_MCR(0xb158) #define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) @@ -344,6 +351,9 @@ #define ROW_CHICKEN3 XE_REG_MCR(0xe49c, XE_REG_OPTION_MASKED) #define DIS_FIX_EOT1_FLUSH REG_BIT(9) +#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED) +#define SLM_WMTP_RESTORE REG_BIT(11) + #define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED) #define UGM_BACKUP_MODE REG_BIT(13) #define MDQ_ARBITRATION_MODE REG_BIT(12) @@ -430,6 +440,15 @@ #define VOLTAGE_MASK REG_GENMASK(10, 0) #define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4)) +#define INTR_GSC REG_BIT(31) +#define INTR_GUC REG_BIT(25) +#define INTR_MGUC REG_BIT(24) +#define INTR_BCS8 REG_BIT(23) +#define INTR_BCS(x) REG_BIT(15 - (x)) +#define INTR_CCS(x) REG_BIT(4 + (x)) +#define INTR_RCS0 REG_BIT(0) +#define INTR_VECS(x) REG_BIT(31 - (x)) +#define INTR_VCS(x) 
REG_BIT(x) #define RENDER_COPY_INTR_ENABLE XE_REG(0x190030) #define VCS_VECS_INTR_ENABLE XE_REG(0x190034) @@ -446,6 +465,7 @@ #define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x) #define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x) #define OTHER_GUC_INSTANCE 0 +#define OTHER_GSC_HECI2_INSTANCE 3 #define OTHER_GSC_INSTANCE 6 #define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4)) @@ -454,6 +474,7 @@ #define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8) #define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac) #define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0) +#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4) #define GUC_SG_INTR_MASK XE_REG(0x1900e8) #define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec) #define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4) @@ -469,10 +490,4 @@ #define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3) #define GT_RENDER_USER_INTERRUPT REG_BIT(0) -#define PVC_GT0_PACKAGE_ENERGY_STATUS XE_REG(0x281004) -#define PVC_GT0_PACKAGE_RAPL_LIMIT XE_REG(0x281008) -#define PVC_GT0_PACKAGE_POWER_SKU_UNIT XE_REG(0x281068) -#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c) -#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080) - #endif diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h index 4be81abc86ad..1825d8f79db6 100644 --- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h +++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h @@ -14,4 +14,13 @@ #define CTX_PDP0_UDW (0x30 + 1) #define CTX_PDP0_LDW (0x32 + 1) +#define CTX_LRM_INT_MASK_ENABLE 0x50 +#define CTX_INT_MASK_ENABLE_REG (CTX_LRM_INT_MASK_ENABLE + 1) +#define CTX_INT_MASK_ENABLE_PTR (CTX_LRM_INT_MASK_ENABLE + 2) +#define CTX_LRI_INT_REPORT_PTR 0x55 +#define CTX_INT_STATUS_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 1) +#define CTX_INT_STATUS_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 2) +#define CTX_INT_SRC_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 3) +#define CTX_INT_SRC_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 4) + #endif diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h new file mode 100644 index 000000000000..3dae858508c8 --- /dev/null +++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_PCODE_REGS_H_ +#define _XE_PCODE_REGS_H_ + +#include "regs/xe_reg_defs.h" + +/* + * This file contains addresses of PCODE registers visible through GT MMIO space. 
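+ *
+ * As a sketch (illustrative only, not part of this patch), these registers
+ * are presumably read like any other GT register, e.g.:
+ *
+ *   u32 raw = xe_mmio_read32(gt, PVC_GT0_PACKAGE_ENERGY_STATUS);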
+ */ + +#define PVC_GT0_PACKAGE_ENERGY_STATUS XE_REG(0x281004) +#define PVC_GT0_PACKAGE_RAPL_LIMIT XE_REG(0x281008) +#define PVC_GT0_PACKAGE_POWER_SKU_UNIT XE_REG(0x281068) +#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c) +#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080) + +#endif /* _XE_PCODE_REGS_H_ */ diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile index 39d8a0892274..9d1d88af8b2f 100644 --- a/drivers/gpu/drm/xe/tests/Makefile +++ b/drivers/gpu/drm/xe/tests/Makefile @@ -1,10 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 +# "live" kunit tests obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \ xe_bo_test.o \ xe_dma_buf_test.o \ xe_migrate_test.o \ - xe_mocs_test.o \ + xe_mocs_test.o + +# Normal kunit tests +obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o +xe_test-y = xe_test_mod.o \ xe_pci_test.o \ xe_rtp_test.o \ xe_wa_test.o diff --git a/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c b/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c new file mode 100644 index 000000000000..a87a7b4b040a --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_guc_db_mgr_test.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <kunit/test.h> + +#include "xe_device.h" +#include "xe_kunit_helpers.h" + +static int guc_dbm_test_init(struct kunit *test) +{ + struct xe_guc_db_mgr *dbm; + + xe_kunit_helper_xe_device_test_init(test); + dbm = &xe_device_get_gt(test->priv, 0)->uc.guc.dbm; + + mutex_init(dbm_mutex(dbm)); + test->priv = dbm; + return 0; +} + +static void test_empty(struct kunit *test) +{ + struct xe_guc_db_mgr *dbm = test->priv; + + KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, 0), 0); + KUNIT_ASSERT_EQ(test, dbm->count, 0); + + mutex_lock(dbm_mutex(dbm)); + KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0); + mutex_unlock(dbm_mutex(dbm)); + + KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0); +} + +static void test_default(struct kunit *test) +{ + struct xe_guc_db_mgr *dbm = test->priv; + + KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0); + KUNIT_ASSERT_EQ(test, dbm->count, GUC_NUM_DOORBELLS); +} + +static const unsigned int guc_dbm_params[] = { + GUC_NUM_DOORBELLS / 64, + GUC_NUM_DOORBELLS / 32, + GUC_NUM_DOORBELLS / 8, + GUC_NUM_DOORBELLS, +}; + +static void uint_param_get_desc(const unsigned int *p, char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u", *p); +} + +KUNIT_ARRAY_PARAM(guc_dbm, guc_dbm_params, uint_param_get_desc); + +static void test_size(struct kunit *test) +{ + const unsigned int *p = test->param_value; + struct xe_guc_db_mgr *dbm = test->priv; + unsigned int n; + int id; + + KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, *p), 0); + KUNIT_ASSERT_EQ(test, dbm->count, *p); + + mutex_lock(dbm_mutex(dbm)); + for (n = 0; n < *p; n++) { + KUNIT_EXPECT_GE(test, id = xe_guc_db_mgr_reserve_id_locked(dbm), 0); + KUNIT_EXPECT_LT(test, id, dbm->count); + } + KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0); + mutex_unlock(dbm_mutex(dbm)); + + mutex_lock(dbm_mutex(dbm)); + for (n = 0; n < *p; n++) + xe_guc_db_mgr_release_id_locked(dbm, n); + mutex_unlock(dbm_mutex(dbm)); +} + +static void test_reuse(struct kunit *test) +{ + const unsigned int *p = test->param_value; + struct xe_guc_db_mgr *dbm = test->priv; + unsigned int n; + + KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, *p), 0); + + mutex_lock(dbm_mutex(dbm)); + for (n = 0; n < *p; n++) + KUNIT_EXPECT_GE(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0); + KUNIT_EXPECT_LT(test, 
xe_guc_db_mgr_reserve_id_locked(dbm), 0); + mutex_unlock(dbm_mutex(dbm)); + + mutex_lock(dbm_mutex(dbm)); + for (n = 0; n < *p; n++) { + xe_guc_db_mgr_release_id_locked(dbm, n); + KUNIT_EXPECT_EQ(test, xe_guc_db_mgr_reserve_id_locked(dbm), n); + } + KUNIT_EXPECT_LT(test, xe_guc_db_mgr_reserve_id_locked(dbm), 0); + mutex_unlock(dbm_mutex(dbm)); + + mutex_lock(dbm_mutex(dbm)); + for (n = 0; n < *p; n++) + xe_guc_db_mgr_release_id_locked(dbm, n); + mutex_unlock(dbm_mutex(dbm)); +} + +static void test_range_overlap(struct kunit *test) +{ + const unsigned int *p = test->param_value; + struct xe_guc_db_mgr *dbm = test->priv; + int id1, id2, id3; + unsigned int n; + + KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0); + KUNIT_ASSERT_LE(test, *p, dbm->count); + + KUNIT_ASSERT_GE(test, id1 = xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0); + for (n = 0; n < dbm->count - *p; n++) { + KUNIT_ASSERT_GE(test, id2 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0); + KUNIT_ASSERT_NE(test, id2, id1); + KUNIT_ASSERT_NE_MSG(test, id2 < id1, id2 > id1 + *p - 1, + "id1=%d id2=%d", id1, id2); + } + KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0); + xe_guc_db_mgr_release_range(dbm, 0, dbm->count); + + if (*p >= 1) { + KUNIT_ASSERT_GE(test, id1 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0); + KUNIT_ASSERT_GE(test, id2 = xe_guc_db_mgr_reserve_range(dbm, *p - 1, 0), 0); + KUNIT_ASSERT_NE(test, id2, id1); + KUNIT_ASSERT_NE_MSG(test, id1 < id2, id1 > id2 + *p - 2, + "id1=%d id2=%d", id1, id2); + for (n = 0; n < dbm->count - *p; n++) { + KUNIT_ASSERT_GE(test, id3 = xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0); + KUNIT_ASSERT_NE(test, id3, id1); + KUNIT_ASSERT_NE(test, id3, id2); + KUNIT_ASSERT_NE_MSG(test, id3 < id2, id3 > id2 + *p - 2, + "id3=%d id2=%d", id3, id2); + } + KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0); + xe_guc_db_mgr_release_range(dbm, 0, dbm->count); + } +} + +static void test_range_compact(struct kunit *test) +{ + const unsigned int *p = test->param_value; + struct xe_guc_db_mgr *dbm = test->priv; + unsigned int n; + + KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0); + KUNIT_ASSERT_NE(test, *p, 0); + KUNIT_ASSERT_LE(test, *p, dbm->count); + if (dbm->count % *p) + kunit_skip(test, "must be divisible"); + + KUNIT_ASSERT_GE(test, xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0); + for (n = 1; n < dbm->count / *p; n++) + KUNIT_ASSERT_GE(test, xe_guc_db_mgr_reserve_range(dbm, *p, 0), 0); + KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, 0), 0); + xe_guc_db_mgr_release_range(dbm, 0, dbm->count); +} + +static void test_range_spare(struct kunit *test) +{ + const unsigned int *p = test->param_value; + struct xe_guc_db_mgr *dbm = test->priv; + int id; + + KUNIT_ASSERT_EQ(test, xe_guc_db_mgr_init(dbm, ~0), 0); + KUNIT_ASSERT_LE(test, *p, dbm->count); + + KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count), 0); + KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count - *p + 1), 0); + KUNIT_ASSERT_EQ(test, id = xe_guc_db_mgr_reserve_range(dbm, *p, dbm->count - *p), 0); + KUNIT_ASSERT_LT(test, xe_guc_db_mgr_reserve_range(dbm, 1, dbm->count - *p), 0); + xe_guc_db_mgr_release_range(dbm, id, *p); +} + +static struct kunit_case guc_dbm_test_cases[] = { + KUNIT_CASE(test_empty), + KUNIT_CASE(test_default), + KUNIT_CASE_PARAM(test_size, guc_dbm_gen_params), + KUNIT_CASE_PARAM(test_reuse, guc_dbm_gen_params), + KUNIT_CASE_PARAM(test_range_overlap, guc_dbm_gen_params), + KUNIT_CASE_PARAM(test_range_compact, guc_dbm_gen_params), + 
KUNIT_CASE_PARAM(test_range_spare, guc_dbm_gen_params), + {} +}; + +static struct kunit_suite guc_dbm_suite = { + .name = "guc_dbm", + .test_cases = guc_dbm_test_cases, + .init = guc_dbm_test_init, +}; + +kunit_test_suites(&guc_dbm_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c b/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c new file mode 100644 index 000000000000..13701451b923 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_guc_relay_test.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <kunit/static_stub.h> +#include <kunit/test.h> +#include <kunit/test-bug.h> + +#include "xe_device.h" +#include "xe_kunit_helpers.h" +#include "xe_pci_test.h" + +#define TEST_RID 1234 +#define TEST_VFID 5 +#define TEST_LEN 6 +#define TEST_ACTION 0xa +#define TEST_DATA(n) (0xd0 + (n)) + +static int replacement_relay_get_totalvfs(struct xe_guc_relay *relay) +{ + return TEST_VFID; +} + +static int relay_test_init(struct kunit *test) +{ + struct xe_pci_fake_data fake = { + .sriov_mode = XE_SRIOV_MODE_PF, + .platform = XE_TIGERLAKE, /* some random platform */ + .subplatform = XE_SUBPLATFORM_NONE, + }; + struct xe_guc_relay *relay; + struct xe_device *xe; + + test->priv = &fake; + xe_kunit_helper_xe_device_test_init(test); + + xe = test->priv; + KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0); + + relay = &xe_device_get_gt(xe, 0)->uc.guc.relay; + kunit_activate_static_stub(test, relay_get_totalvfs, + replacement_relay_get_totalvfs); + + KUNIT_ASSERT_EQ(test, xe_guc_relay_init(relay), 0); + KUNIT_EXPECT_TRUE(test, relay_is_ready(relay)); + relay->last_rid = TEST_RID - 1; + + test->priv = relay; + return 0; +} + +static const u32 TEST_MSG[TEST_LEN] = { + FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) | + FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, TEST_ACTION) | + FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_DATA0, TEST_DATA(0)), + TEST_DATA(1), TEST_DATA(2), TEST_DATA(3), TEST_DATA(4), +}; + +static int replacement_xe_guc_ct_send_recv_always_fails(struct xe_guc_ct *ct, + const u32 *msg, u32 len, + u32 *response_buffer) +{ + struct kunit *test = kunit_get_current_test(); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg); + KUNIT_ASSERT_GE(test, len, GUC_HXG_MSG_MIN_LEN); + + return -ECOMM; +} + +static int replacement_xe_guc_ct_send_recv_expects_pf2guc_relay(struct xe_guc_ct *ct, + const u32 *msg, u32 len, + u32 *response_buffer) +{ + struct kunit *test = kunit_get_current_test(); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg); + KUNIT_ASSERT_GE(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN); + KUNIT_ASSERT_EQ(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN + TEST_LEN); + KUNIT_EXPECT_EQ(test, GUC_HXG_ORIGIN_HOST, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0])); + KUNIT_EXPECT_EQ(test, GUC_HXG_TYPE_REQUEST, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])); + KUNIT_EXPECT_EQ(test, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF, + FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0])); + KUNIT_EXPECT_EQ(test, TEST_VFID, + FIELD_GET(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, msg[1])); + KUNIT_EXPECT_EQ(test, TEST_RID, + FIELD_GET(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, msg[2])); + KUNIT_EXPECT_MEMEQ(test, TEST_MSG, msg + PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN, + sizeof(u32) * TEST_LEN); + return 0; +} + +static const u32 test_guc2pf[GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN] = { + /* transport */ + 
FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) | + FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) | + FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF), + FIELD_PREP_CONST(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, TEST_VFID), + FIELD_PREP_CONST(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, TEST_RID), + /* payload */ + FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS), +}; + +static const u32 test_guc2vf[GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN] = { + /* transport */ + FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) | + FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) | + FIELD_PREP_CONST(GUC_HXG_EVENT_MSG_0_ACTION, XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF), + FIELD_PREP_CONST(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, TEST_RID), + /* payload */ + FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS), +}; + +static void pf_rejects_guc2pf_too_short(struct kunit *test) +{ + const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN - 1; + struct xe_guc_relay *relay = test->priv; + const u32 *msg = test_guc2pf; + + KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2pf(relay, msg, len)); +} + +static void pf_rejects_guc2pf_too_long(struct kunit *test) +{ + const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN + 1; + struct xe_guc_relay *relay = test->priv; + const u32 *msg = test_guc2pf; + + KUNIT_ASSERT_EQ(test, -EMSGSIZE, xe_guc_relay_process_guc2pf(relay, msg, len)); +} + +static void pf_rejects_guc2pf_no_payload(struct kunit *test) +{ + const u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN; + struct xe_guc_relay *relay = test->priv; + const u32 *msg = test_guc2pf; + + KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2pf(relay, msg, len)); +} + +static void pf_fails_no_payload(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + const u32 msg = 0; + + KUNIT_ASSERT_EQ(test, -EPROTO, relay_process_msg(relay, TEST_VFID, TEST_RID, &msg, 0)); +} + +static void pf_fails_bad_origin(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + static const u32 msg[] = { + FIELD_PREP_CONST(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) | + FIELD_PREP_CONST(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS), + }; + u32 len = ARRAY_SIZE(msg); + + KUNIT_ASSERT_EQ(test, -EPROTO, relay_process_msg(relay, TEST_VFID, TEST_RID, msg, len)); +} + +static void pf_fails_bad_type(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + const u32 msg[] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, 4), /* only 4 is undefined */ + }; + u32 len = ARRAY_SIZE(msg); + + KUNIT_ASSERT_EQ(test, -EBADRQC, relay_process_msg(relay, TEST_VFID, TEST_RID, msg, len)); +} + +static void pf_txn_reports_error(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + struct relay_transaction *txn; + + txn = __relay_get_transaction(relay, false, TEST_VFID, TEST_RID, + TEST_MSG, TEST_LEN, NULL, 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, txn); + + kunit_activate_static_stub(test, xe_guc_ct_send_recv, + replacement_xe_guc_ct_send_recv_always_fails); + KUNIT_EXPECT_EQ(test, -ECOMM, relay_send_transaction(relay, txn)); + + relay_release_transaction(relay, txn); +} + +static void pf_txn_sends_pf2guc(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + struct relay_transaction *txn; + + txn = 
__relay_get_transaction(relay, false, TEST_VFID, TEST_RID, + TEST_MSG, TEST_LEN, NULL, 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, txn); + + kunit_activate_static_stub(test, xe_guc_ct_send_recv, + replacement_xe_guc_ct_send_recv_expects_pf2guc_relay); + KUNIT_ASSERT_EQ(test, 0, relay_send_transaction(relay, txn)); + + relay_release_transaction(relay, txn); +} + +static void pf_sends_pf2guc(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + + kunit_activate_static_stub(test, xe_guc_ct_send_recv, + replacement_xe_guc_ct_send_recv_expects_pf2guc_relay); + KUNIT_ASSERT_EQ(test, 0, + xe_guc_relay_send_to_vf(relay, TEST_VFID, + TEST_MSG, TEST_LEN, NULL, 0)); +} + +static int replacement_xe_guc_ct_send_recv_loopback_relay(struct xe_guc_ct *ct, + const u32 *msg, u32 len, + u32 *response_buffer) +{ + struct kunit *test = kunit_get_current_test(); + struct xe_guc_relay *relay = test->priv; + u32 *reply = kunit_kzalloc(test, len * sizeof(u32), GFP_KERNEL); + int (*guc2relay)(struct xe_guc_relay *, const u32 *, u32); + u32 action; + int err; + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ct); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, msg); + KUNIT_ASSERT_GE(test, len, GUC_HXG_MSG_MIN_LEN); + KUNIT_ASSERT_EQ(test, GUC_HXG_TYPE_REQUEST, + FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])); + KUNIT_ASSERT_GE(test, len, GUC_HXG_REQUEST_MSG_MIN_LEN); + KUNIT_ASSERT_NOT_NULL(test, reply); + + switch (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0])) { + case XE_GUC_ACTION_PF2GUC_RELAY_TO_VF: + KUNIT_ASSERT_GE(test, len, PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN); + action = XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF; + guc2relay = xe_guc_relay_process_guc2pf; + break; + case XE_GUC_ACTION_VF2GUC_RELAY_TO_PF: + KUNIT_ASSERT_GE(test, len, VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN); + action = XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF; + guc2relay = xe_guc_relay_process_guc2vf; + break; + default: + KUNIT_FAIL(test, "bad RELAY action %#x", msg[0]); + return -EINVAL; + } + + reply[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) | + FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION, action); + memcpy(reply + 1, msg + 1, sizeof(u32) * (len - 1)); + + err = guc2relay(relay, reply, len); + KUNIT_EXPECT_EQ(test, err, 0); + + return err; +} + +static void test_requires_relay_testloop(struct kunit *test) +{ + /* + * The debug relay action GUC_RELAY_ACTION_VFXPF_TESTLOOP is available + * only on builds with CONFIG_DRM_XE_DEBUG_SRIOV enabled. + * See "kunit.py --kconfig_add" option if it's missing. 
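+	 * As a sketch, a full invocation could look like:
+	 *   $ ./tools/testing/kunit/kunit.py run --kconfig_add CONFIG_DRM_XE_DEBUG_SRIOV=y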
+ */ + if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) + kunit_skip(test, "requires %s\n", __stringify(CONFIG_DRM_XE_DEBUG_SRIOV)); +} + +static void pf_loopback_nop(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + u32 request[] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_NOP), + }; + u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN]; + int ret; + + test_requires_relay_testloop(test); + + kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action); + kunit_activate_static_stub(test, xe_guc_ct_send_recv, + replacement_xe_guc_ct_send_recv_loopback_relay); + ret = xe_guc_relay_send_to_vf(relay, TEST_VFID, + request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN); + KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]), + GUC_HXG_ORIGIN_HOST); + KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]), + GUC_HXG_TYPE_RESPONSE_SUCCESS); + KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, response[0]), 0); +} + +static void pf_loopback_echo(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + u32 request[] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_ECHO), + TEST_DATA(1), TEST_DATA(2), TEST_DATA(3), TEST_DATA(4), + }; + u32 response[ARRAY_SIZE(request)]; + unsigned int n; + int ret; + + test_requires_relay_testloop(test); + + kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action); + kunit_activate_static_stub(test, xe_guc_ct_send_recv, + replacement_xe_guc_ct_send_recv_loopback_relay); + ret = xe_guc_relay_send_to_vf(relay, TEST_VFID, + request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(response)); + KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]), + GUC_HXG_ORIGIN_HOST); + KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]), + GUC_HXG_TYPE_RESPONSE_SUCCESS); + KUNIT_EXPECT_EQ(test, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, response[0]), + ARRAY_SIZE(response)); + for (n = GUC_HXG_RESPONSE_MSG_MIN_LEN; n < ret; n++) + KUNIT_EXPECT_EQ(test, request[n], response[n]); +} + +static void pf_loopback_fail(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + u32 request[] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_FAIL), + }; + u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN]; + int ret; + + test_requires_relay_testloop(test); + + kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action); + kunit_activate_static_stub(test, xe_guc_ct_send_recv, + replacement_xe_guc_ct_send_recv_loopback_relay); + ret = xe_guc_relay_send_to_vf(relay, TEST_VFID, + request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + KUNIT_ASSERT_EQ(test, ret, -EREMOTEIO); +} + +static void pf_loopback_busy(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + u32 request[] = { + 
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_BUSY), + TEST_DATA(0xb), + }; + u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN]; + int ret; + + test_requires_relay_testloop(test); + + kunit_activate_static_stub(test, relay_testonly_nop, relay_process_incoming_action); + kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action); + kunit_activate_static_stub(test, xe_guc_ct_send_recv, + replacement_xe_guc_ct_send_recv_loopback_relay); + ret = xe_guc_relay_send_to_vf(relay, TEST_VFID, + request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN); +} + +static void pf_loopback_retry(struct kunit *test) +{ + struct xe_guc_relay *relay = test->priv; + u32 request[] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VFXPF_TESTLOOP) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, VFXPF_TESTLOOP_OPCODE_RETRY), + TEST_DATA(0xd), TEST_DATA(0xd), + }; + u32 response[GUC_HXG_RESPONSE_MSG_MIN_LEN]; + int ret; + + test_requires_relay_testloop(test); + + kunit_activate_static_stub(test, relay_kick_worker, relay_process_incoming_action); + kunit_activate_static_stub(test, xe_guc_ct_send_recv, + replacement_xe_guc_ct_send_recv_loopback_relay); + ret = xe_guc_relay_send_to_vf(relay, TEST_VFID, + request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + KUNIT_ASSERT_EQ(test, ret, GUC_HXG_RESPONSE_MSG_MIN_LEN); +} + +static struct kunit_case pf_relay_test_cases[] = { + KUNIT_CASE(pf_rejects_guc2pf_too_short), + KUNIT_CASE(pf_rejects_guc2pf_too_long), + KUNIT_CASE(pf_rejects_guc2pf_no_payload), + KUNIT_CASE(pf_fails_no_payload), + KUNIT_CASE(pf_fails_bad_origin), + KUNIT_CASE(pf_fails_bad_type), + KUNIT_CASE(pf_txn_reports_error), + KUNIT_CASE(pf_txn_sends_pf2guc), + KUNIT_CASE(pf_sends_pf2guc), + KUNIT_CASE(pf_loopback_nop), + KUNIT_CASE(pf_loopback_echo), + KUNIT_CASE(pf_loopback_fail), + KUNIT_CASE_SLOW(pf_loopback_busy), + KUNIT_CASE_SLOW(pf_loopback_retry), + {} +}; + +static struct kunit_suite pf_relay_suite = { + .name = "pf_relay", + .test_cases = pf_relay_test_cases, + .init = relay_test_init, +}; + +static void vf_rejects_guc2vf_too_short(struct kunit *test) +{ + const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN - 1; + struct xe_guc_relay *relay = test->priv; + const u32 *msg = test_guc2vf; + + KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2vf(relay, msg, len)); +} + +static void vf_rejects_guc2vf_too_long(struct kunit *test) +{ + const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN + 1; + struct xe_guc_relay *relay = test->priv; + const u32 *msg = test_guc2vf; + + KUNIT_ASSERT_EQ(test, -EMSGSIZE, xe_guc_relay_process_guc2vf(relay, msg, len)); +} + +static void vf_rejects_guc2vf_no_payload(struct kunit *test) +{ + const u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN; + struct xe_guc_relay *relay = test->priv; + const u32 *msg = test_guc2vf; + + KUNIT_ASSERT_EQ(test, -EPROTO, xe_guc_relay_process_guc2vf(relay, msg, len)); +} + +static struct kunit_case vf_relay_test_cases[] = { + KUNIT_CASE(vf_rejects_guc2vf_too_short), + KUNIT_CASE(vf_rejects_guc2vf_too_long), + KUNIT_CASE(vf_rejects_guc2vf_no_payload), + {} +}; + +static struct kunit_suite vf_relay_suite = { + .name 
= "vf_relay", + .test_cases = vf_relay_test_cases, + .init = relay_test_init, +}; + +static void xe_drops_guc2pf_if_not_ready(struct kunit *test) +{ + struct xe_device *xe = test->priv; + struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay; + const u32 *msg = test_guc2pf; + u32 len = GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MIN_LEN; + + KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_process_guc2pf(relay, msg, len)); +} + +static void xe_drops_guc2vf_if_not_ready(struct kunit *test) +{ + struct xe_device *xe = test->priv; + struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay; + const u32 *msg = test_guc2vf; + u32 len = GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN + GUC_RELAY_MSG_MIN_LEN; + + KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_process_guc2vf(relay, msg, len)); +} + +static void xe_rejects_send_if_not_ready(struct kunit *test) +{ + struct xe_device *xe = test->priv; + struct xe_guc_relay *relay = &xe_device_get_gt(xe, 0)->uc.guc.relay; + u32 msg[GUC_RELAY_MSG_MIN_LEN]; + u32 len = ARRAY_SIZE(msg); + + KUNIT_ASSERT_EQ(test, -ENODEV, xe_guc_relay_send_to_pf(relay, msg, len, NULL, 0)); + KUNIT_ASSERT_EQ(test, -ENODEV, relay_send_to(relay, TEST_VFID, msg, len, NULL, 0)); +} + +static struct kunit_case no_relay_test_cases[] = { + KUNIT_CASE(xe_drops_guc2pf_if_not_ready), + KUNIT_CASE(xe_drops_guc2vf_if_not_ready), + KUNIT_CASE(xe_rejects_send_if_not_ready), + {} +}; + +static struct kunit_suite no_relay_suite = { + .name = "no_relay", + .test_cases = no_relay_test_cases, + .init = xe_kunit_helper_xe_device_test_init, +}; + +kunit_test_suites(&no_relay_suite, + &pf_relay_suite, + &vf_relay_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c new file mode 100644 index 000000000000..fefe79b3b75a --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <kunit/test.h> +#include <kunit/static_stub.h> +#include <kunit/visibility.h> + +#include <drm/drm_drv.h> +#include <drm/drm_kunit_helpers.h> + +#include "tests/xe_kunit_helpers.h" +#include "tests/xe_pci_test.h" +#include "xe_device_types.h" + +/** + * xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test. + * @test: the &kunit where this &xe_device will be used + * @dev: The parent device object + * + * This function allocates xe_device using drm_kunit_helper_alloc_device(). + * The xe_device allocation is managed by the test. + * + * @dev should be allocated using drm_kunit_helper_alloc_device(). + * + * This function uses KUNIT_ASSERT to detect any allocation failures. + * + * Return: A pointer to the new &xe_device. + */ +struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test, + struct device *dev) +{ + struct xe_device *xe; + + xe = drm_kunit_helper_alloc_drm_device(test, dev, + struct xe_device, + drm, DRIVER_GEM); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); + return xe; +} +EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_alloc_xe_device); + +static void kunit_action_restore_priv(void *priv) +{ + struct kunit *test = kunit_get_current_test(); + + test->priv = priv; +} + +/** + * xe_kunit_helper_xe_device_test_init - Prepare a &xe_device for a KUnit test. + * @test: the &kunit where this fake &xe_device will be used + * + * This function allocates and initializes a fake &xe_device and stores its + * pointer as &kunit.priv to allow the test code to access it. 
+ *
+ * As shown above, this function can be directly used as a custom
+ * implementation of &kunit_suite.init.
+ *
+ * It is possible to prepare a specific variant of the fake &xe_device by
+ * passing in &kunit.priv a pointer to a struct xe_pci_fake_data supplemented
+ * with the desired parameters prior to calling this function.
+ *
+ * This function uses KUNIT_ASSERT to detect any failures.
+ *
+ * Return: Always 0.
+ */
+int xe_kunit_helper_xe_device_test_init(struct kunit *test)
+{
+	struct xe_device *xe;
+	struct device *dev;
+	int err;
+
+	dev = drm_kunit_helper_alloc_device(test);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+	xe = xe_kunit_helper_alloc_xe_device(test, dev);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+
+	err = xe_pci_fake_device_init(xe);
+	KUNIT_ASSERT_EQ(test, err, 0);
+
+	err = kunit_add_action_or_reset(test, kunit_action_restore_priv, test->priv);
+	KUNIT_ASSERT_EQ(test, err, 0);
+
+	test->priv = xe;
+	return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init);
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
new file mode 100644
index 000000000000..067a1babf049
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_KUNIT_HELPERS_H_
+#define _XE_KUNIT_HELPERS_H_
+
+struct device;
+struct kunit;
+struct xe_device;
+
+struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
+						  struct device *dev);
+int xe_kunit_helper_xe_device_test_init(struct kunit *test);
+
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 7dd34f94e809..df5c36b70ab4 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -128,3 +128,39 @@ void xe_live_mocs_kernel_kunit(struct kunit *test)
 	xe_call_for_each_device(mocs_kernel_test_run_device);
 }
 EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
+
+static int mocs_reset_test_run_device(struct xe_device *xe)
+{
+	/* Check the mocs setup is retained over GT reset */
+
+	struct live_mocs mocs;
+	struct xe_gt *gt;
+	unsigned int flags;
+	int id;
+	struct kunit *test = xe_cur_kunit();
+
+	for_each_gt(gt, xe, id) {
+		flags = live_mocs_init(&mocs, gt);
+		kunit_info(test, "mocs_reset_test before reset\n");
+		if (flags & HAS_GLOBAL_MOCS)
+			read_mocs_table(gt, &mocs.table);
+		if (flags & HAS_LNCF_MOCS)
+			read_l3cc_table(gt, &mocs.table);
+
+		xe_gt_reset_async(gt);
+		flush_work(&gt->reset.worker);
+
+		kunit_info(test, "mocs_reset_test after reset\n");
+		if (flags & HAS_GLOBAL_MOCS)
+			read_mocs_table(gt, &mocs.table);
+		if (flags & HAS_LNCF_MOCS)
+			read_l3cc_table(gt, &mocs.table);
+	}
+	return 0;
+}
+
+void xe_live_mocs_reset_kunit(struct kunit *test)
+{
+	xe_call_for_each_device(mocs_reset_test_run_device);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
index ef56bd517b28..ee40f31e1e12 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -9,6 +9,7 @@
 static struct kunit_case xe_mocs_tests[] = {
 	KUNIT_CASE(xe_live_mocs_kernel_kunit),
+	KUNIT_CASE(xe_live_mocs_reset_kunit),
 	{}
 };
@@ -21,4 +22,5 @@ kunit_test_suite(xe_mocs_test_suite);
 MODULE_AUTHOR("Intel Corporation");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_mocs kunit test");
 MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h
b/drivers/gpu/drm/xe/tests/xe_mocs_test.h index 7faa3575e6c3..e7699d495411 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs_test.h +++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.h @@ -9,5 +9,6 @@ struct kunit; void xe_live_mocs_kernel_kunit(struct kunit *test); +void xe_live_mocs_reset_kunit(struct kunit *test); #endif diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c index 602793644f61..f62809ca8b51 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci.c +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -156,6 +156,9 @@ int xe_pci_fake_device_init(struct xe_device *xe) return -ENODEV; done: + xe->sriov.__mode = data && data->sriov_mode ? + data->sriov_mode : XE_SRIOV_MODE_NONE; + kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid); xe_info_init_early(xe, desc, subplatform_desc); diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c index 171e4180f1aa..a6705a536391 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.c +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c @@ -64,8 +64,3 @@ static struct kunit_suite xe_pci_test_suite = { }; kunit_test_suite(xe_pci_test_suite); - -MODULE_AUTHOR("Intel Corporation"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("xe_pci kunit test"); -MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h index 811ffe5bd9fd..f40dcec83992 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.h +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h @@ -9,6 +9,7 @@ #include <linux/types.h> #include "xe_platform_types.h" +#include "xe_sriov_types.h" struct xe_device; struct xe_graphics_desc; @@ -23,6 +24,7 @@ void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn); void xe_call_for_each_media_ip(xe_media_fn xe_fn); struct xe_pci_fake_data { + enum xe_sriov_mode sriov_mode; enum xe_platform platform; enum xe_subplatform subplatform; u32 graphics_verx100; diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c index 4a6972897675..06759d754783 100644 --- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c +++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c @@ -15,6 +15,7 @@ #include "regs/xe_reg_defs.h" #include "xe_device.h" #include "xe_device_types.h" +#include "xe_kunit_helpers.h" #include "xe_pci_test.h" #include "xe_reg_sr.h" #include "xe_rtp.h" @@ -276,9 +277,7 @@ static int xe_rtp_test_init(struct kunit *test) dev = drm_kunit_helper_alloc_device(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); - xe = drm_kunit_helper_alloc_drm_device(test, dev, - struct xe_device, - drm, DRIVER_GEM); + xe = xe_kunit_helper_alloc_xe_device(test, dev); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); /* Initialize an empty device */ @@ -312,8 +311,3 @@ static struct kunit_suite xe_rtp_test_suite = { }; kunit_test_suite(xe_rtp_test_suite); - -MODULE_AUTHOR("Intel Corporation"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("xe_rtp kunit test"); -MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/tests/xe_test_mod.c b/drivers/gpu/drm/xe/tests/xe_test_mod.c new file mode 100644 index 000000000000..875f3e6f965e --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_test_mod.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2023 Intel Corporation + */ +#include <linux/module.h> + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("xe kunit tests"); +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c 
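
Taken together, the KUnit helpers and the new xe_pci_fake_data.sriov_mode hook above give a suite one-call fake-device setup. A minimal sketch of a suite using them (hypothetical suite/case names; XE_SRIOV_MODE_VF and the choice of platform are assumptions, but the priv-priming protocol is exactly what the kerneldoc above describes):

	static void fake_vf_smoke(struct kunit *test)
	{
		struct xe_device *xe = test->priv;	/* installed by the init helper */

		KUNIT_EXPECT_GE(test, xe->info.tile_count, 1);
	}

	static int fake_vf_init(struct kunit *test)
	{
		struct xe_pci_fake_data data = {
			.sriov_mode = XE_SRIOV_MODE_VF,	/* variant requested via priv */
			.platform = XE_TIGERLAKE,	/* any supported platform */
		};

		test->priv = &data;	/* consumed by xe_pci_fake_device_init() */
		return xe_kunit_helper_xe_device_test_init(test);
	}

	static struct kunit_case fake_vf_test_cases[] = {
		KUNIT_CASE(fake_vf_smoke),
		{}
	};

	static struct kunit_suite fake_vf_suite = {
		.name = "fake_vf",
		.test_cases = fake_vf_test_cases,
		.init = fake_vf_init,
	};

The helper saves the old test->priv with kunit_add_action_or_reset() before overwriting it, so the fake-data pointer is restored when the test finishes.
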
index b4715b78ef3b..44570d888355 100644 --- a/drivers/gpu/drm/xe/tests/xe_wa_test.c +++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c @@ -9,6 +9,7 @@ #include <kunit/test.h> #include "xe_device.h" +#include "xe_kunit_helpers.h" #include "xe_pci_test.h" #include "xe_reg_sr.h" #include "xe_tuning.h" @@ -65,14 +66,8 @@ static const struct platform_test_case cases[] = { PLATFORM_CASE(ALDERLAKE_P, C0), SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0), SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0), - SUBPLATFORM_CASE(DG2, G10, A0), - SUBPLATFORM_CASE(DG2, G10, A1), - SUBPLATFORM_CASE(DG2, G10, B0), SUBPLATFORM_CASE(DG2, G10, C0), - SUBPLATFORM_CASE(DG2, G11, A0), - SUBPLATFORM_CASE(DG2, G11, B0), SUBPLATFORM_CASE(DG2, G11, B1), - SUBPLATFORM_CASE(DG2, G12, A0), SUBPLATFORM_CASE(DG2, G12, A1), GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0), GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0), @@ -105,9 +100,7 @@ static int xe_wa_test_init(struct kunit *test) dev = drm_kunit_helper_alloc_device(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); - xe = drm_kunit_helper_alloc_drm_device(test, dev, - struct xe_device, - drm, DRIVER_GEM); + xe = xe_kunit_helper_alloc_xe_device(test, dev); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); test->priv = &data; @@ -160,8 +153,3 @@ static struct kunit_suite xe_rtp_test_suite = { }; kunit_test_suite(xe_rtp_test_suite); - -MODULE_AUTHOR("Intel Corporation"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("xe_wa kunit test"); -MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index de89f42247e1..76dfaf1cd200 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -28,6 +28,14 @@ #include "xe_ttm_stolen_mgr.h" #include "xe_vm.h" +const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = { + [XE_PL_SYSTEM] = "system", + [XE_PL_TT] = "gtt", + [XE_PL_VRAM0] = "vram0", + [XE_PL_VRAM1] = "vram1", + [XE_PL_STOLEN] = "stolen" +}; + static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, @@ -587,6 +595,8 @@ static int xe_bo_move_notify(struct xe_bo *bo, { struct ttm_buffer_object *ttm_bo = &bo->ttm; struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); + struct ttm_resource *old_mem = ttm_bo->resource; + u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; int ret; /* @@ -606,6 +616,18 @@ static int xe_bo_move_notify(struct xe_bo *bo, if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) dma_buf_move_notify(ttm_bo->base.dma_buf); + /* + * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual), + * so if we moved from VRAM make sure to unlink this from the userfault + * tracking. 
+ */ + if (mem_type_is_vram(old_mem_type)) { + mutex_lock(&xe->mem_access.vram_userfault.lock); + if (!list_empty(&bo->vram_userfault_link)) + list_del_init(&bo->vram_userfault_link); + mutex_unlock(&xe->mem_access.vram_userfault.lock); + } + return 0; } @@ -714,8 +736,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, migrate = xe->tiles[0].migrate; xe_assert(xe, migrate); - - trace_xe_bo_move(bo); + trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type); xe_device_mem_access_get(xe); if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) { @@ -1028,7 +1049,7 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo) } } -struct ttm_device_funcs xe_ttm_funcs = { +const struct ttm_device_funcs xe_ttm_funcs = { .ttm_tt_create = xe_ttm_tt_create, .ttm_tt_populate = xe_ttm_tt_populate, .ttm_tt_unpopulate = xe_ttm_tt_unpopulate, @@ -1064,6 +1085,11 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) if (bo->vm && xe_bo_is_user(bo)) xe_vm_put(bo->vm); + mutex_lock(&xe->mem_access.vram_userfault.lock); + if (!list_empty(&bo->vram_userfault_link)) + list_del(&bo->vram_userfault_link); + mutex_unlock(&xe->mem_access.vram_userfault.lock); + kfree(bo); } @@ -1111,16 +1137,20 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf) { struct ttm_buffer_object *tbo = vmf->vma->vm_private_data; struct drm_device *ddev = tbo->base.dev; + struct xe_device *xe = to_xe_device(ddev); + struct xe_bo *bo = ttm_to_xe_bo(tbo); + bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK; vm_fault_t ret; int idx, r = 0; + if (needs_rpm) + xe_device_mem_access_get(xe); + ret = ttm_bo_vm_reserve(tbo, vmf); if (ret) - return ret; + goto out; if (drm_dev_enter(ddev, &idx)) { - struct xe_bo *bo = ttm_to_xe_bo(tbo); - trace_xe_bo_cpu_fault(bo); if (should_migrate_to_system(bo)) { @@ -1138,10 +1168,24 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf) } else { ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); } + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) - return ret; + goto out; + /* + * ttm_bo_vm_reserve() already has dma_resv_lock. + */ + if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) { + mutex_lock(&xe->mem_access.vram_userfault.lock); + if (list_empty(&bo->vram_userfault_link)) + list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list); + mutex_unlock(&xe->mem_access.vram_userfault.lock); + } dma_resv_unlock(tbo->base.resv); +out: + if (needs_rpm) + xe_device_mem_access_put(xe); + return ret; } @@ -1255,6 +1299,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, #ifdef CONFIG_PROC_FS INIT_LIST_HEAD(&bo->client_link); #endif + INIT_LIST_HEAD(&bo->vram_userfault_link); drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); @@ -1567,6 +1612,38 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til return bo; } +/** + * xe_managed_bo_reinit_in_vram + * @xe: xe device + * @tile: Tile where the new buffer will be created + * @src: Managed buffer object allocated in system memory + * + * Replace a managed src buffer object allocated in system memory with a new + * one allocated in vram, copying the data between them. + * Buffer object in VRAM is not going to have the same GGTT address, the caller + * is responsible for making sure that any old references to it are updated. + * + * Returns 0 for success, negative error code otherwise. 
+ */ +int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src) +{ + struct xe_bo *bo; + + xe_assert(xe, IS_DGFX(xe)); + xe_assert(xe, !(*src)->vmap.is_iomem); + + bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, (*src)->size, + XE_BO_CREATE_VRAM_IF_DGFX(tile) | + XE_BO_CREATE_GGTT_BIT); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src); + *src = bo; + + return 0; +} + /* * XXX: This is in the VM bind data path, likely should calculate this once and * store, with a recalculation if the BO is moved. @@ -2261,6 +2338,16 @@ int xe_bo_dumb_create(struct drm_file *file_priv, return err; } +void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo) +{ + struct ttm_buffer_object *tbo = &bo->ttm; + struct ttm_device *bdev = tbo->bdev; + + drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping); + + list_del_init(&bo->vram_userfault_link); +} + #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) #include "tests/xe_bo.c" #endif diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 9b1279aca127..c59ad15961ce 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -44,6 +44,7 @@ #define XE_BO_FIXED_PLACEMENT_BIT BIT(11) #define XE_BO_PAGETABLE BIT(12) #define XE_BO_NEEDS_CPU_ACCESS BIT(13) +#define XE_BO_NEEDS_UC BIT(14) /* this one is trigger internally only */ #define XE_BO_INTERNAL_TEST BIT(30) #define XE_BO_INTERNAL_64K BIT(31) @@ -128,6 +129,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile size_t size, u32 flags); struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, const void *data, size_t size, u32 flags); +int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src); int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, u32 bo_flags); @@ -242,12 +244,15 @@ int xe_bo_evict(struct xe_bo *bo, bool force_alloc); int xe_bo_evict_pinned(struct xe_bo *bo); int xe_bo_restore_pinned(struct xe_bo *bo); -extern struct ttm_device_funcs xe_ttm_funcs; +extern const struct ttm_device_funcs xe_ttm_funcs; +extern const char *const xe_mem_type_to_name[]; int xe_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo); + int xe_bo_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 64c2249a4e40..14ef13b7b421 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -88,6 +88,9 @@ struct xe_bo { * objects. 
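
xe_bo_runtime_pm_release_mmap_offset() above is the per-BO half of the new userfault tracking; the list walker that calls it lives on the runtime-suspend path (in xe_pm.c, outside this hunk). A sketch of what that consumer looks like, assuming the vram_userfault lock/list members added to struct xe_device later in this diff:

	static void example_revoke_vram_mmaps(struct xe_device *xe)
	{
		struct xe_bo *bo, *next;

		/* Safe iteration: the callee unlinks each BO from the list. */
		mutex_lock(&xe->mem_access.vram_userfault.lock);
		list_for_each_entry_safe(bo, next,
					 &xe->mem_access.vram_userfault.list,
					 vram_userfault_link)
			xe_bo_runtime_pm_release_mmap_offset(bo);
		mutex_unlock(&xe->mem_access.vram_userfault.lock);
	}

On the next CPU touch after resume, xe_gem_fault() re-adds the BO to the list, so the tracking stays precise across suspend cycles.
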
*/ u16 cpu_caching; + + /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */ + struct list_head vram_userfault_link; }; #define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base) diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index c56fd7d59f05..01db5b27bec5 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -55,6 +55,7 @@ static int info(struct seq_file *m, void *data) drm_printf(&p, "force_execlist %s\n", str_yes_no(xe->info.force_execlist)); drm_printf(&p, "has_flat_ccs %s\n", str_yes_no(xe->info.has_flat_ccs)); drm_printf(&p, "has_usm %s\n", str_yes_no(xe->info.has_usm)); + drm_printf(&p, "skip_guc_pc %s\n", str_yes_no(xe->info.skip_guc_pc)); for_each_gt(gt, xe, id) { drm_printf(&p, "gt%d force wake %d\n", id, xe_force_wake_ref(gt_to_fw(gt), XE_FW_GT)); diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 68abc0b195be..68d3d623a05b 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -16,6 +16,8 @@ #include "xe_guc_ct.h" #include "xe_guc_submit.h" #include "xe_hw_engine.h" +#include "xe_sched_job.h" +#include "xe_vm.h" /** * DOC: Xe device coredump @@ -58,11 +60,22 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q) return &q->gt->uc.guc; } +static void xe_devcoredump_deferred_snap_work(struct work_struct *work) +{ + struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); + + xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); + if (ss->vm) + xe_vm_snapshot_capture_delayed(ss->vm); + xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); +} + static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { struct xe_devcoredump *coredump = data; - struct xe_devcoredump_snapshot *ss; + struct xe_device *xe = coredump_to_xe(coredump); + struct xe_devcoredump_snapshot *ss = &coredump->snapshot; struct drm_printer p; struct drm_print_iterator iter; struct timespec64 ts; @@ -72,12 +85,14 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, if (!data || !coredump_to_xe(coredump)) return -ENODEV; + /* Ensure delayed work is captured before continuing */ + flush_work(&ss->work); + iter.data = buffer; iter.offset = 0; iter.start = offset; iter.remain = count; - ss = &coredump->snapshot; p = drm_coredump_printer(&iter); drm_printf(&p, "**** Xe Device Coredump ****\n"); @@ -88,16 +103,24 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); ts = ktime_to_timespec64(ss->boot_time); drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); + xe_device_snapshot_print(xe, &p); drm_printf(&p, "\n**** GuC CT ****\n"); xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p); xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p); + drm_printf(&p, "\n**** Job ****\n"); + xe_sched_job_snapshot_print(coredump->snapshot.job, &p); + drm_printf(&p, "\n**** HW Engines ****\n"); for (i = 0; i < XE_NUM_HW_ENGINES; i++) if (coredump->snapshot.hwe[i]) xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i], &p); + if (coredump->snapshot.vm) { + drm_printf(&p, "\n**** VM state ****\n"); + xe_vm_snapshot_print(coredump->snapshot.vm, &p); + } return count - iter.remain; } @@ -111,21 +134,28 @@ static void xe_devcoredump_free(void *data) if (!data || !coredump_to_xe(coredump)) return; + cancel_work_sync(&coredump->snapshot.work); + xe_guc_ct_snapshot_free(coredump->snapshot.ct); 
xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge); + xe_sched_job_snapshot_free(coredump->snapshot.job); for (i = 0; i < XE_NUM_HW_ENGINES; i++) if (coredump->snapshot.hwe[i]) xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]); + xe_vm_snapshot_free(coredump->snapshot.vm); + /* To prevent stale data on next snapshot, clear everything */ + memset(&coredump->snapshot, 0, sizeof(coredump->snapshot)); coredump->captured = false; drm_info(&coredump_to_xe(coredump)->drm, "Xe device coredump has been deleted.\n"); } static void devcoredump_snapshot(struct xe_devcoredump *coredump, - struct xe_exec_queue *q) + struct xe_sched_job *job) { struct xe_devcoredump_snapshot *ss = &coredump->snapshot; + struct xe_exec_queue *q = job->q; struct xe_guc *guc = exec_queue_to_guc(q); struct xe_hw_engine *hwe; enum xe_hw_engine_id id; @@ -137,6 +167,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, ss->snapshot_time = ktime_get_real(); ss->boot_time = ktime_get_boottime(); + ss->gt = q->gt; + INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work); + cookie = dma_fence_begin_signalling(); for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) { if (adj_logical_mask & BIT(i)) { @@ -150,7 +183,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true); - coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q); + coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(job); + coredump->snapshot.job = xe_sched_job_snapshot_capture(job); + coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm); for_each_hw_engine(hwe, q->gt, id) { if (hwe->class != q->hwe->class || @@ -161,21 +196,24 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe); } + if (ss->vm) + queue_work(system_unbound_wq, &ss->work); + xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); dma_fence_end_signalling(cookie); } /** * xe_devcoredump - Take the required snapshots and initialize coredump device. - * @q: The faulty xe_exec_queue, where the issue was detected. + * @job: The faulty xe_sched_job, where the issue was detected. * * This function should be called at the crash time within the serialized * gt_reset. It is skipped if we still have the core dump device available * with the information of the 'first' snapshot. 
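
The split introduced here, cheap state captured synchronously under dma_fence_begin_signalling() and the allocation-heavy VM capture pushed to system_unbound_wq then flushed by both the reader and the free path, is a small protocol worth seeing in isolation. A toy sketch of the same shape (illustrative types only, not the driver's API):

	struct snap {
		struct work_struct work;
		void *heavy;	/* stands in for the deferred VM snapshot */
	};

	static void snap_worker(struct work_struct *work)
	{
		struct snap *s = container_of(work, struct snap, work);

		s->heavy = kzalloc(4096, GFP_KERNEL);	/* sleeping is allowed here */
	}

	static void snap_capture(struct snap *s)	/* crash/signalling context */
	{
		INIT_WORK(&s->work, snap_worker);
		queue_work(system_unbound_wq, &s->work);
	}

	static void snap_read(struct snap *s)		/* reader context */
	{
		flush_work(&s->work);	/* what xe_devcoredump_read() does first */
		/* s->heavy is now stable and safe to format */
	}

The free path uses cancel_work_sync() instead of flush_work(), since a never-read snapshot does not need the deferred half to complete.
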
*/ -void xe_devcoredump(struct xe_exec_queue *q) +void xe_devcoredump(struct xe_sched_job *job) { - struct xe_device *xe = gt_to_xe(q->gt); + struct xe_device *xe = gt_to_xe(job->q->gt); struct xe_devcoredump *coredump = &xe->devcoredump; if (coredump->captured) { @@ -184,7 +222,7 @@ void xe_devcoredump(struct xe_exec_queue *q) } coredump->captured = true; - devcoredump_snapshot(coredump, q); + devcoredump_snapshot(coredump, job); drm_info(&xe->drm, "Xe device coredump has been created\n"); drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n", @@ -194,3 +232,4 @@ void xe_devcoredump(struct xe_exec_queue *q) xe_devcoredump_read, xe_devcoredump_free); } #endif + diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h index 6ac218a5c194..df8671f0b5eb 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.h +++ b/drivers/gpu/drm/xe/xe_devcoredump.h @@ -7,12 +7,12 @@ #define _XE_DEVCOREDUMP_H_ struct xe_device; -struct xe_exec_queue; +struct xe_sched_job; #ifdef CONFIG_DEV_COREDUMP -void xe_devcoredump(struct xe_exec_queue *q); +void xe_devcoredump(struct xe_sched_job *job); #else -static inline void xe_devcoredump(struct xe_exec_queue *q) +static inline void xe_devcoredump(struct xe_sched_job *job) { } #endif diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h index 7fdad9c3d3dd..6f654b63c7f1 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump_types.h +++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h @@ -12,6 +12,7 @@ #include "xe_hw_engine_types.h" struct xe_device; +struct xe_gt; /** * struct xe_devcoredump_snapshot - Crash snapshot @@ -26,13 +27,23 @@ struct xe_devcoredump_snapshot { /** @boot_time: Relative boot time so the uptime can be calculated. */ ktime_t boot_time; + /** @gt: Affected GT, used by forcewake for delayed capture */ + struct xe_gt *gt; + /** @work: Workqueue for deferred capture outside of signaling context */ + struct work_struct work; + /* GuC snapshots */ /** @ct: GuC CT snapshot */ struct xe_guc_ct_snapshot *ct; /** @ge: Guc Engine snapshot */ struct xe_guc_submit_exec_queue_snapshot *ge; + /** @hwe: HW Engine snapshot array */ struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES]; + /** @job: Snapshot of job state */ + struct xe_sched_job_snapshot *job; + /** @vm: Snapshot of VM state */ + struct xe_vm_snapshot *vm; }; /** @@ -44,8 +55,6 @@ struct xe_devcoredump_snapshot { * for reading the information. */ struct xe_devcoredump { - /** @xe: Xe device. */ - struct xe_device *xe; /** @captured: The snapshot of the first hang has already been taken. 
*/ bool captured; /** @snapshot: Snapshot is captured at time of the first crash */ diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 1f0b4b9ce84f..ca85e81fdb44 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -15,32 +15,35 @@ #include <drm/drm_print.h> #include <drm/xe_drm.h> +#include "display/xe_display.h" #include "regs/xe_gt_regs.h" #include "regs/xe_regs.h" #include "xe_bo.h" #include "xe_debugfs.h" -#include "xe_display.h" #include "xe_dma_buf.h" #include "xe_drm_client.h" #include "xe_drv.h" -#include "xe_exec_queue.h" #include "xe_exec.h" +#include "xe_exec_queue.h" #include "xe_ggtt.h" +#include "xe_gsc_proxy.h" #include "xe_gt.h" #include "xe_gt_mcr.h" +#include "xe_hwmon.h" #include "xe_irq.h" +#include "xe_memirq.h" #include "xe_mmio.h" #include "xe_module.h" #include "xe_pat.h" #include "xe_pcode.h" #include "xe_pm.h" #include "xe_query.h" +#include "xe_sriov.h" #include "xe_tile.h" #include "xe_ttm_stolen_mgr.h" #include "xe_ttm_sys_mgr.h" #include "xe_vm.h" #include "xe_wait_user_fence.h" -#include "xe_hwmon.h" #ifdef CONFIG_LOCKDEP struct lockdep_map xe_device_mem_access_lockdep_map = { @@ -83,9 +86,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) return 0; } -static void device_kill_persistent_exec_queues(struct xe_device *xe, - struct xe_file *xef); - static void xe_file_close(struct drm_device *dev, struct drm_file *file) { struct xe_device *xe = to_xe_device(dev); @@ -102,8 +102,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file) mutex_unlock(&xef->exec_queue.lock); xa_destroy(&xef->exec_queue.xa); mutex_destroy(&xef->exec_queue.lock); - device_kill_persistent_exec_queues(xe, xef); - mutex_lock(&xef->vm.lock); xa_for_each(&xef->vm.xa, idx, vm) xe_vm_close_and_put(vm); @@ -255,9 +253,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xa_erase(&xe->usm.asid_to_vm, asid); } - drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock); - INIT_LIST_HEAD(&xe->persistent_engines.list); - spin_lock_init(&xe->pinned.lock); INIT_LIST_HEAD(&xe->pinned.kernel_bo_present); INIT_LIST_HEAD(&xe->pinned.external_vram); @@ -432,10 +427,15 @@ int xe_device_probe(struct xe_device *xe) struct xe_tile *tile; struct xe_gt *gt; int err; + u8 last_gt; u8 id; xe_pat_init_early(xe); + err = xe_sriov_init(xe); + if (err) + return err; + xe->info.mem_region_mask = 1; err = xe_display_init_nommio(xe); if (err) @@ -456,6 +456,17 @@ int xe_device_probe(struct xe_device *xe) err = xe_ggtt_init_early(tile->mem.ggtt); if (err) return err; + if (IS_SRIOV_VF(xe)) { + err = xe_memirq_init(&tile->sriov.vf.memirq); + if (err) + return err; + } + } + + for_each_gt(gt, xe, id) { + err = xe_gt_init_hwconfig(gt); + if (err) + return err; } err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe); @@ -510,16 +521,18 @@ int xe_device_probe(struct xe_device *xe) goto err_irq_shutdown; for_each_gt(gt, xe, id) { + last_gt = id; + err = xe_gt_init(gt); if (err) - goto err_irq_shutdown; + goto err_fini_gt; } xe_heci_gsc_init(xe); err = xe_display_init(xe); if (err) - goto err_irq_shutdown; + goto err_fini_gt; err = drm_dev_register(&xe->drm, 0); if (err) @@ -540,6 +553,14 @@ int xe_device_probe(struct xe_device *xe) err_fini_display: xe_display_driver_remove(xe); +err_fini_gt: + for_each_gt(gt, xe, id) { + if (id < last_gt) + xe_gt_remove(gt); + else + break; + } + err_irq_shutdown: xe_irq_shutdown(xe); err: @@ -557,12 +578,18 @@ static void xe_device_remove_display(struct xe_device 
*xe) void xe_device_remove(struct xe_device *xe) { + struct xe_gt *gt; + u8 id; + xe_device_remove_display(xe); xe_display_fini(xe); xe_heci_gsc_fini(xe); + for_each_gt(gt, xe, id) + xe_gt_remove(gt); + xe_irq_shutdown(xe); } @@ -570,37 +597,6 @@ void xe_device_shutdown(struct xe_device *xe) { } -void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q) -{ - mutex_lock(&xe->persistent_engines.lock); - list_add_tail(&q->persistent.link, &xe->persistent_engines.list); - mutex_unlock(&xe->persistent_engines.lock); -} - -void xe_device_remove_persistent_exec_queues(struct xe_device *xe, - struct xe_exec_queue *q) -{ - mutex_lock(&xe->persistent_engines.lock); - if (!list_empty(&q->persistent.link)) - list_del(&q->persistent.link); - mutex_unlock(&xe->persistent_engines.lock); -} - -static void device_kill_persistent_exec_queues(struct xe_device *xe, - struct xe_file *xef) -{ - struct xe_exec_queue *q, *next; - - mutex_lock(&xe->persistent_engines.lock); - list_for_each_entry_safe(q, next, &xe->persistent_engines.list, - persistent.link) - if (q->persistent.xef == xef) { - xe_exec_queue_kill(q); - list_del_init(&q->persistent.link); - } - mutex_unlock(&xe->persistent_engines.lock); -} - void xe_device_wmb(struct xe_device *xe) { struct xe_gt *gt = xe_root_mmio_gt(xe); @@ -698,3 +694,33 @@ void xe_device_mem_access_put(struct xe_device *xe) xe_assert(xe, ref >= 0); } + +void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p) +{ + struct xe_gt *gt; + u8 id; + + drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid); + drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid); + + for_each_gt(gt, xe, id) { + drm_printf(p, "GT id: %u\n", id); + drm_printf(p, "\tType: %s\n", + gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media"); + drm_printf(p, "\tIP ver: %u.%u.%u\n", + REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid), + REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid), + REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid)); + drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock); + } +} + +u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address) +{ + return sign_extend64(address, xe->info.va_bits - 1); +} + +u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address) +{ + return address & GENMASK_ULL(xe->info.va_bits - 1, 0); +} diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 3da83b233206..14be34d9f543 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe); void xe_device_remove(struct xe_device *xe); void xe_device_shutdown(struct xe_device *xe); -void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q); -void xe_device_remove_persistent_exec_queues(struct xe_device *xe, - struct xe_exec_queue *q); - void xe_device_wmb(struct xe_device *xe); static inline struct xe_file *to_xe_file(const struct drm_file *file) @@ -168,6 +164,16 @@ static inline bool xe_device_has_sriov(struct xe_device *xe) return xe->info.has_sriov; } +static inline bool xe_device_has_memirq(struct xe_device *xe) +{ + return GRAPHICS_VERx100(xe) >= 1250; +} + u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size); +void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p); + +u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address); +u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address); + #endif diff --git a/drivers/gpu/drm/xe/xe_device_types.h 
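
The two address helpers added above implement the usual canonical-address convention: hardware reports virtual addresses with bit (va_bits - 1) sign-extended through bit 63, and the driver masks that back off before using the address. A worked example, assuming va_bits == 48 (the actual value is per-platform in xe->info.va_bits):

	static void example_canonical_addrs(void)
	{
		/* Canonical form replicates bit 47 into bits 63..48: */
		u64 canon = sign_extend64(0x0000800000000000ull, 47);

		/* canon == 0xffff800000000000; masking the low 48 bits undoes it: */
		u64 plain = canon & GENMASK_ULL(47, 0);

		/* plain == 0x0000800000000000 again */
	}
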
b/drivers/gpu/drm/xe/xe_device_types.h index 5dc9127a2029..9785eef2e5a4 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -16,6 +16,7 @@ #include "xe_heci_gsc.h" #include "xe_gt_types.h" #include "xe_lmtt_types.h" +#include "xe_memirq_types.h" #include "xe_platform_types.h" #include "xe_pt_types.h" #include "xe_sriov_types.h" @@ -142,10 +143,10 @@ struct xe_tile { * * 8MB-16MB: global GTT */ struct { - /** @size: size of tile's MMIO space */ + /** @mmio.size: size of tile's MMIO space */ size_t size; - /** @regs: pointer to tile's MMIO space (starting with registers) */ + /** @mmio.regs: pointer to tile's MMIO space (starting with registers) */ void __iomem *regs; } mmio; @@ -155,31 +156,31 @@ struct xe_tile { * Each tile has its own additional 256MB (28-bit) MMIO-extension space. */ struct { - /** @size: size of tile's additional MMIO-extension space */ + /** @mmio_ext.size: size of tile's additional MMIO-extension space */ size_t size; - /** @regs: pointer to tile's additional MMIO-extension space */ + /** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */ void __iomem *regs; } mmio_ext; /** @mem: memory management info for tile */ struct { /** - * @vram: VRAM info for tile. + * @mem.vram: VRAM info for tile. * * Although VRAM is associated with a specific tile, it can * still be accessed by all tiles' GTs. */ struct xe_mem_region vram; - /** @vram_mgr: VRAM TTM manager */ + /** @mem.vram_mgr: VRAM TTM manager */ struct xe_ttm_vram_mgr *vram_mgr; - /** @ggtt: Global graphics translation table */ + /** @mem.ggtt: Global graphics translation table */ struct xe_ggtt *ggtt; /** - * @kernel_bb_pool: Pool from which batchbuffers are allocated. + * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated. * * Media GT shares a pool with its primary GT. */ @@ -192,6 +193,10 @@ struct xe_tile { /** @sriov.pf.lmtt: Local Memory Translation Table. */ struct xe_lmtt lmtt; } pf; + struct { + /** @sriov.vf.memirq: Memory Based Interrupts. 
*/ + struct xe_memirq memirq; + } vf; } sriov; /** @migrate: Migration helper for vram blits and clearing */ @@ -213,68 +218,68 @@ struct xe_device { /** @info: device info */ struct intel_device_info { - /** @graphics_name: graphics IP name */ + /** @info.graphics_name: graphics IP name */ const char *graphics_name; - /** @media_name: media IP name */ + /** @info.media_name: media IP name */ const char *media_name; - /** @tile_mmio_ext_size: size of MMIO extension space, per-tile */ + /** @info.tile_mmio_ext_size: size of MMIO extension space, per-tile */ u32 tile_mmio_ext_size; - /** @graphics_verx100: graphics IP version */ + /** @info.graphics_verx100: graphics IP version */ u32 graphics_verx100; - /** @media_verx100: media IP version */ + /** @info.media_verx100: media IP version */ u32 media_verx100; - /** @mem_region_mask: mask of valid memory regions */ + /** @info.mem_region_mask: mask of valid memory regions */ u32 mem_region_mask; - /** @platform: XE platform enum */ + /** @info.platform: XE platform enum */ enum xe_platform platform; - /** @subplatform: XE subplatform enum */ + /** @info.subplatform: XE subplatform enum */ enum xe_subplatform subplatform; - /** @devid: device ID */ + /** @info.devid: device ID */ u16 devid; - /** @revid: device revision */ + /** @info.revid: device revision */ u8 revid; - /** @step: stepping information for each IP */ + /** @info.step: stepping information for each IP */ struct xe_step_info step; - /** @dma_mask_size: DMA address bits */ + /** @info.dma_mask_size: DMA address bits */ u8 dma_mask_size; - /** @vram_flags: Vram flags */ + /** @info.vram_flags: Vram flags */ u8 vram_flags; - /** @tile_count: Number of tiles */ + /** @info.tile_count: Number of tiles */ u8 tile_count; - /** @gt_count: Total number of GTs for entire device */ + /** @info.gt_count: Total number of GTs for entire device */ u8 gt_count; - /** @vm_max_level: Max VM level */ + /** @info.vm_max_level: Max VM level */ u8 vm_max_level; - /** @va_bits: Maximum bits of a virtual address */ + /** @info.va_bits: Maximum bits of a virtual address */ u8 va_bits; - /** @is_dgfx: is discrete device */ + /** @info.is_dgfx: is discrete device */ u8 is_dgfx:1; - /** @has_asid: Has address space ID */ + /** @info.has_asid: Has address space ID */ u8 has_asid:1; - /** @force_execlist: Forced execlist submission */ + /** @info.force_execlist: Forced execlist submission */ u8 force_execlist:1; - /** @has_flat_ccs: Whether flat CCS metadata is used */ + /** @info.has_flat_ccs: Whether flat CCS metadata is used */ u8 has_flat_ccs:1; - /** @has_llc: Device has a shared CPU+GPU last level cache */ + /** @info.has_llc: Device has a shared CPU+GPU last level cache */ u8 has_llc:1; - /** @has_mmio_ext: Device has extra MMIO address range */ + /** @info.has_mmio_ext: Device has extra MMIO address range */ u8 has_mmio_ext:1; - /** @has_range_tlb_invalidation: Has range based TLB invalidations */ + /** @info.has_range_tlb_invalidation: Has range based TLB invalidations */ u8 has_range_tlb_invalidation:1; - /** @has_sriov: Supports SR-IOV */ + /** @info.has_sriov: Supports SR-IOV */ u8 has_sriov:1; - /** @has_usm: Device has unified shared memory support */ + /** @info.has_usm: Device has unified shared memory support */ u8 has_usm:1; - /** @enable_display: display enabled */ + /** @info.enable_display: display enabled */ u8 enable_display:1; - /** @skip_mtcfg: skip Multi-Tile configuration from MTCFG register */ + /** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */ u8 
skip_mtcfg:1; - /** @skip_pcode: skip access to PCODE uC */ + /** @info.skip_pcode: skip access to PCODE uC */ u8 skip_pcode:1; - /** @has_heci_gscfi: device has heci gscfi */ + /** @info.has_heci_gscfi: device has heci gscfi */ u8 has_heci_gscfi:1; - /** @skip_guc_pc: Skip GuC based PM feature init */ + /** @info.skip_guc_pc: Skip GuC based PM feature init */ u8 skip_guc_pc:1; #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) @@ -286,10 +291,10 @@ struct xe_device { /** @irq: device interrupt state */ struct { - /** @lock: lock for processing irq's on this device */ + /** @irq.lock: lock for processing irq's on this device */ spinlock_t lock; - /** @enabled: interrupts enabled on this device */ + /** @irq.enabled: interrupts enabled on this device */ bool enabled; } irq; @@ -298,17 +303,17 @@ struct xe_device { /** @mmio: mmio info for device */ struct { - /** @size: size of MMIO space for device */ + /** @mmio.size: size of MMIO space for device */ size_t size; - /** @regs: pointer to MMIO space for device */ + /** @mmio.regs: pointer to MMIO space for device */ void __iomem *regs; } mmio; /** @mem: memory info for device */ struct { - /** @vram: VRAM info for device */ + /** @mem.vram: VRAM info for device */ struct xe_mem_region vram; - /** @sys_mgr: system TTM manager */ + /** @mem.sys_mgr: system TTM manager */ struct ttm_resource_manager sys_mgr; } mem; @@ -316,48 +321,42 @@ struct xe_device { struct { /** @sriov.__mode: SR-IOV mode (Don't access directly!) */ enum xe_sriov_mode __mode; + /** @sriov.wq: workqueue used by the virtualization workers */ + struct workqueue_struct *wq; } sriov; /** @clients: drm clients info */ struct { - /** @lock: Protects drm clients info */ + /** @clients.lock: Protects drm clients info */ spinlock_t lock; - /** @count: number of drm clients */ + /** @clients.count: number of drm clients */ u64 count; } clients; /** @usm: unified memory state */ struct { - /** @asid: convert a ASID to VM */ + /** @usm.asid: convert a ASID to VM */ struct xarray asid_to_vm; - /** @next_asid: next ASID, used to cyclical alloc asids */ + /** @usm.next_asid: next ASID, used to cyclical alloc asids */ u32 next_asid; - /** @num_vm_in_fault_mode: number of VM in fault mode */ + /** @usm.num_vm_in_fault_mode: number of VM in fault mode */ u32 num_vm_in_fault_mode; - /** @num_vm_in_non_fault_mode: number of VM in non-fault mode */ + /** @usm.num_vm_in_non_fault_mode: number of VM in non-fault mode */ u32 num_vm_in_non_fault_mode; - /** @lock: protects UM state */ + /** @usm.lock: protects UM state */ struct mutex lock; } usm; - /** @persistent_engines: engines that are closed but still running */ - struct { - /** @lock: protects persistent engines */ - struct mutex lock; - /** @list: list of persistent engines */ - struct list_head list; - } persistent_engines; - /** @pinned: pinned BO state */ struct { - /** @lock: protected pinned BO list state */ + /** @pinned.lock: protected pinned BO list state */ spinlock_t lock; - /** @evicted: pinned kernel BO that are present */ + /** @pinned.kernel_bo_present: pinned kernel BO that are present */ struct list_head kernel_bo_present; - /** @evicted: pinned BO that have been evicted */ + /** @pinned.evicted: pinned BO that have been evicted */ struct list_head evicted; - /** @external_vram: pinned external BO in vram*/ + /** @pinned.external_vram: pinned external BO in vram*/ struct list_head external_vram; } pinned; @@ -378,36 +377,57 @@ struct xe_device { * triggering additional actions when they occur. 
*/ struct { - /** @ref: ref count of memory accesses */ + /** @mem_access.ref: ref count of memory accesses */ atomic_t ref; + + /** + * @mem_access.vram_userfault: Encapsulate vram_userfault + * related stuff + */ + struct { + /** + * @mem_access.vram_userfault.lock: Protects access to + * @vram_userfault.list. A mutex is used instead of a + * spinlock because the lock is held across entire list + * operations, which may sleep + */ + struct mutex lock; + + /** + * @mem_access.vram_userfault.list: List of user-faulted + * VRAM BOs whose mmap mappings must be released on the + * runtime-suspend path + */ + struct list_head list; + } vram_userfault; } mem_access; /** * @pat: Encapsulate PAT related stuff */ struct { - /** Internal operations to abstract platforms */ + /** @pat.ops: Internal operations to abstract platforms */ const struct xe_pat_ops *ops; - /** PAT table to program in the HW */ + /** @pat.table: PAT table to program in the HW */ const struct xe_pat_table_entry *table; - /** Number of PAT entries */ + /** @pat.n_entries: Number of PAT entries */ int n_entries; u32 idx[__XE_CACHE_LEVEL_COUNT]; } pat; /** @d3cold: Encapsulate d3cold related stuff */ struct { - /** capable: Indicates if root port is d3cold capable */ + /** @d3cold.capable: Indicates if root port is d3cold capable */ bool capable; - /** @allowed: Indicates if d3cold is a valid device state */ + /** @d3cold.allowed: Indicates if d3cold is a valid device state */ bool allowed; - /** @power_lost: Indicates if card has really lost power. */ + /** @d3cold.power_lost: Indicates if card has really lost power. */ bool power_lost; /** - * @vram_threshold: + * @d3cold.vram_threshold: * * This represents the permissible threshold (in megabytes) * for vram save/restore. d3cold will be disallowed, @@ -416,7 +436,7 @@ struct xe_device { * Default threshold value is 300mb. */ u32 vram_threshold; - /** @lock: protect vram_threshold */ + /** @d3cold.lock: protect vram_threshold */ struct mutex lock; } d3cold; @@ -524,17 +544,17 @@ struct xe_file { /** @vm: VM state for file */ struct { - /** @xe: xarray to store VMs */ + /** @vm.xe: xarray to store VMs */ struct xarray xa; - /** @lock: protects file VM state */ + /** @vm.lock: protects file VM state */ struct mutex lock; } vm; /** @exec_queue: Submission exec queue state for file */ struct { - /** @xe: xarray to store engines */ + /** @exec_queue.xe: xarray to store engines */ struct xarray xa; - /** @lock: protects file engine state */ + /** @exec_queue.lock: protects file engine state */ struct mutex lock; } exec_queue; diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c index 82d1305e831f..87c10bd7958b 100644 --- a/drivers/gpu/drm/xe/xe_drm_client.c +++ b/drivers/gpu/drm/xe/xe_drm_client.c @@ -113,7 +113,7 @@ static void bo_meminfo(struct xe_bo *bo, else mem_type = XE_PL_TT; - if (bo->ttm.base.handle_count > 1) + if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base)) stats[mem_type].shared += sz; else stats[mem_type].private += sz; @@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo, static void show_meminfo(struct drm_printer *p, struct drm_file *file) { - static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = { - [XE_PL_SYSTEM] = "system", - [XE_PL_TT] = "gtt", - [XE_PL_VRAM0] = "vram0", - [XE_PL_VRAM1] = "vram1", - [4 ...
6] = NULL, - [XE_PL_STOLEN] = "stolen" - }; struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {}; struct xe_file *xef = file->driver_priv; struct ttm_device *bdev = &xef->xe->ttm; @@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file) spin_unlock(&client->bos_lock); for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) { - if (!mem_type_to_name[mem_type]) + if (!xe_mem_type_to_name[mem_type]) continue; man = ttm_manager_type(bdev, mem_type); @@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file) DRM_GEM_OBJECT_RESIDENT | (mem_type != XE_PL_SYSTEM ? 0 : DRM_GEM_OBJECT_PURGEABLE), - mem_type_to_name[mem_type]); + xe_mem_type_to_name[mem_type]); } } } diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 17f26952e665..952496c6260d 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -96,7 +96,46 @@ static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec) { - return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec); + struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm); + struct drm_gem_object *obj; + unsigned long index; + int num_fences; + int ret; + + ret = drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec); + if (ret) + return ret; + + /* + * 1 fence slot for the final submit, and 1 more for every per-tile for + * GPU bind and 1 extra for CPU bind. Note that there are potentially + * many vma per object/dma-resv, however the fence slot will just be + * re-used, since they are largely the same timeline and the seqno + * should be in order. In the case of CPU bind there is dummy fence used + * for all CPU binds, so no need to have a per-tile slot for that. + */ + num_fences = 1 + 1 + vm->xe->info.tile_count; + + /* + * We don't know upfront exactly how many fence slots we will need at + * the start of the exec, since the TTM bo_validate above can consume + * numerous fence slots. Also due to how the dma_resv_reserve_fences() + * works it only ensures that at least that many fence slots are + * available i.e if there are already 10 slots available and we reserve + * two more, it can just noop without reserving anything. With this it + * is quite possible that TTM steals some of the fence slots and then + * when it comes time to do the vma binding and final exec stage we are + * lacking enough fence slots, leading to some nasty BUG_ON() when + * adding the fences. Hence just add our own fences here, after the + * validate stage. 
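
The comment above encodes a subtle dma-resv rule: dma_resv_reserve_fences() guarantees that *at least* N free slots exist rather than handing out dedicated ones, so capacity consumed by TTM during validation has to be re-reserved afterwards, before any fence is added. The contract in isolation (standalone sketch, outside the exec path's ww-lock handling):

	#include <linux/dma-fence.h>
	#include <linux/dma-resv.h>

	static int reserve_then_add(struct dma_resv *resv, struct dma_fence *fence,
				    unsigned int num_fences)
	{
		int ret;

		ret = dma_resv_lock(resv, NULL);
		if (ret)
			return ret;

		/* Guarantees capacity for num_fences more fences; may be a
		 * no-op if enough slots are already free. */
		ret = dma_resv_reserve_fences(resv, num_fences);
		if (!ret)
			dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);

		dma_resv_unlock(resv);
		return ret;
	}

Adding a fence without a prior successful reservation is what trips the BUG_ON() the comment warns about.
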
+ */ + drm_exec_for_each_locked_object(&vm_exec->exec, index, obj) { + ret = dma_resv_reserve_fences(obj->resv, num_fences); + if (ret) + return ret; + } + + return 0; } int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -197,7 +236,6 @@ retry: } vm_exec.vm = &vm->gpuvm; - vm_exec.num_fences = 1 + vm->xe->info.tile_count; vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT; if (xe_vm_in_lr_mode(vm)) { drm_exec_init(exec, vm_exec.flags, 0); diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index bcfc4127c7c5..4bb8f897bf15 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -30,21 +30,23 @@ enum xe_exec_queue_sched_prop { XE_EXEC_QUEUE_SCHED_PROP_MAX = 3, }; -static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe, - struct xe_vm *vm, - u32 logical_mask, - u16 width, struct xe_hw_engine *hwe, - u32 flags) +static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, + u64 extensions, int ext_number, bool create); + +static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, + struct xe_vm *vm, + u32 logical_mask, + u16 width, struct xe_hw_engine *hwe, + u32 flags, u64 extensions) { struct xe_exec_queue *q; struct xe_gt *gt = hwe->gt; int err; - int i; /* only kernel queues can be permanent */ XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL)); - q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL); + q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL); if (!q) return ERR_PTR(-ENOMEM); @@ -52,38 +54,63 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe, q->flags = flags; q->hwe = hwe; q->gt = gt; - if (vm) - q->vm = xe_vm_get(vm); q->class = hwe->class; q->width = width; q->logical_mask = logical_mask; q->fence_irq = >->fence_irq[hwe->class]; q->ring_ops = gt->ring_ops[hwe->class]; q->ops = gt->exec_queue_ops; - INIT_LIST_HEAD(&q->persistent.link); INIT_LIST_HEAD(&q->compute.link); INIT_LIST_HEAD(&q->multi_gt_link); q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us; q->sched_props.preempt_timeout_us = hwe->eclass->sched_props.preempt_timeout_us; + q->sched_props.job_timeout_ms = + hwe->eclass->sched_props.job_timeout_ms; if (q->flags & EXEC_QUEUE_FLAG_KERNEL && q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY) q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; else q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; + if (extensions) { + /* + * may set q->usm, must come before xe_lrc_init(), + * may overwrite q->sched_props, must come before q->ops->init() + */ + err = exec_queue_user_extensions(xe, q, extensions, 0, true); + if (err) { + kfree(q); + return ERR_PTR(err); + } + } + + if (vm) + q->vm = xe_vm_get(vm); + if (xe_exec_queue_is_parallel(q)) { q->parallel.composite_fence_ctx = dma_fence_context_alloc(1); q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO; } - if (q->flags & EXEC_QUEUE_FLAG_VM) { - q->bind.fence_ctx = dma_fence_context_alloc(1); - q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO; - } - for (i = 0; i < width; ++i) { - err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K); + return q; +} + +static void __xe_exec_queue_free(struct xe_exec_queue *q) +{ + if (q->vm) + xe_vm_put(q->vm); + kfree(q); +} + +static int __xe_exec_queue_init(struct xe_exec_queue *q) +{ + struct xe_device *xe = gt_to_xe(q->gt); + int i, err; + + for (i = 0; i < q->width; ++i) { + err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K); if (err) goto 
err_lrc; } @@ -100,35 +127,47 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe, * can perform GuC CT actions when needed. Caller is expected to have * already grabbed the rpm ref outside any sensitive locks. */ - if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm)) + if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm)) drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe)); - return q; + return 0; err_lrc: for (i = i - 1; i >= 0; --i) xe_lrc_finish(q->lrc + i); - kfree(q); - return ERR_PTR(err); + return err; } struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, u32 logical_mask, u16 width, - struct xe_hw_engine *hwe, u32 flags) + struct xe_hw_engine *hwe, u32 flags, + u64 extensions) { struct xe_exec_queue *q; int err; + q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, + extensions); + if (IS_ERR(q)) + return q; + if (vm) { err = xe_vm_lock(vm, true); if (err) - return ERR_PTR(err); + goto err_post_alloc; } - q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags); + + err = __xe_exec_queue_init(q); if (vm) xe_vm_unlock(vm); + if (err) + goto err_post_alloc; return q; + +err_post_alloc: + __xe_exec_queue_free(q); + return ERR_PTR(err); } struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt, @@ -153,7 +192,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe if (!logical_mask) return ERR_PTR(-ENODEV); - return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags); + return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0); } void xe_exec_queue_destroy(struct kref *ref) @@ -179,10 +218,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q) xe_lrc_finish(q->lrc + i); if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm)) xe_device_mem_access_put(gt_to_xe(q->gt)); - if (q->vm) - xe_vm_put(q->vm); - - kfree(q); + __xe_exec_queue_free(q); } void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance) @@ -240,7 +276,11 @@ static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe))) return -EPERM; - return q->ops->set_priority(q, value); + if (!create) + return q->ops->set_priority(q, value); + + q->sched_props.priority = value; + return 0; } static bool xe_exec_queue_enforce_schedule_limit(void) @@ -307,7 +347,11 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue * !xe_hw_engine_timeout_in_range(value, min, max)) return -EINVAL; - return q->ops->set_timeslice(q, value); + if (!create) + return q->ops->set_timeslice(q, value); + + q->sched_props.timeslice_us = value; + return 0; } static int exec_queue_set_preemption_timeout(struct xe_device *xe, @@ -323,23 +367,10 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe, !xe_hw_engine_timeout_in_range(value, min, max)) return -EINVAL; - return q->ops->set_preempt_timeout(q, value); -} - -static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q, - u64 value, bool create) -{ - if (XE_IOCTL_DBG(xe, !create)) - return -EINVAL; - - if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm))) - return -EINVAL; - - if (value) - q->flags |= EXEC_QUEUE_FLAG_PERSISTENT; - else - q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT; + if (!create) + return q->ops->set_preempt_timeout(q, value); + 
q->sched_props.preempt_timeout_us = value; return 0; } @@ -358,7 +389,9 @@ static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue !xe_hw_engine_timeout_in_range(value, min, max)) return -EINVAL; - return q->ops->set_job_timeout(q, value); + q->sched_props.job_timeout_ms = value; + + return 0; } static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q, @@ -414,7 +447,6 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = { [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout, - [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify, @@ -441,6 +473,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe, return -EINVAL; idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs)); + if (!exec_queue_set_property_funcs[idx]) + return -EINVAL; + return exec_queue_set_property_funcs[idx](xe, q, ext.value, create); } @@ -633,6 +668,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) { for_each_gt(gt, xe, id) { struct xe_exec_queue *new; + u32 flags; if (xe_gt_is_media_type(gt)) continue; @@ -651,14 +687,12 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, /* The migration vm doesn't hold rpm ref */ xe_device_mem_access_get(xe); + flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0); + migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate); new = xe_exec_queue_create(xe, migrate_vm, logical_mask, - args->width, hwe, - EXEC_QUEUE_FLAG_PERSISTENT | - EXEC_QUEUE_FLAG_VM | - (id ? - EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : - 0)); + args->width, hwe, flags, + args->extensions); xe_device_mem_access_put(xe); /* now held by engine */ @@ -704,9 +738,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, } q = xe_exec_queue_create(xe, vm, logical_mask, - args->width, hwe, - xe_vm_in_lr_mode(vm) ? 
0 : - EXEC_QUEUE_FLAG_PERSISTENT); + args->width, hwe, 0, + args->extensions); up_read(&vm->lock); xe_vm_put(vm); if (IS_ERR(q)) @@ -722,14 +755,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, } } - if (args->extensions) { - err = exec_queue_user_extensions(xe, q, args->extensions, 0, true); - if (XE_IOCTL_DBG(xe, err)) - goto kill_exec_queue; - } - - q->persistent.xef = xef; - mutex_lock(&xef->exec_queue.lock); err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); mutex_unlock(&xef->exec_queue.lock); @@ -872,10 +897,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data, if (XE_IOCTL_DBG(xe, !q)) return -ENOENT; - if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT)) - xe_exec_queue_kill(q); - else - xe_device_add_persistent_exec_queues(xe, q); + xe_exec_queue_kill(q); trace_xe_exec_queue_close(q); xe_exec_queue_put(q); @@ -926,20 +948,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q) * @q: The exec queue * @vm: The VM the engine does a bind or exec for * - * Get last fence, does not take a ref + * Get last fence, takes a ref * * Returns: last fence if not signaled, dma fence stub if signaled */ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, struct xe_vm *vm) { + struct dma_fence *fence; + xe_exec_queue_last_fence_lockdep_assert(q, vm); if (q->last_fence && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) xe_exec_queue_last_fence_put(q, vm); - return q->last_fence ? q->last_fence : dma_fence_get_stub(); + fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); + dma_fence_get(fence); + return fence; } /** diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h index d959cc4a1a82..02ce8d204622 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.h +++ b/drivers/gpu/drm/xe/xe_exec_queue.h @@ -16,7 +16,8 @@ struct xe_file; struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, u32 logical_mask, u16 width, - struct xe_hw_engine *hw_engine, u32 flags); + struct xe_hw_engine *hw_engine, u32 flags, + u64 extensions); struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt, struct xe_vm *vm, enum xe_engine_class class, u32 flags); diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index 8d4b7feb8c30..c40240e88068 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -106,67 +106,48 @@ struct xe_exec_queue { }; /** - * @persistent: persistent exec queue state + * @parallel: parallel submission state */ struct { - /** @xef: file which this exec queue belongs to */ - struct xe_file *xef; - /** @link: link in list of persistent exec queues */ - struct list_head link; - } persistent; - - union { - /** - * @parallel: parallel submission state - */ - struct { - /** @composite_fence_ctx: context composite fence */ - u64 composite_fence_ctx; - /** @composite_fence_seqno: seqno for composite fence */ - u32 composite_fence_seqno; - } parallel; - /** - * @bind: bind submission state - */ - struct { - /** @fence_ctx: context bind fence */ - u64 fence_ctx; - /** @fence_seqno: seqno for bind fence */ - u32 fence_seqno; - } bind; - }; + /** @parallel.composite_fence_ctx: context composite fence */ + u64 composite_fence_ctx; + /** @parallel.composite_fence_seqno: seqno for composite fence */ + u32 composite_fence_seqno; + } parallel; /** @sched_props: scheduling properties */ struct { - /** @timeslice_us: timeslice period 
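
Note the kerneldoc flip above from "does not take a ref" to "takes a ref": xe_exec_queue_last_fence_get() now returns a reference the caller owns, which closes the window where the fence could be freed under a concurrent signal. The resulting calling convention (hypothetical caller):

	static void wait_for_last_fence(struct xe_exec_queue *q, struct xe_vm *vm)
	{
		struct dma_fence *fence = xe_exec_queue_last_fence_get(q, vm);

		dma_fence_wait(fence, false);
		dma_fence_put(fence);	/* drop the reference taken by _get() */
	}
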
in micro-seconds */ + /** @sched_props.timeslice_us: timeslice period in micro-seconds */ u32 timeslice_us; - /** @preempt_timeout_us: preemption timeout in micro-seconds */ + /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */ u32 preempt_timeout_us; - /** @priority: priority of this exec queue */ + /** @sched_props.job_timeout_ms: job timeout in milliseconds */ + u32 job_timeout_ms; + /** @sched_props.priority: priority of this exec queue */ enum xe_exec_queue_priority priority; } sched_props; /** @compute: compute exec queue state */ struct { - /** @pfence: preemption fence */ + /** @compute.pfence: preemption fence */ struct dma_fence *pfence; - /** @context: preemption fence context */ + /** @compute.context: preemption fence context */ u64 context; - /** @seqno: preemption fence seqno */ + /** @compute.seqno: preemption fence seqno */ u32 seqno; - /** @link: link into VM's list of exec queues */ + /** @compute.link: link into VM's list of exec queues */ struct list_head link; - /** @lock: preemption fences lock */ + /** @compute.lock: preemption fences lock */ spinlock_t lock; } compute; /** @usm: unified shared memory state */ struct { - /** @acc_trigger: access counter trigger */ + /** @usm.acc_trigger: access counter trigger */ u32 acc_trigger; - /** @acc_notify: access counter notify */ + /** @usm.acc_notify: access counter notify */ u32 acc_notify; - /** @acc_granularity: access counter granularity */ + /** @usm.acc_granularity: access counter granularity */ u32 acc_granularity; } usm; @@ -198,8 +179,6 @@ struct xe_exec_queue_ops { int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us); /** @set_preempt_timeout: Set preemption timeout for exec queue */ int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us); - /** @set_job_timeout: Set job timeout for exec queue */ - int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms); /** * @suspend: Suspend exec queue from executing, allowed to be called * multiple times in a row before resume with the caveat that diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index 96b5224eb478..1788e78caf5c 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w) list_del(&exl->active_link); spin_unlock_irqrestore(&exl->port->lock, flags); - if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT) - xe_device_remove_persistent_exec_queues(xe, q); drm_sched_entity_fini(&exl->entity); drm_sched_fini(&exl->sched); kfree(exl); @@ -418,13 +416,6 @@ static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, return 0; } -static int execlist_exec_queue_set_job_timeout(struct xe_exec_queue *q, - u32 job_timeout_ms) -{ - /* NIY */ - return 0; -} - static int execlist_exec_queue_suspend(struct xe_exec_queue *q) { /* NIY */ @@ -455,7 +446,6 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = { .set_priority = execlist_exec_queue_set_priority, .set_timeslice = execlist_exec_queue_set_timeslice, .set_preempt_timeout = execlist_exec_queue_set_preempt_timeout, - .set_job_timeout = execlist_exec_queue_set_job_timeout, .suspend = execlist_exec_queue_suspend, .suspend_wait = execlist_exec_queue_suspend_wait, .resume = execlist_exec_queue_resume, diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 3efd2d066bf7..ab96edb058d6 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -11,12 +11,16 @@ 
#include <drm/i915_drm.h> #include "regs/xe_gt_regs.h" +#include "regs/xe_regs.h" +#include "xe_assert.h" #include "xe_bo.h" #include "xe_device.h" #include "xe_gt.h" +#include "xe_gt_printk.h" #include "xe_gt_tlb_invalidation.h" #include "xe_map.h" #include "xe_mmio.h" +#include "xe_sriov.h" #include "xe_wopcm.h" #define XELPG_GGTT_PTE_PAT0 BIT_ULL(52) @@ -141,7 +145,11 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) struct pci_dev *pdev = to_pci_dev(xe->drm.dev); unsigned int gsm_size; - gsm_size = probe_gsm_size(pdev); + if (IS_SRIOV_VF(xe)) + gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */ + else + gsm_size = probe_gsm_size(pdev); + if (gsm_size == 0) { drm_err(&xe->drm, "Hardware reported no preallocated GSM\n"); return -ENOMEM; @@ -312,6 +320,74 @@ void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix) } } +static void xe_ggtt_dump_node(struct xe_ggtt *ggtt, + const struct drm_mm_node *node, const char *description) +{ + char buf[10]; + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf)); + xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n", + node->start, node->start + node->size, buf, description); + } +} + +/** + * xe_ggtt_balloon - prevent allocation of specified GGTT addresses + * @ggtt: the &xe_ggtt where we want to make reservation + * @start: the starting GGTT address of the reserved region + * @end: the end GGTT address of the reserved region + * @node: the &drm_mm_node to hold reserved GGTT node + * + * Use xe_ggtt_deballoon() to release a reserved GGTT node. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node) +{ + int err; + + xe_tile_assert(ggtt->tile, start < end); + xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE)); + xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE)); + xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(node)); + + node->color = 0; + node->start = start; + node->size = end - start; + + mutex_lock(&ggtt->lock); + err = drm_mm_reserve_node(&ggtt->mm, node); + mutex_unlock(&ggtt->lock); + + if (xe_gt_WARN(ggtt->tile->primary_gt, err, + "Failed to balloon GGTT %#llx-%#llx (%pe)\n", + node->start, node->start + node->size, ERR_PTR(err))) + return err; + + xe_ggtt_dump_node(ggtt, node, "balloon"); + return 0; +} + +/** + * xe_ggtt_deballoon - release a reserved GGTT region + * @ggtt: the &xe_ggtt where the reserved node belongs + * @node: the &drm_mm_node with reserved GGTT region + * + * See xe_ggtt_balloon() for details. + */ +void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node) +{ + if (!drm_mm_node_allocated(node)) + return; + + xe_ggtt_dump_node(ggtt, node, "deballoon"); + + mutex_lock(&ggtt->lock); + drm_mm_remove_node(node); + mutex_unlock(&ggtt->lock); +} + int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, struct drm_mm_node *node, u32 size, u32 align, u32 mm_flags) { @@ -334,7 +410,8 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node, void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) { - u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB]; + u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? 
XE_CACHE_NONE : XE_CACHE_WB; + u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode]; u64 start = bo->ggtt_node.start; u64 offset, pte; diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h index a09c166dff70..42705e1338e1 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.h +++ b/drivers/gpu/drm/xe/xe_ggtt.h @@ -16,6 +16,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt); int xe_ggtt_init(struct xe_ggtt *ggtt); void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix); +int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 size, struct drm_mm_node *node); +void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node); + int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node, u32 size, u32 align); int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index a8a895cf4b44..a61994292c43 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -7,12 +7,14 @@ #include <drm/drm_managed.h> +#include <generated/xe_wa_oob.h> + #include "abi/gsc_mkhi_commands_abi.h" -#include "generated/xe_wa_oob.h" #include "xe_bb.h" #include "xe_bo.h" #include "xe_device.h" #include "xe_exec_queue.h" +#include "xe_gsc_proxy.h" #include "xe_gsc_submit.h" #include "xe_gt.h" #include "xe_gt_printk.h" @@ -242,8 +244,31 @@ static int gsc_upload(struct xe_gsc *gsc) if (err) return err; + return 0; +} + +static int gsc_upload_and_init(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + int ret; + + ret = gsc_upload(gsc); + if (ret) + return ret; + + xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED); xe_gt_dbg(gt, "GSC FW async load completed\n"); + /* HuC auth failure is not fatal */ + if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC)) + xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC); + + ret = xe_gsc_proxy_start(gsc); + if (ret) + return ret; + + xe_gt_dbg(gt, "GSC proxy init completed\n"); + return 0; } @@ -252,24 +277,28 @@ static void gsc_work(struct work_struct *work) struct xe_gsc *gsc = container_of(work, typeof(*gsc), work); struct xe_gt *gt = gsc_to_gt(gsc); struct xe_device *xe = gt_to_xe(gt); + u32 actions; int ret; + spin_lock_irq(&gsc->lock); + actions = gsc->work_actions; + gsc->work_actions = 0; + spin_unlock_irq(&gsc->lock); + xe_device_mem_access_get(xe); xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); - ret = gsc_upload(gsc); - if (ret && ret != -EEXIST) { - xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL); - goto out; + if (actions & GSC_ACTION_FW_LOAD) { + ret = gsc_upload_and_init(gsc); + if (ret && ret != -EEXIST) + xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL); + else + xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING); } - xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED); - - /* HuC auth failure is not fatal */ - if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC)) - xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC); + if (actions & GSC_ACTION_SW_PROXY) + xe_gsc_proxy_request_handler(gsc); -out: xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); xe_device_mem_access_put(xe); } @@ -282,6 +311,7 @@ int xe_gsc_init(struct xe_gsc *gsc) gsc->fw.type = XE_UC_FW_TYPE_GSC; INIT_WORK(&gsc->work, gsc_work); + spin_lock_init(&gsc->lock); /* The GSC uC is only available on the media GT */ if (tile->media_gt && (gt != tile->media_gt)) { @@ -302,6 +332,10 @@ int xe_gsc_init(struct xe_gsc *gsc) else if (ret) goto out; + ret = xe_gsc_proxy_init(gsc); + if (ret && ret != -ENODEV) + goto out; + return 0; 
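A minimal sketch (not part of the patch) of the producer side of the gsc->work_actions pattern consumed by gsc_work() above; xe_gsc_load_start() below does exactly this for the firmware-load action:

	spin_lock_irq(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_FW_LOAD;
	spin_unlock_irq(&gsc->lock);
	queue_work(gsc->wq, &gsc->work);

The worker snapshots and clears the mask under the same lock before acting on it, so the firmware-load and proxy paths can share a single work item without dropping requests.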
out: @@ -356,7 +390,7 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc) q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, hwe, EXEC_QUEUE_FLAG_KERNEL | - EXEC_QUEUE_FLAG_PERMANENT); + EXEC_QUEUE_FLAG_PERMANENT, 0); if (IS_ERR(q)) { xe_gt_err(gt, "Failed to create queue for GSC submission\n"); err = PTR_ERR(q); @@ -401,6 +435,10 @@ void xe_gsc_load_start(struct xe_gsc *gsc) return; } + spin_lock_irq(&gsc->lock); + gsc->work_actions |= GSC_ACTION_FW_LOAD; + spin_unlock_irq(&gsc->lock); + queue_work(gsc->wq, &gsc->work); } @@ -410,6 +448,15 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc) flush_work(&gsc->work); } +/** + * xe_gsc_remove() - Clean up the GSC structures before driver removal + * @gsc: the GSC uC + */ +void xe_gsc_remove(struct xe_gsc *gsc) +{ + xe_gsc_proxy_remove(gsc); +} + /* * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a * GSC engine reset by writing a notification bit in the GS1 register and then diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h index bc1ef7f31ea2..c6fb32e3fd79 100644 --- a/drivers/gpu/drm/xe/xe_gsc.h +++ b/drivers/gpu/drm/xe/xe_gsc.h @@ -14,6 +14,7 @@ int xe_gsc_init(struct xe_gsc *gsc); int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc); void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc); void xe_gsc_load_start(struct xe_gsc *gsc); +void xe_gsc_remove(struct xe_gsc *gsc); void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep); diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c new file mode 100644 index 000000000000..309ef80e3b95 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c @@ -0,0 +1,537 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "xe_gsc_proxy.h" + +#include <linux/component.h> +#include <linux/delay.h> + +#include <drm/drm_managed.h> +#include <drm/i915_component.h> +#include <drm/i915_gsc_proxy_mei_interface.h> + +#include "abi/gsc_proxy_commands_abi.h" +#include "regs/xe_gsc_regs.h" +#include "xe_bo.h" +#include "xe_gsc.h" +#include "xe_gsc_submit.h" +#include "xe_gt.h" +#include "xe_gt_printk.h" +#include "xe_map.h" +#include "xe_mmio.h" +#include "xe_pm.h" + +/* + * GSC proxy: + * The GSC uC needs to communicate with the CSME to perform certain operations. + * Since the GSC can't perform this communication directly on platforms where it + * is integrated in GT, the graphics driver needs to transfer the messages from + * GSC to CSME and back. The proxy flow must be manually started after the GSC + * is loaded to signal to GSC that we're ready to handle its messages and allow + * it to query its init data from CSME; GSC will then trigger an HECI2 interrupt + * if it needs to send messages to CSME again. + * The proxy flow is as follows: + * 1 - Xe submits a request to GSC asking for the message to CSME + * 2 - GSC replies with the proxy header + payload for CSME + * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component + * 4 - CSME replies with the proxy header + payload for GSC + * 5 - Xe submits a request to GSC with the reply from CSME + * 6 - GSC replies either with a new header + payload (same as step 2, so we + * restart from there) or with an end message. + */ + +/* + * The component should load quite quickly in most cases, but it could take + * a bit. 
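+ * The wait loop in xe_gsc_proxy_request_handler() below polls for the bound + * component in 100 ms steps up to this limit.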
Using a very big timeout just to cover the worst case scenario + */ +#define GSC_PROXY_INIT_TIMEOUT_MS 20000 + +/* shorthand define for code compactness */ +#define PROXY_HDR_SIZE (sizeof(struct xe_gsc_proxy_header)) + +/* the protocol supports up to 32K in each direction */ +#define GSC_PROXY_BUFFER_SIZE SZ_32K +#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2) + +static struct xe_gt * +gsc_to_gt(struct xe_gsc *gsc) +{ + return container_of(gsc, struct xe_gt, uc.gsc); +} + +static inline struct xe_device *kdev_to_xe(struct device *kdev) +{ + return dev_get_drvdata(kdev); +} + +static bool gsc_proxy_init_done(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)); + + return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) == + HECI1_FWSTS1_PROXY_STATE_NORMAL; +} + +static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + + /* make sure we never accidentally write the RST bit */ + clr |= HECI_H_CSR_RST; + + xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set); +} + +static void gsc_proxy_irq_clear(struct xe_gsc *gsc) +{ + /* The status bit is cleared by writing to it */ + __gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS); +} + +static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled) +{ + u32 set = enabled ? HECI_H_CSR_IE : 0; + u32 clr = enabled ? 0 : HECI_H_CSR_IE; + + __gsc_proxy_irq_rmw(gsc, clr, set); +} + +static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + struct i915_gsc_proxy_component *comp = gsc->proxy.component; + int ret; + + ret = comp->ops->send(comp->mei_dev, gsc->proxy.to_csme, size); + if (ret < 0) { + xe_gt_err(gt, "Failed to send CSME proxy message\n"); + return ret; + } + + ret = comp->ops->recv(comp->mei_dev, gsc->proxy.from_csme, GSC_PROXY_BUFFER_SIZE); + if (ret < 0) { + xe_gt_err(gt, "Failed to receive CSME proxy message\n"); + return ret; + } + + return ret; +} + +static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + u64 addr_in = xe_bo_ggtt_addr(gsc->proxy.bo); + u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE; + int err; + + /* the message must contain at least the gsc and proxy headers */ + if (size > GSC_PROXY_BUFFER_SIZE) { + xe_gt_err(gt, "Invalid GSC proxy message size: %u\n", size); + return -EINVAL; + } + + err = xe_gsc_pkt_submit_kernel(gsc, addr_in, size, + addr_out, GSC_PROXY_BUFFER_SIZE); + if (err) { + xe_gt_err(gt, "Failed to submit gsc proxy rq (%pe)\n", ERR_PTR(err)); + return err; + } + + return 0; +} + +static int validate_proxy_header(struct xe_gsc_proxy_header *header, + u32 source, u32 dest, u32 max_size) +{ + u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr); + u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr); + + if (header->destination != dest || header->source != source) + return -ENOEXEC; + + if (length + PROXY_HDR_SIZE > max_size) + return -E2BIG; + + switch (type) { + case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD: + if (length > 0) + break; + fallthrough; + case GSC_PROXY_MSG_TYPE_PROXY_INVALID: + return -EIO; + default: + break; + } + + return 0; +} + +#define proxy_header_wr(xe_, map_, offset_, field_, val_) \ + xe_map_wr_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_, val_) + +#define proxy_header_rd(xe_, map_, offset_, field_) \ + xe_map_rd_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_) + +static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map 
*map, u32 offset) +{ + xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE); + + proxy_header_wr(xe, map, offset, hdr, + FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) | + FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0)); + + proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD); + proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC); + proxy_header_wr(xe, map, offset, status, 0); + + return offset + PROXY_HDR_SIZE; +} + +static int proxy_query(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + struct xe_device *xe = gt_to_xe(gt); + struct xe_gsc_proxy_header *to_csme_hdr = gsc->proxy.to_csme; + void *to_csme_payload = gsc->proxy.to_csme + PROXY_HDR_SIZE; + u32 wr_offset; + u32 reply_offset; + u32 size; + int ret; + + wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0, + HECI_MEADDRESS_PROXY, 0, PROXY_HDR_SIZE); + wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset); + + size = wr_offset; + + while (1) { + /* + * Poison the GSC response header space to make sure we don't + * read a stale reply. + */ + xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0); + + /* send proxy message to GSC */ + ret = proxy_send_to_gsc(gsc, size); + if (ret) + goto proxy_error; + + /* check the reply from GSC */ + ret = xe_gsc_read_out_header(xe, &gsc->proxy.from_gsc, 0, + PROXY_HDR_SIZE, &reply_offset); + if (ret) { + xe_gt_err(gt, "Invalid gsc header in proxy reply (%pe)\n", + ERR_PTR(ret)); + goto proxy_error; + } + + /* copy the proxy header reply from GSC */ + xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc, + reply_offset, PROXY_HDR_SIZE); + + /* stop if this was the last message */ + if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END) + break; + + /* make sure the GSC-to-CSME proxy header is sane */ + ret = validate_proxy_header(to_csme_hdr, + GSC_PROXY_ADDRESSING_GSC, + GSC_PROXY_ADDRESSING_CSME, + GSC_PROXY_BUFFER_SIZE - reply_offset); + if (ret) { + xe_gt_err(gt, "invalid GSC to CSME proxy header! (%pe)\n", + ERR_PTR(ret)); + goto proxy_error; + } + + /* copy the rest of the message */ + size = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_csme_hdr->hdr); + xe_map_memcpy_from(xe, to_csme_payload, &gsc->proxy.from_gsc, + reply_offset + PROXY_HDR_SIZE, size); + + /* send the GSC message to the CSME */ + ret = proxy_send_to_csme(gsc, size + PROXY_HDR_SIZE); + if (ret < 0) + goto proxy_error; + + /* reply size from CSME, including the proxy header */ + size = ret; + if (size < PROXY_HDR_SIZE) { + xe_gt_err(gt, "CSME to GSC proxy msg too small: 0x%x\n", size); + ret = -EPROTO; + goto proxy_error; + } + + /* make sure the CSME-to-GSC proxy header is sane */ + ret = validate_proxy_header(gsc->proxy.from_csme, + GSC_PROXY_ADDRESSING_CSME, + GSC_PROXY_ADDRESSING_GSC, + GSC_PROXY_BUFFER_SIZE - reply_offset); + if (ret) { + xe_gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret); + goto proxy_error; + } + + /* Emit a new header for sending the reply to the GSC */ + wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0, + HECI_MEADDRESS_PROXY, 0, size); + + /* copy the CSME reply and update the total msg size to include the GSC header */ + xe_map_memcpy_to(xe, &gsc->proxy.to_gsc, wr_offset, gsc->proxy.from_csme, size); + + size += wr_offset; + } + +proxy_error: + return ret < 0 ? 
ret : 0; +} + +int xe_gsc_proxy_request_handler(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + int slept; + int err; + + if (!gsc->proxy.component_added) + return -ENODEV; + + /* when GSC is loaded, we can queue this before the component is bound */ + for (slept = 0; slept < GSC_PROXY_INIT_TIMEOUT_MS; slept += 100) { + if (gsc->proxy.component) + break; + + msleep(100); + } + + mutex_lock(&gsc->proxy.mutex); + if (!gsc->proxy.component) { + xe_gt_err(gt, "GSC proxy component not bound!\n"); + err = -EIO; + } else { + /* + * clear the pending interrupt and allow new proxy requests to + * be generated while we handle the current one + */ + gsc_proxy_irq_clear(gsc); + err = proxy_query(gsc); + } + mutex_unlock(&gsc->proxy.mutex); + return err; +} + +void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + + if (unlikely(!iir)) + return; + + if (!gsc->proxy.component) { + xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n"); + return; + } + + spin_lock(&gsc->lock); + gsc->work_actions |= GSC_ACTION_SW_PROXY; + spin_unlock(&gsc->lock); + + queue_work(gsc->wq, &gsc->work); +} + +static int xe_gsc_proxy_component_bind(struct device *xe_kdev, + struct device *mei_kdev, void *data) +{ + struct xe_device *xe = kdev_to_xe(xe_kdev); + struct xe_gt *gt = xe->tiles[0].media_gt; + struct xe_gsc *gsc = &gt->uc.gsc; + + mutex_lock(&gsc->proxy.mutex); + gsc->proxy.component = data; + gsc->proxy.component->mei_dev = mei_kdev; + mutex_unlock(&gsc->proxy.mutex); + + return 0; +} + +static void xe_gsc_proxy_component_unbind(struct device *xe_kdev, + struct device *mei_kdev, void *data) +{ + struct xe_device *xe = kdev_to_xe(xe_kdev); + struct xe_gt *gt = xe->tiles[0].media_gt; + struct xe_gsc *gsc = &gt->uc.gsc; + + xe_gsc_wait_for_worker_completion(gsc); + + mutex_lock(&gsc->proxy.mutex); + gsc->proxy.component = NULL; + mutex_unlock(&gsc->proxy.mutex); +} + +static const struct component_ops xe_gsc_proxy_component_ops = { + .bind = xe_gsc_proxy_component_bind, + .unbind = xe_gsc_proxy_component_unbind, +}; + +static void proxy_channel_free(struct drm_device *drm, void *arg) +{ + struct xe_gsc *gsc = arg; + + if (!gsc->proxy.bo) + return; + + if (gsc->proxy.to_csme) { + kfree(gsc->proxy.to_csme); + gsc->proxy.to_csme = NULL; + gsc->proxy.from_csme = NULL; + } + + if (gsc->proxy.bo) { + iosys_map_clear(&gsc->proxy.to_gsc); + iosys_map_clear(&gsc->proxy.from_gsc); + xe_bo_unpin_map_no_vm(gsc->proxy.bo); + gsc->proxy.bo = NULL; + } +} + +static int proxy_channel_alloc(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + struct xe_tile *tile = gt_to_tile(gt); + struct xe_device *xe = gt_to_xe(gt); + struct xe_bo *bo; + void *csme; + int err; + + csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL); + if (!csme) + return -ENOMEM; + + bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE, + ttm_bo_type_kernel, + XE_BO_CREATE_SYSTEM_BIT | + XE_BO_CREATE_GGTT_BIT); + if (IS_ERR(bo)) { + kfree(csme); + return PTR_ERR(bo); + } + + gsc->proxy.bo = bo; + gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0); + gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE); + gsc->proxy.to_csme = csme; + gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE; + + err = drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc); + if (err) + return err; + + return 0; +} + +/** + * xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy + * @gsc: the GSC uC + * + * Return: 0 if the initialization 
was successful, a negative errno otherwise. + */ +int xe_gsc_proxy_init(struct xe_gsc *gsc) +{ + int err; + struct xe_gt *gt = gsc_to_gt(gsc); + struct xe_tile *tile = gt_to_tile(gt); + struct xe_device *xe = tile_to_xe(tile); + + mutex_init(&gsc->proxy.mutex); + + if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) { + xe_gt_info(gt, "can't init GSC proxy due to missing mei component\n"); + return -ENODEV; + } + + /* no multi-tile devices with this feature yet */ + if (tile->id > 0) { + xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id); + return -EINVAL; + } + + err = proxy_channel_alloc(gsc); + if (err) + return err; + + err = component_add_typed(xe->drm.dev, &xe_gsc_proxy_component_ops, + I915_COMPONENT_GSC_PROXY); + if (err < 0) { + xe_gt_err(gt, "Failed to add GSC_PROXY component (%pe)\n", ERR_PTR(err)); + return err; + } + + gsc->proxy.component_added = true; + + /* the component must be removed before unload, so can't use drmm for cleanup */ + + return 0; +} + +/** + * xe_gsc_proxy_remove() - remove the GSC proxy MEI component + * @gsc: the GSC uC + */ +void xe_gsc_proxy_remove(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + struct xe_device *xe = gt_to_xe(gt); + int err = 0; + + if (!gsc->proxy.component_added) + return; + + /* disable HECI2 IRQs */ + xe_pm_runtime_get(xe); + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (err) + xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n"); + + /* try to disable irq even if forcewake failed */ + gsc_proxy_irq_toggle(gsc, false); + + if (!err) + xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_pm_runtime_put(xe); + + xe_gsc_wait_for_worker_completion(gsc); + + component_del(xe->drm.dev, &xe_gsc_proxy_component_ops); + gsc->proxy.component_added = false; +} + +/** + * xe_gsc_proxy_start() - start the proxy by submitting the first request + * @gsc: the GSC uC + * + * Return: 0 if the proxy is now enabled, a negative errno otherwise. + */ +int xe_gsc_proxy_start(struct xe_gsc *gsc) +{ + int err; + + /* enable the proxy interrupt in the GSC shim layer */ + gsc_proxy_irq_toggle(gsc, true); + + /* + * The handling of the first proxy request must be manually triggered to + * notify the GSC that we're ready to support the proxy flow. 
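+ * Subsequent rounds are interrupt-driven: the HECI2 interrupt handler sets + * GSC_ACTION_SW_PROXY and queues gsc->work, which lands back in + * xe_gsc_proxy_request_handler().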
+ */ + err = xe_gsc_proxy_request_handler(gsc); + if (err) + return err; + + if (!gsc_proxy_init_done(gsc)) { + xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n"); + return -EIO; + } + + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h new file mode 100644 index 000000000000..908f9441f093 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_GSC_PROXY_H_ +#define _XE_GSC_PROXY_H_ + +#include <linux/types.h> + +struct xe_gsc; + +int xe_gsc_proxy_init(struct xe_gsc *gsc); +void xe_gsc_proxy_remove(struct xe_gsc *gsc); +int xe_gsc_proxy_start(struct xe_gsc *gsc); + +int xe_gsc_proxy_request_handler(struct xe_gsc *gsc); +void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir); + +#endif diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.c b/drivers/gpu/drm/xe/xe_gsc_submit.c index 8c5381e5913f..348994b271be 100644 --- a/drivers/gpu/drm/xe/xe_gsc_submit.c +++ b/drivers/gpu/drm/xe/xe_gsc_submit.c @@ -5,6 +5,8 @@ #include "xe_gsc_submit.h" +#include <linux/poison.h> + #include "abi/gsc_command_header_abi.h" #include "xe_bb.h" #include "xe_exec_queue.h" @@ -69,6 +71,17 @@ u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset, }; /** + * xe_gsc_poison_header - poison the MTL GSC header in memory + * @xe: the Xe device + * @map: the iosys map to write to + * @offset: offset from the start of the map at which the header resides + */ +void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset) +{ + xe_map_memset(xe, map, offset, POISON_FREE, GSC_HDR_SIZE); +}; + +/** * xe_gsc_check_and_update_pending - check the pending bit and update the input * header with the retry handle from the output header * @xe: the Xe device @@ -112,11 +125,18 @@ int xe_gsc_read_out_header(struct xe_device *xe, { u32 marker = mtl_gsc_header_rd(xe, map, offset, validity_marker); u32 size = mtl_gsc_header_rd(xe, map, offset, message_size); + u32 status = mtl_gsc_header_rd(xe, map, offset, status); u32 payload_size = size - GSC_HDR_SIZE; if (marker != GSC_HECI_VALIDITY_MARKER) return -EPROTO; + if (status != 0) { + drm_err(&xe->drm, "GSC header readout indicates error: %d\n", + status); + return -EINVAL; + } + if (size < GSC_HDR_SIZE || payload_size < min_payload_size) return -ENODATA; diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.h b/drivers/gpu/drm/xe/xe_gsc_submit.h index 0801da5d446a..1939855031a6 100644 --- a/drivers/gpu/drm/xe/xe_gsc_submit.h +++ b/drivers/gpu/drm/xe/xe_gsc_submit.h @@ -14,6 +14,7 @@ struct xe_gsc; u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset, u8 heci_client_id, u64 host_session_id, u32 payload_size); +void xe_gsc_poison_header(struct xe_device *xe, struct iosys_map *map, u32 offset); bool xe_gsc_check_and_update_pending(struct xe_device *xe, struct iosys_map *in, u32 offset_in, diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h index 57fefd66a7ea..138d8cc0f19c 100644 --- a/drivers/gpu/drm/xe/xe_gsc_types.h +++ b/drivers/gpu/drm/xe/xe_gsc_types.h @@ -6,12 +6,17 @@ #ifndef _XE_GSC_TYPES_H_ #define _XE_GSC_TYPES_H_ +#include <linux/iosys-map.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/types.h> #include <linux/workqueue.h> #include "xe_uc_fw_types.h" struct xe_bo; struct xe_exec_queue; +struct i915_gsc_proxy_component; /** * struct xe_gsc - GSC @@ -34,6 +39,34 @@ struct xe_gsc { /** 
@work: delayed load and proxy handling work */ struct work_struct work; + + /** @lock: protects access to the work_actions mask */ + spinlock_t lock; + + /** @work_actions: mask of actions to be performed in the work */ + u32 work_actions; +#define GSC_ACTION_FW_LOAD BIT(0) +#define GSC_ACTION_SW_PROXY BIT(1) + + /** @proxy: sub-structure containing the SW proxy-related variables */ + struct { + /** @proxy.component: struct for communication with mei component */ + struct i915_gsc_proxy_component *component; + /** @proxy.mutex: protects the component binding and usage */ + struct mutex mutex; + /** @proxy.component_added: whether the component has been added */ + bool component_added; + /** @proxy.bo: object to store message to and from the GSC */ + struct xe_bo *bo; + /** @proxy.to_gsc: map of the memory used to send messages to the GSC */ + struct iosys_map to_gsc; + /** @proxy.from_gsc: map of the memory used to recv messages from the GSC */ + struct iosys_map from_gsc; + /** @proxy.to_csme: pointer to the memory used to send messages to CSME */ + void *to_csme; + /** @proxy.from_csme: pointer to the memory used to recv messages from CSME */ + void *from_csme; + } proxy; }; #endif diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 3af2adec1295..b75f0bf0a9a1 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -78,6 +78,19 @@ void xe_gt_sanitize(struct xe_gt *gt) gt->uc.guc.submission_state.enabled = false; } +/** + * xe_gt_remove() - Clean up the GT structures before driver removal + * @gt: the GT object + * + * This function should only act on objects/structures that must be cleaned + * before the driver removal callback is complete and therefore can't be + * deferred to a drmm action. + */ +void xe_gt_remove(struct xe_gt *gt) +{ + xe_uc_remove(&gt->uc); +} + static void gt_fini(struct drm_device *drm, void *arg) { struct xe_gt *gt = arg; @@ -235,7 +248,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt) return -ENOMEM; q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, - hwe, EXEC_QUEUE_FLAG_KERNEL); + hwe, EXEC_QUEUE_FLAG_KERNEL, 0); if (IS_ERR(q)) { err = PTR_ERR(q); xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n", @@ -252,7 +265,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt) } nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), - 1, hwe, EXEC_QUEUE_FLAG_KERNEL); + 1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0); if (IS_ERR(nop_q)) { err = PTR_ERR(nop_q); xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n", @@ -302,7 +315,6 @@ int xe_gt_init_early(struct xe_gt *gt) return err; xe_gt_topology_init(gt); - xe_gt_mcr_init(gt); err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); if (err) @@ -327,7 +339,7 @@ static void dump_pat_on_error(struct xe_gt *gt) char prefix[32]; snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id); - p = drm_debug_printer(prefix); + p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix); xe_pat_dump(gt, &p); } @@ -341,8 +353,6 @@ static int gt_fw_domain_init(struct xe_gt *gt) if (err) goto err_hw_fence_irq; - xe_pat_init(gt); - if (!xe_gt_is_media_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); if (err) @@ -351,22 +361,8 @@ static int gt_fw_domain_init(struct xe_gt *gt) xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt); } - err = xe_uc_init(&gt->uc); - if (err) - goto err_force_wake; - - /* Raise GT freq to speed up HuC/GuC load */ - xe_guc_pc_init_early(&gt->uc.guc.pc); - - err = xe_uc_init_hwconfig(&gt->uc); - if (err) - goto err_force_wake; - 
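(The uC init steps removed here reappear in the new xe_gt_init_hwconfig() added further down in this file, so the hwconfig blob and CTB communication are available before full GT init; the GT-frequency raise moves into xe_guc_min_load_for_hwconfig() in xe_guc.c.)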
xe_gt_idle_sysfs_init(&gt->gtidle); - /* XXX: Fake that we pull the engine mask from hwconfig blob */ - gt->info.engine_mask = gt->info.__engine_mask; - /* Enable per hw engine IRQs */ xe_irq_enable_hwe(gt); @@ -386,6 +382,12 @@ static int gt_fw_domain_init(struct xe_gt *gt) /* Initialize CCS mode sysfs after early initialization of HW engines */ xe_gt_ccs_mode_sysfs_init(gt); + /* + * Stash hardware-reported version. Since this register does not exist + * on pre-MTL platforms, reading it there will (correctly) return 0. + */ + gt->info.gmdid = xe_mmio_read32(gt, GMD_ID); + err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); XE_WARN_ON(err); xe_device_mem_access_put(gt_to_xe(gt)); @@ -428,16 +430,15 @@ static int all_fw_domain_init(struct xe_gt *gt) if (err) goto err_force_wake; - err = xe_uc_init_post_hwconfig(&gt->uc); - if (err) - goto err_force_wake; - if (!xe_gt_is_media_type(gt)) { /* * USM has its own SA pool so it does not block behind user operations */ if (gt_to_xe(gt)->info.has_usm) { - gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16); + struct xe_device *xe = gt_to_xe(gt); + + gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), + IS_DGFX(xe) ? SZ_1M : SZ_512K, 16); if (IS_ERR(gt->usm.bb_pool)) { err = PTR_ERR(gt->usm.bb_pool); goto err_force_wake; @@ -455,6 +456,10 @@ static int all_fw_domain_init(struct xe_gt *gt) } } + err = xe_uc_init_post_hwconfig(&gt->uc); + if (err) + goto err_force_wake; + err = xe_uc_init_hw(&gt->uc); if (err) goto err_force_wake; @@ -484,6 +489,41 @@ err_hw_fence_irq: return err; } +/* + * Initialize enough GT to be able to load GuC in order to obtain hwconfig and + * enable CTB communication. + */ +int xe_gt_init_hwconfig(struct xe_gt *gt) +{ + int err; + + xe_device_mem_access_get(gt_to_xe(gt)); + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (err) + goto out; + + xe_gt_mcr_init(gt); + xe_pat_init(gt); + + err = xe_uc_init(&gt->uc); + if (err) + goto out_fw; + + err = xe_uc_init_hwconfig(&gt->uc); + if (err) + goto out_fw; + + /* XXX: Fake that we pull the engine mask from hwconfig blob */ + gt->info.engine_mask = gt->info.__engine_mask; + +out_fw: + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); +out: + xe_device_mem_access_put(gt_to_xe(gt)); + + return err; +} + int xe_gt_init(struct xe_gt *gt) { int err; @@ -619,12 +659,12 @@ static int gt_reset(struct xe_gt *gt) if (err) goto err_out; + xe_gt_tlb_invalidation_reset(gt); + err = do_gt_reset(gt); if (err) goto err_out; - xe_gt_tlb_invalidation_reset(gt); - err = do_gt_restart(gt); if (err) goto err_out; diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index 4486e083f5ef..ed6ea8057e35 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -33,6 +33,7 @@ static inline bool xe_fault_inject_gt_reset(void) #endif struct xe_gt *xe_gt_alloc(struct xe_tile *tile); +int xe_gt_init_hwconfig(struct xe_gt *gt); int xe_gt_init_early(struct xe_gt *gt); int xe_gt_init(struct xe_gt *gt); int xe_gt_record_default_lrcs(struct xe_gt *gt); @@ -41,6 +42,7 @@ int xe_gt_suspend(struct xe_gt *gt); int xe_gt_resume(struct xe_gt *gt); void xe_gt_reset_async(struct xe_gt *gt); void xe_gt_sanitize(struct xe_gt *gt); +void xe_gt_remove(struct xe_gt *gt); /** * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index 9358f7336889..9fcae65b6469 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct 
xe_gt_idle *gtidle) } if (xe_gt_is_media_type(gt)) { - sprintf(gtidle->name, "gt%d-mc\n", gt->info.id); + sprintf(gtidle->name, "gt%d-mc", gt->info.id); gtidle->idle_residency = xe_guc_pc_mc6_residency; } else { - sprintf(gtidle->name, "gt%d-rc\n", gt->info.id); + sprintf(gtidle->name, "gt%d-rc", gt->info.id); gtidle->idle_residency = xe_guc_pc_rc6_residency; } diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index 8546cd3cc50d..a7ab9ba645f9 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -10,6 +10,7 @@ #include "xe_gt_topology.h" #include "xe_gt_types.h" #include "xe_mmio.h" +#include "xe_sriov.h" /** * DOC: GT Multicast/Replicated (MCR) Register Support @@ -38,6 +39,8 @@ * ``init_steering_*()`` functions is to apply the platform-specific rules for * each MCR register type to identify a steering target that will select a * non-terminated instance. + * + * MCR registers are not available on Virtual Function (VF). */ #define STEER_SEMAPHORE XE_REG(0xFD0) @@ -352,6 +355,9 @@ void xe_gt_mcr_init(struct xe_gt *gt) BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES); BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES); + if (IS_SRIOV_VF(xe)) + return; + spin_lock_init(&gt->mcr_lock); if (gt->info.type == XE_GT_TYPE_MEDIA) { @@ -405,6 +411,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + if (IS_SRIOV_VF(xe)) + return; + if (xe->info.platform == XE_DG2) { u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) | REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2); @@ -588,6 +597,8 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr) u32 val; bool steer; + xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt))); + steer = xe_gt_mcr_get_nonterminated_steering(gt, reg_mcr, &group, &instance); @@ -619,6 +630,8 @@ u32 xe_gt_mcr_unicast_read(struct xe_gt *gt, { u32 val; + xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt))); + mcr_lock(gt); val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0); mcr_unlock(gt); @@ -640,6 +653,8 @@ u32 xe_gt_mcr_unicast_read(struct xe_gt *gt, void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, u32 value, int group, int instance) { + xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt))); + mcr_lock(gt); rw_with_mcr_steering(gt, reg_mcr, MCR_OP_WRITE, group, instance, value); mcr_unlock(gt); @@ -658,6 +673,8 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, { struct xe_reg reg = to_xe_reg(reg_mcr); + xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt))); + /* * Synchronize with any unicast operations. 
Once we have exclusive * access, the MULTICAST bit should already be set, so there's no need diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index 9c2fe1697d6e..c26e4fcca01e 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -285,9 +285,9 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf) bool ret = false; spin_lock_irq(&pf_queue->lock); - if (pf_queue->head != pf_queue->tail) { + if (pf_queue->tail != pf_queue->head) { desc = (const struct xe_guc_pagefault_desc *) - (pf_queue->data + pf_queue->head); + (pf_queue->data + pf_queue->tail); pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0); pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0); @@ -305,7 +305,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf) pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) << PFD_VIRTUAL_ADDR_LO_SHIFT; - pf_queue->head = (pf_queue->head + PF_MSG_LEN_DW) % + pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) % PF_QUEUE_NUM_DW; ret = true; } @@ -318,7 +318,7 @@ static bool pf_queue_full(struct pf_queue *pf_queue) { lockdep_assert_held(&pf_queue->lock); - return CIRC_SPACE(pf_queue->tail, pf_queue->head, PF_QUEUE_NUM_DW) <= + return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <= PF_MSG_LEN_DW; } @@ -331,17 +331,22 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) u32 asid; bool full; + /* + * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0 + */ + BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW); + if (unlikely(len != PF_MSG_LEN_DW)) return -EPROTO; asid = FIELD_GET(PFD_ASID, msg[1]); - pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE]; + pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE); spin_lock_irqsave(&pf_queue->lock, flags); full = pf_queue_full(pf_queue); if (!full) { - memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32)); - pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW; + memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32)); + pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW; queue_work(gt->usm.pf_wq, &pf_queue->worker); } else { drm_warn(&xe->drm, "PF Queue full, shouldn't be possible"); @@ -387,7 +392,7 @@ static void pf_queue_work_func(struct work_struct *w) send_pagefault_reply(&gt->uc.guc, &reply); if (time_after(jiffies, threshold) && - pf_queue->head != pf_queue->tail) { + pf_queue->tail != pf_queue->head) { queue_work(gt->usm.pf_wq, w); break; } @@ -562,9 +567,9 @@ static bool get_acc(struct acc_queue *acc_queue, struct acc *acc) bool ret = false; spin_lock(&acc_queue->lock); - if (acc_queue->head != acc_queue->tail) { + if (acc_queue->tail != acc_queue->head) { desc = (const struct xe_guc_acc_desc *) - (acc_queue->data + acc_queue->head); + (acc_queue->data + acc_queue->tail); acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2); acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 | @@ -577,7 +582,7 @@ static bool get_acc(struct acc_queue *acc_queue, struct acc *acc) acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI, desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO); - acc_queue->head = (acc_queue->head + ACC_MSG_LEN_DW) % + acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) % ACC_QUEUE_NUM_DW; ret = true; } @@ -605,7 +610,7 @@ static void acc_queue_work_func(struct work_struct *w) } if (time_after(jiffies, threshold) && - acc_queue->head != acc_queue->tail) { + acc_queue->tail != acc_queue->head) { 
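/* time budget exhausted with entries still queued: reschedule the worker */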
queue_work(gt->usm.acc_wq, w); break; } @@ -616,7 +621,7 @@ static bool acc_queue_full(struct acc_queue *acc_queue) { lockdep_assert_held(&acc_queue->lock); - return CIRC_SPACE(acc_queue->tail, acc_queue->head, ACC_QUEUE_NUM_DW) <= + return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <= ACC_MSG_LEN_DW; } @@ -627,6 +632,11 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len) u32 asid; bool full; + /* + * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0 + */ + BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW); + if (unlikely(len != ACC_MSG_LEN_DW)) return -EPROTO; @@ -636,9 +646,9 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len) spin_lock(&acc_queue->lock); full = acc_queue_full(acc_queue); if (!full) { - memcpy(acc_queue->data + acc_queue->tail, msg, + memcpy(acc_queue->data + acc_queue->head, msg, len * sizeof(u32)); - acc_queue->tail = (acc_queue->tail + len) % ACC_QUEUE_NUM_DW; + acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW; queue_work(gt->usm.acc_wq, &acc_queue->worker); } else { drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC"); diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h index 5991bcadd47e..c2b004d3f48e 100644 --- a/drivers/gpu/drm/xe/xe_gt_printk.h +++ b/drivers/gpu/drm/xe/xe_gt_printk.h @@ -43,4 +43,48 @@ #define xe_gt_WARN_ON_ONCE(_gt, _condition) \ xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "gt_WARN_ON_ONCE", __stringify(_condition)) +static inline void __xe_gt_printfn_err(struct drm_printer *p, struct va_format *vaf) +{ + struct xe_gt *gt = p->arg; + + xe_gt_err(gt, "%pV", vaf); +} + +static inline void __xe_gt_printfn_info(struct drm_printer *p, struct va_format *vaf) +{ + struct xe_gt *gt = p->arg; + + xe_gt_info(gt, "%pV", vaf); +} + +/** + * xe_gt_err_printer - Construct a &drm_printer that outputs to xe_gt_err() + * @gt: the &xe_gt pointer to use in xe_gt_err() + * + * Return: The &drm_printer object. + */ +static inline struct drm_printer xe_gt_err_printer(struct xe_gt *gt) +{ + struct drm_printer p = { + .printfn = __xe_gt_printfn_err, + .arg = gt, + }; + return p; +} + +/** + * xe_gt_info_printer - Construct a &drm_printer that outputs to xe_gt_info() + * @gt: the &xe_gt pointer to use in xe_gt_info() + * + * Return: The &drm_printer object. + */ +static inline struct drm_printer xe_gt_info_printer(struct xe_gt *gt) +{ + struct drm_printer p = { + .printfn = __xe_gt_printfn_info, + .arg = gt, + }; + return p; +} + #endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h new file mode 100644 index 000000000000..17624b16300a --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_GT_SRIOV_PRINTK_H_ +#define _XE_GT_SRIOV_PRINTK_H_ + +#include "xe_gt_printk.h" +#include "xe_sriov_printk.h" + +#define __xe_gt_sriov_printk(gt, _level, fmt, ...) \ + xe_gt_printk((gt), _level, "%s" fmt, xe_sriov_printk_prefix(gt_to_xe(gt)), ##__VA_ARGS__) + +#define xe_gt_sriov_err(_gt, _fmt, ...) \ + __xe_gt_sriov_printk(_gt, err, _fmt, ##__VA_ARGS__) + +#define xe_gt_sriov_notice(_gt, _fmt, ...) \ + __xe_gt_sriov_printk(_gt, notice, _fmt, ##__VA_ARGS__) + +#define xe_gt_sriov_info(_gt, _fmt, ...) \ + __xe_gt_sriov_printk(_gt, info, _fmt, ##__VA_ARGS__) + +#define xe_gt_sriov_dbg(_gt, _fmt, ...) 
\ + __xe_gt_sriov_printk(_gt, dbg, _fmt, ##__VA_ARGS__) + +/* for low level noisy debug messages */ +#ifdef CONFIG_DRM_XE_DEBUG_SRIOV +#define xe_gt_sriov_dbg_verbose(_gt, _fmt, ...) xe_gt_sriov_dbg(_gt, _fmt, ##__VA_ARGS__) +#else +#define xe_gt_sriov_dbg_verbose(_gt, _fmt, ...) typecheck(struct xe_gt *, (_gt)) +#endif + +#endif diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 7eef23a00d77..e3a4131ebb58 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -8,6 +8,7 @@ #include "abi/guc_actions_abi.h" #include "xe_device.h" #include "xe_gt.h" +#include "xe_gt_printk.h" #include "xe_guc.h" #include "xe_guc_ct.h" #include "xe_trace.h" @@ -30,8 +31,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work) break; trace_xe_gt_tlb_invalidation_fence_timeout(fence); - drm_err(&gt_to_xe(gt)->drm, "gt%d: TLB invalidation fence timeout, seqno=%d recv=%d", - gt->info.id, fence->seqno, gt->tlb_invalidation.seqno_recv); + xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d", + fence->seqno, gt->tlb_invalidation.seqno_recv); list_del(&fence->link); fence->base.error = -ETIME; @@ -312,9 +313,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, */ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno) { - struct xe_device *xe = gt_to_xe(gt); struct xe_guc *guc = &gt->uc.guc; - struct drm_printer p = drm_err_printer(__func__); int ret; /* @@ -325,8 +324,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno) tlb_invalidation_seqno_past(gt, seqno), TLB_TIMEOUT); if (!ret) { + struct drm_printer p = xe_gt_err_printer(gt); + + xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n", + seqno, gt->tlb_invalidation.seqno_recv); xe_guc_ct_print(&guc->ct, &p, true); return -ETIME; } diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index a8d7f272c30a..5dc62fe1be49 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -84,7 +84,7 @@ void xe_gt_topology_init(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); - struct drm_printer p = drm_debug_printer("GT topology"); + struct drm_printer p; int num_geometry_regs, num_compute_regs; get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs); @@ -107,6 +107,8 @@ xe_gt_topology_init(struct xe_gt *gt) XE2_GT_COMPUTE_DSS_2); load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss); + p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology"); + xe_gt_topology_dump(gt, &p); } diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index f74684660475..70c615dd1498 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -103,20 +103,22 @@ struct xe_gt { /** @info: GT info */ struct { - /** @type: type of GT */ + /** @info.type: type of GT */ enum xe_gt_type type; - /** @id: Unique ID of this GT within the PCI Device */ + /** @info.id: Unique ID of this GT within the PCI Device */ u8 id; - /** @reference_clock: clock frequency */ + /** @info.reference_clock: clock frequency */ u32 reference_clock; - /** @engine_mask: mask of engines present on GT */ + /** @info.engine_mask: mask of engines present on GT */ u64 engine_mask; /** - * @__engine_mask: mask of engines present on GT read from + * @info.__engine_mask: mask of engines present on GT read from 
* xe_pci.c, used to fake reading the engine_mask from the * hwconfig blob. */ u64 __engine_mask; + /** @info.gmdid: raw GMD_ID value from hardware */ + u32 gmdid; } info; /** @@ -125,14 +127,14 @@ struct xe_gt { * specific offset, as well as their own forcewake handling. */ struct { - /** @fw: force wake for GT */ + /** @mmio.fw: force wake for GT */ struct xe_force_wake fw; /** - * @adj_limit: adjust MMIO address if address is below this + * @mmio.adj_limit: adjust MMIO address if address is below this * value */ u32 adj_limit; - /** @adj_offset: offset to add to MMIO address when adjusting */ + /** @mmio.adj_offset: offset to add to MMIO address when adjusting */ u32 adj_offset; } mmio; @@ -144,7 +146,7 @@ struct xe_gt { /** @reset: state for GT resets */ struct { /** - * @worker: work so GT resets can be done async, allowing the reset + * @reset.worker: work so GT resets can be done async, allowing the reset * code to safely flush all code paths */ struct work_struct worker; @@ -152,36 +154,37 @@ struct xe_gt { /** @tlb_invalidation: TLB invalidation state */ struct { - /** @seqno: TLB invalidation seqno, protected by CT lock */ + /** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */ #define TLB_INVALIDATION_SEQNO_MAX 0x100000 int seqno; /** - * @seqno_recv: last received TLB invalidation seqno, protected by CT lock + * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno, + * protected by CT lock */ int seqno_recv; /** - * @pending_fences: list of pending fences waiting TLB + * @tlb_invalidation.pending_fences: list of pending fences waiting TLB * invalidations, protected by CT lock */ struct list_head pending_fences; /** - * @pending_lock: protects @pending_fences and updating - * @seqno_recv. + * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences + * and updating @tlb_invalidation.seqno_recv. */ spinlock_t pending_lock; /** - * @fence_tdr: schedules a delayed call to + * @tlb_invalidation.fence_tdr: schedules a delayed call to * xe_gt_tlb_fence_timeout after the timeout interval is over. */ struct delayed_work fence_tdr; - /** @fence_context: context for TLB invalidation fences */ + /** @tlb_invalidation.fence_context: context for TLB invalidation fences */ u64 fence_context; /** - * @fence_seqno: seqno to TLB invalidation fences, protected by + * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by * tlb_invalidation.lock */ u32 fence_seqno; - /** @lock: protects TLB invalidation fences */ + /** @tlb_invalidation.lock: protects TLB invalidation fences */ spinlock_t lock; } tlb_invalidation; @@ -196,7 +199,7 @@ struct xe_gt { /** @usm: unified shared memory state */ struct { /** - * @bb_pool: Pool from which batchbuffers, for USM operations + * @usm.bb_pool: Pool from which batchbuffers, for USM operations * (e.g. migrations, fixing page tables), are allocated. * Dedicated pool needed so USM operations do not get blocked * behind any user operations which may have resulted in a @@ -204,66 +207,67 @@ struct xe_gt { */ struct xe_sa_manager *bb_pool; /** - * @reserved_bcs_instance: reserved BCS instance used for USM + * @usm.reserved_bcs_instance: reserved BCS instance used for USM * operations (e.g. 
migrations, fixing page tables) */ u16 reserved_bcs_instance; - /** @pf_wq: page fault work queue, unbound, high priority */ + /** @usm.pf_wq: page fault work queue, unbound, high priority */ struct workqueue_struct *pf_wq; - /** @acc_wq: access counter work queue, unbound, high priority */ + /** @usm.acc_wq: access counter work queue, unbound, high priority */ struct workqueue_struct *acc_wq; /** - * @pf_queue: Page fault queue used to sync faults so faults can + * @usm.pf_queue: Page fault queue used to sync faults so faults can * be processed outside the GuC CT lock. The queue is sized so * it can sync all possible faults (1 per physical engine). * Multiple queues exist so page faults from different VMs can * be processed in parallel. */ struct pf_queue { - /** @gt: back pointer to GT */ + /** @usm.pf_queue.gt: back pointer to GT */ struct xe_gt *gt; #define PF_QUEUE_NUM_DW 128 - /** @data: data in the page fault queue */ + /** @usm.pf_queue.data: data in the page fault queue */ u32 data[PF_QUEUE_NUM_DW]; /** - * @head: head pointer in DWs for page fault queue, - * moved by worker which processes faults. + * @usm.pf_queue.tail: tail pointer in DWs for page fault queue, + * moved by worker which processes faults (consumer). */ - u16 head; + u16 tail; /** - * @tail: tail pointer in DWs for page fault queue, - * moved by G2H handler. + * @usm.pf_queue.head: head pointer in DWs for page fault queue, + * moved by G2H handler (producer). */ - u16 tail; - /** @lock: protects page fault queue */ + u16 head; + /** @usm.pf_queue.lock: protects page fault queue */ spinlock_t lock; - /** @worker: to process page faults */ + /** @usm.pf_queue.worker: to process page faults */ struct work_struct worker; #define NUM_PF_QUEUE 4 } pf_queue[NUM_PF_QUEUE]; /** - * @acc_queue: Same as page fault queue, cannot process access + * @usm.acc_queue: Same as page fault queue, cannot process access * counters under CT lock. */ struct acc_queue { - /** @gt: back pointer to GT */ + /** @usm.acc_queue.gt: back pointer to GT */ struct xe_gt *gt; #define ACC_QUEUE_NUM_DW 128 - /** @data: data in the page fault queue */ + /** @usm.acc_queue.data: data in the page fault queue */ u32 data[ACC_QUEUE_NUM_DW]; /** - * @head: head pointer in DWs for page fault queue, - * moved by worker which processes faults. + * @usm.acc_queue.tail: tail pointer in DWs for access counter queue, + * moved by worker which processes counters + * (consumer). */ - u16 head; /** - * @tail: tail pointer in DWs for page fault queue, - * moved by G2H handler. + * @usm.acc_queue.head: head pointer in DWs for access counter queue, + * moved by G2H handler (producer). 
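+ * With head as the producer index and tail as the consumer index, + * CIRC_SPACE(head, tail, ACC_QUEUE_NUM_DW) yields the free space that + * acc_queue_full() checks before a new message is copied in.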
*/ - u16 tail; - /** @lock: protects page fault queue */ + u16 head; + /** @usm.acc_queue.lock: protects page fault queue */ spinlock_t lock; - /** @worker: to process access counters */ + /** @usm.acc_queue.worker: to process access counters */ struct work_struct worker; #define NUM_ACC_QUEUE 4 } acc_queue[NUM_ACC_QUEUE]; @@ -300,7 +304,7 @@ struct xe_gt { /** @pcode: GT's PCODE */ struct { - /** @lock: protecting GT's PCODE mailbox data */ + /** @pcode.lock: protecting GT's PCODE mailbox data */ struct mutex lock; } pcode; @@ -312,32 +316,32 @@ struct xe_gt { /** @mocs: info */ struct { - /** @uc_index: UC index */ + /** @mocs.uc_index: UC index */ u8 uc_index; - /** @wb_index: WB index, only used on L3_CCS platforms */ + /** @mocs.wb_index: WB index, only used on L3_CCS platforms */ u8 wb_index; } mocs; /** @fuse_topo: GT topology reported by fuse registers */ struct { - /** @g_dss_mask: dual-subslices usable by geometry */ + /** @fuse_topo.g_dss_mask: dual-subslices usable by geometry */ xe_dss_mask_t g_dss_mask; - /** @c_dss_mask: dual-subslices usable by compute */ + /** @fuse_topo.c_dss_mask: dual-subslices usable by compute */ xe_dss_mask_t c_dss_mask; - /** @eu_mask_per_dss: EU mask per DSS */ + /** @fuse_topo.eu_mask_per_dss: EU mask per DSS */ xe_eu_mask_t eu_mask_per_dss; } fuse_topo; /** @steering: register steering for individual HW units */ struct { - /* @ranges: register ranges used for this steering type */ + /** @steering.ranges: register ranges used for this steering type */ const struct xe_mmio_range *ranges; - /** @group_target: target to steer accesses to */ + /** @steering.group_target: target to steer accesses to */ u16 group_target; - /** @instance_target: instance to steer accesses to */ + /** @steering.instance_target: instance to steer accesses to */ u16 instance_target; } steering[NUM_STEERING_TYPES]; @@ -349,13 +353,13 @@ struct xe_gt { /** @wa_active: keep track of active workarounds */ struct { - /** @gt: bitmap with active GT workarounds */ + /** @wa_active.gt: bitmap with active GT workarounds */ unsigned long *gt; - /** @engine: bitmap with active engine workarounds */ + /** @wa_active.engine: bitmap with active engine workarounds */ unsigned long *engine; - /** @lrc: bitmap with active LRC workarounds */ + /** @wa_active.lrc: bitmap with active LRC workarounds */ unsigned long *lrc; - /** @oob: bitmap with active OOB workarounds */ + /** @wa_active.oob: bitmap with active OOB workarounds */ unsigned long *oob; } wa_active; }; diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 0a61390c64a7..0d2a2dd13f11 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -7,9 +7,10 @@ #include <drm/drm_managed.h> +#include <generated/xe_wa_oob.h> + #include "abi/guc_actions_abi.h" #include "abi/guc_errors_abi.h" -#include "generated/xe_wa_oob.h" #include "regs/xe_gt_regs.h" #include "regs/xe_guc_regs.h" #include "xe_bo.h" @@ -21,9 +22,12 @@ #include "xe_guc_hwconfig.h" #include "xe_guc_log.h" #include "xe_guc_pc.h" +#include "xe_guc_relay.h" #include "xe_guc_submit.h" +#include "xe_memirq.h" #include "xe_mmio.h" #include "xe_platform_types.h" +#include "xe_sriov.h" #include "xe_uc.h" #include "xe_uc_fw.h" #include "xe_wa.h" @@ -129,22 +133,24 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc) return flags; } +#define GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat)) + static u32 guc_ctl_wa_flags(struct xe_guc *guc) { struct xe_device *xe = guc_to_xe(guc); struct xe_gt *gt = guc_to_gt(guc); + struct xe_uc_fw 
*uc_fw = &guc->fw; + struct xe_uc_fw_version *version = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE]; + u32 flags = 0; if (XE_WA(gt, 22012773006)) flags |= GUC_WA_POLLCS; - if (XE_WA(gt, 16011759253)) - flags |= GUC_WA_GAM_CREDITS; - if (XE_WA(gt, 14014475959)) flags |= GUC_WA_HOLD_CCS_SWITCHOUT; - if (XE_WA(gt, 22011391025) || XE_WA(gt, 14012197797)) + if (XE_WA(gt, 22011391025)) flags |= GUC_WA_DUAL_QUEUE; /* @@ -155,9 +161,6 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc) if (GRAPHICS_VERx100(xe) < 1270) flags |= GUC_WA_PRE_PARSER; - if (XE_WA(gt, 16011777198)) - flags |= GUC_WA_RCS_RESET_BEFORE_RC6; - if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685)) flags |= GUC_WA_CONTEXT_ISOLATION; @@ -168,6 +171,14 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc) if (XE_WA(gt, 1509372804)) flags |= GUC_WA_RENDER_RST_RC6_EXIT; + if (XE_WA(gt, 14018913170)) { + if (GUC_VER(version->major, version->minor, version->patch) >= GUC_VER(70, 7, 0)) + flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6; + else + drm_dbg(&xe->drm, "Skip WA 14018913170: GUC version expected >= 70.7.0, found %u.%u.%u\n", + version->major, version->minor, version->patch); + } + return flags; } @@ -241,11 +252,54 @@ static void guc_fini(struct drm_device *drm, void *arg) struct xe_guc *guc = arg; xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL); - xe_guc_pc_fini(&guc->pc); xe_uc_fini_hw(&guc_to_gt(guc)->uc); xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL); } +/** + * xe_guc_comm_init_early - early initialization of GuC communication + * @guc: the &xe_guc to initialize + * + * Must be called prior to first MMIO communication with GuC firmware. + */ +void xe_guc_comm_init_early(struct xe_guc *guc) +{ + struct xe_gt *gt = guc_to_gt(guc); + + if (xe_gt_is_media_type(gt)) + guc->notify_reg = MED_GUC_HOST_INTERRUPT; + else + guc->notify_reg = GUC_HOST_INTERRUPT; +} + +static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc) +{ + struct xe_tile *tile = gt_to_tile(guc_to_gt(guc)); + struct xe_device *xe = guc_to_xe(guc); + int ret; + + if (!IS_DGFX(guc_to_xe(guc))) + return 0; + + ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo); + if (ret) + return ret; + + ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo); + if (ret) + return ret; + + ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo); + if (ret) + return ret; + + ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo); + if (ret) + return ret; + + return 0; +} + int xe_guc_init(struct xe_guc *guc) { struct xe_device *xe = guc_to_xe(guc); @@ -272,7 +326,7 @@ int xe_guc_init(struct xe_guc *guc) if (ret) goto out; - ret = xe_guc_pc_init(&guc->pc); + ret = xe_guc_relay_init(&guc->relay); if (ret) goto out; @@ -282,10 +336,7 @@ int xe_guc_init(struct xe_guc *guc) guc_init_params(guc); - if (xe_gt_is_media_type(gt)) - guc->notify_reg = MED_GUC_HOST_INTERRUPT; - else - guc->notify_reg = GUC_HOST_INTERRUPT; + xe_guc_comm_init_early(guc); xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE); @@ -304,8 +355,18 @@ out: */ int xe_guc_init_post_hwconfig(struct xe_guc *guc) { + int ret; + + ret = xe_guc_realloc_post_hwconfig(guc); + if (ret) + return ret; + guc_init_params_post_hwconfig(guc); + ret = xe_guc_pc_init(&guc->pc); + if (ret) + return ret; + return xe_guc_ads_init_post_hwconfig(&guc->ads); } @@ -429,7 +490,6 @@ static int guc_wait_ucode(struct xe_guc *guc) if (ret) { struct drm_device *drm = &xe->drm; - struct drm_printer p = drm_info_printer(drm->dev); drm_info(drm, "GuC load failed: status = 0x%08X\n", status); drm_info(drm, 
"GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n", @@ -451,8 +511,6 @@ static int guc_wait_ucode(struct xe_guc *guc) SOFT_SCRATCH(13))); ret = -ENXIO; } - - xe_guc_log_print(&guc->log, &p); } else { drm_dbg(&xe->drm, "GuC successfully loaded"); } @@ -516,6 +574,9 @@ int xe_guc_min_load_for_hwconfig(struct xe_guc *guc) xe_guc_ads_populate_minimal(&guc->ads); + /* Raise GT freq to speed up HuC/GuC load */ + xe_guc_pc_init_early(&guc->pc); + ret = __xe_guc_upload(guc); if (ret) return ret; @@ -579,10 +640,20 @@ static void guc_enable_irq(struct xe_guc *guc) int xe_guc_enable_communication(struct xe_guc *guc) { + struct xe_device *xe = guc_to_xe(guc); int err; guc_enable_irq(guc); + if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) { + struct xe_gt *gt = guc_to_gt(guc); + struct xe_tile *tile = gt_to_tile(gt); + + err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc); + if (err) + return err; + } + xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0); @@ -650,7 +721,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request, BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT); - xe_assert(xe, !guc->ct.enabled); + xe_assert(xe, !xe_guc_ct_enabled(&guc->ct)); xe_assert(xe, len); xe_assert(xe, len <= VF_SW_FLAG_COUNT); xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT); @@ -707,8 +778,12 @@ timeout: if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC)) goto proto; - if (unlikely(ret)) + if (unlikely(ret)) { + if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != + GUC_HXG_TYPE_NO_RESPONSE_BUSY) + goto proto; goto timeout; + } } if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == @@ -832,7 +907,7 @@ int xe_guc_stop(struct xe_guc *guc) { int ret; - xe_guc_ct_disable(&guc->ct); + xe_guc_ct_stop(&guc->ct); ret = xe_guc_submit_stop(guc); if (ret) diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index d3e49e7fd7c3..94f2dc5f6f90 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -13,6 +13,7 @@ struct drm_printer; +void xe_guc_comm_init_early(struct xe_guc *guc); int xe_guc_init(struct xe_guc *guc); int xe_guc_init_post_hwconfig(struct xe_guc *guc); int xe_guc_post_load_init(struct xe_guc *guc); diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index 390e6f1bf4e1..6ad4c1a90a78 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -273,7 +273,7 @@ int xe_guc_ads_init(struct xe_guc_ads *ads) ads->regset_size = calculate_regset_size(gt); bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE, - XE_BO_CREATE_VRAM_IF_DGFX(tile) | + XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT); if (IS_ERR(bo)) return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 24a33fa36496..355edd4d758a 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -9,16 +9,21 @@ #include <linux/circ_buf.h> #include <linux/delay.h> +#include <kunit/static_stub.h> + #include <drm/drm_managed.h> #include "abi/guc_actions_abi.h" +#include "abi/guc_actions_sriov_abi.h" #include "abi/guc_klvs_abi.h" #include "xe_bo.h" #include "xe_device.h" #include "xe_gt.h" #include "xe_gt_pagefault.h" +#include "xe_gt_printk.h" #include "xe_gt_tlb_invalidation.h" #include "xe_guc.h" +#include "xe_guc_relay.h" #include "xe_guc_submit.h" #include "xe_map.h" #include "xe_pm.h" @@ -28,6 +33,7 @@ struct g2h_fence { u32 *response_buffer; u32 seqno; + u32 response_data; u16 
response_len; u16 error; u16 hint; @@ -40,6 +46,7 @@ struct g2h_fence { static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer) { g2h_fence->response_buffer = response_buffer; + g2h_fence->response_data = 0; g2h_fence->response_len = 0; g2h_fence->fail = false; g2h_fence->retry = false; @@ -148,7 +155,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) primelockdep(ct); bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(), - XE_BO_CREATE_VRAM_IF_DGFX(tile) | + XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -159,6 +166,8 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) if (err) return err; + xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED); + ct->state = XE_GUC_CT_STATE_DISABLED; return 0; } @@ -278,12 +287,35 @@ static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable) return ret > 0 ? -EPROTO : ret; } +static void xe_guc_ct_set_state(struct xe_guc_ct *ct, + enum xe_guc_ct_state state) +{ + mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */ + spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */ + + xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 || + state == XE_GUC_CT_STATE_STOPPED); + + ct->g2h_outstanding = 0; + ct->state = state; + + spin_unlock_irq(&ct->fast_lock); + + /* + * Lockdep doesn't like this under the fast lock and the destroy only + * needs to be serialized with the send path, which the ct lock provides. + */ + xa_destroy(&ct->fence_lookup); + + mutex_unlock(&ct->lock); +} + int xe_guc_ct_enable(struct xe_guc_ct *ct) { struct xe_device *xe = ct_to_xe(ct); int err; - xe_assert(xe, !ct->enabled); + xe_assert(xe, !xe_guc_ct_enabled(ct)); guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); @@ -300,12 +332,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) if (err) goto err_out; - mutex_lock(&ct->lock); - spin_lock_irq(&ct->fast_lock); - ct->g2h_outstanding = 0; - ct->enabled = true; - spin_unlock_irq(&ct->fast_lock); - mutex_unlock(&ct->lock); + xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED); smp_mb(); wake_up_all(&ct->wq); @@ -319,15 +346,34 @@ err_out: return err; } +static void stop_g2h_handler(struct xe_guc_ct *ct) +{ + cancel_work_sync(&ct->g2h_worker); +} + +/** + * xe_guc_ct_disable - Set GuC to disabled state + * @ct: the &xe_guc_ct + * + * Set GuC CT to disabled state and stop the g2h handler. No outstanding g2h is expected + * in this transition.
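 + *
 + * Editor's illustration (not part of this patch): based on the
 + * __guc_ct_send_locked() hunk below, the new states surface to senders as
 + * distinct errnos, so a channel torn down cleanly reads differently from one
 + * stopped for reset (xe_guc_ct_send_block() is the blocking send used by the
 + * relay code later in this series)::
 + *
 + *	xe_guc_ct_disable(ct);
 + *	err = xe_guc_ct_send_block(ct, msg, len);	/* -ENODEV */
 + *
 + *	xe_guc_ct_stop(ct);
 + *	err = xe_guc_ct_send_block(ct, msg, len);	/* -ECANCELED */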
+ */ void xe_guc_ct_disable(struct xe_guc_ct *ct) { - mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */ - spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */ - ct->enabled = false; /* Finally disable CT communication */ - spin_unlock_irq(&ct->fast_lock); - mutex_unlock(&ct->lock); + xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED); + stop_g2h_handler(ct); +} - xa_destroy(&ct->fence_lookup); +/** + * xe_guc_ct_stop - Set GuC to stopped state + * @ct: the &xe_guc_ct + * + * Set GuC CT to stopped state, stop the g2h handler, and clear any outstanding g2h + */ +void xe_guc_ct_stop(struct xe_guc_ct *ct) +{ + xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED); + stop_g2h_handler(ct); } static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len) @@ -448,7 +494,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, GUC_HXG_EVENT_MSG_0_DATA0, action[0]); } else { cmd[1] = - FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) | FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | GUC_HXG_EVENT_MSG_0_DATA0, action[0]); } @@ -475,13 +521,34 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, return 0; } +/* + * The CT protocol accepts a 16-bit fence. This field is fully owned by the + * driver; the GuC will just copy it to the reply message. Since we need to + * be able to distinguish between replies to REQUEST and FAST_REQUEST messages, + * we use one bit of the seqno as an indicator for that and a rolling counter + * for the remaining 15 bits. + */ +#define CT_SEQNO_MASK GENMASK(14, 0) +#define CT_SEQNO_UNTRACKED BIT(15) +static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence) +{ + u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK; + + if (!is_g2h_fence) + seqno |= CT_SEQNO_UNTRACKED; + + return seqno; +} + static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence) { struct xe_device *xe = ct_to_xe(ct); + u16 seqno; int ret; + xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); xe_assert(xe, !g2h_len || !g2h_fence); xe_assert(xe, !num_g2h || !g2h_fence); xe_assert(xe, !g2h_len || num_g2h); @@ -493,11 +560,18 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, goto out; } - if (unlikely(!ct->enabled)) { + if (ct->state == XE_GUC_CT_STATE_DISABLED) { ret = -ENODEV; goto out; } + if (ct->state == XE_GUC_CT_STATE_STOPPED) { + ret = -ECANCELED; + goto out; + } + + xe_assert(xe, xe_guc_ct_enabled(ct)); + if (g2h_fence) { g2h_len = GUC_CTB_HXG_MSG_MAX_LEN; num_g2h = 1; @@ -505,7 +579,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, if (g2h_fence_needs_alloc(g2h_fence)) { void *ptr; - g2h_fence->seqno = (ct->fence_seqno++ & 0xffff); + g2h_fence->seqno = next_ct_seqno(ct, true); ptr = xa_store(&ct->fence_lookup, g2h_fence->seqno, g2h_fence, GFP_ATOMIC); @@ -514,6 +588,10 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, goto out; } } + + seqno = g2h_fence->seqno; + } else { + seqno = next_ct_seqno(ct, false); } if (g2h_len) @@ -523,8 +601,7 @@ retry: if (unlikely(ret)) goto out_unlock; - ret = h2g_write(ct, action, len, g2h_fence ?
g2h_fence->seqno : 0, - !!g2h_fence); + ret = h2g_write(ct, action, len, seqno, !!g2h_fence); if (unlikely(ret)) { if (ret == -EAGAIN) goto retry; @@ -682,7 +759,8 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret) return false; #define ct_alive(ct) \ - (ct->enabled && !ct->ctbs.h2g.info.broken && !ct->ctbs.g2h.info.broken) + (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \ + !ct->ctbs.g2h.info.broken) if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) return false; #undef ct_alive @@ -752,12 +830,31 @@ retry_same_fence: ret = -EIO; } - return ret > 0 ? 0 : ret; + return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret; } +/** + * xe_guc_ct_send_recv - Send and receive HXG to the GuC + * @ct: the &xe_guc_ct + * @action: the dword array with `HXG Request`_ message (can't be NULL) + * @len: length of the `HXG Request`_ message (in dwords, can't be 0) + * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL) + * + * Send a `HXG Request`_ message to the GuC over the CT communication channel + * and block until the GuC replies with a `HXG Response`_ message. + * + * For non-blocking communication with the GuC, use xe_guc_ct_send(). + * + * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_. + * + * Return: response length (in dwords) if &response_buffer was not NULL, or + * DATA0 from `HXG Response`_ if &response_buffer was NULL, or + * a negative error code on failure. + */ int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 *response_buffer) { + KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer); return guc_ct_send_recv(ct, action, len, response_buffer, false); } @@ -767,9 +864,20 @@ int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action, return guc_ct_send_recv(ct, action, len, response_buffer, true); } +static u32 *msg_to_hxg(u32 *msg) +{ + return msg + GUC_CTB_MSG_MIN_LEN; +} + +static u32 msg_len_to_hxg_len(u32 len) +{ + return len - GUC_CTB_MSG_MIN_LEN; +} + static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len) { - u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]); + u32 *hxg = msg_to_hxg(msg); + u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); lockdep_assert_held(&ct->lock); @@ -786,18 +894,41 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len) static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) { - struct xe_device *xe = ct_to_xe(ct); - u32 response_len = len - GUC_CTB_MSG_MIN_LEN; + struct xe_gt *gt = ct_to_gt(ct); + struct xe_device *xe = gt_to_xe(gt); + u32 *hxg = msg_to_hxg(msg); + u32 hxg_len = msg_len_to_hxg_len(len); u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]); - u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]); + u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]); struct g2h_fence *g2h_fence; lockdep_assert_held(&ct->lock); + /* + * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup. + * Those messages should never fail, so if we do get an error back it + * means we're likely doing an illegal operation and the GuC is + * rejecting it. We have no way to inform the code that submitted the + * H2G that the message was rejected, so we need to escalate the + * failure to trigger a reset. + */ + if (fence & CT_SEQNO_UNTRACKED) { + if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) + xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed!
e=0x%x, h=%u\n", + fence, + FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]), + FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0])); + else + xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n", + type, fence); + + return -EPROTO; + } + g2h_fence = xa_erase(&ct->fence_lookup, fence); if (unlikely(!g2h_fence)) { /* Don't tear down channel, as send could've timed out */ - drm_warn(&xe->drm, "G2H fence (%u) not found!\n", fence); + xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence); g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); return 0; } @@ -806,18 +937,16 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) { g2h_fence->fail = true; - g2h_fence->error = - FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[1]); - g2h_fence->hint = - FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[1]); + g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]); + g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]); } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) { g2h_fence->retry = true; - g2h_fence->reason = - FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, msg[1]); + g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]); } else if (g2h_fence->response_buffer) { - g2h_fence->response_len = response_len; - memcpy(g2h_fence->response_buffer, msg + GUC_CTB_MSG_MIN_LEN, - response_len * sizeof(u32)); + g2h_fence->response_len = hxg_len; + memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32)); + } else { + g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]); } g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); @@ -833,14 +962,13 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) { struct xe_device *xe = ct_to_xe(ct); - u32 hxg, origin, type; + u32 *hxg = msg_to_hxg(msg); + u32 origin, type; int ret; lockdep_assert_held(&ct->lock); - hxg = msg[1]; - - origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg); + origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]); if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) { drm_err(&xe->drm, "G2H channel broken on read, origin=%d, reset required\n", @@ -850,7 +978,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) return -EPROTO; } - type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg); + type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]); switch (type) { case GUC_HXG_TYPE_EVENT: ret = parse_g2h_event(ct, msg, len); @@ -876,14 +1004,19 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) { struct xe_device *xe = ct_to_xe(ct); struct xe_guc *guc = ct_to_guc(ct); - u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]); - u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN; - u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN; + u32 hxg_len = msg_len_to_hxg_len(len); + u32 *hxg = msg_to_hxg(msg); + u32 action, adj_len; + u32 *payload; int ret = 0; - if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT) + if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT) return 0; + action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); + payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN; + adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN; + switch (action) { case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE: ret = xe_guc_sched_done_handler(guc, payload, adj_len); @@ -920,6 +1053,12 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) ret = xe_guc_access_counter_notify_handler(guc, payload, adj_len); break; + case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF: + ret = 
xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len); + break; + case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF: + ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len); + break; default: drm_err(&xe->drm, "unexpected action 0x%04x\n", action); } @@ -938,15 +1077,22 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) u32 tail, head, len; s32 avail; u32 action; + u32 *hxg; + xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED); lockdep_assert_held(&ct->fast_lock); - if (!ct->enabled) + if (ct->state == XE_GUC_CT_STATE_DISABLED) return -ENODEV; + if (ct->state == XE_GUC_CT_STATE_STOPPED) + return -ECANCELED; + if (g2h->info.broken) return -EPIPE; + xe_assert(xe, xe_guc_ct_enabled(ct)); + /* Calculate DW available to read */ tail = desc_read(xe, g2h, tail); avail = tail - g2h->info.head; @@ -988,10 +1134,11 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) avail * sizeof(u32)); } - action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]); + hxg = msg_to_hxg(msg); + action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); if (fast_path) { - if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT) + if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT) return 0; switch (action) { @@ -1017,9 +1164,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) { struct xe_device *xe = ct_to_xe(ct); struct xe_guc *guc = ct_to_guc(ct); - u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]); - u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN; - u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN; + u32 hxg_len = msg_len_to_hxg_len(len); + u32 *hxg = msg_to_hxg(msg); + u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); + u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN; + u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN; int ret = 0; switch (action) { @@ -1245,7 +1394,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, return NULL; } - if (ct->enabled) { + if (xe_guc_ct_enabled(ct)) { snapshot->ct_enabled = true; snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding); guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, @@ -1271,7 +1420,7 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, return; if (snapshot->ct_enabled) { - drm_puts(p, "\nH2G CTB (all sizes in DW):\n"); + drm_puts(p, "H2G CTB (all sizes in DW):\n"); guc_ctb_snapshot_print(&snapshot->h2g, p); drm_puts(p, "\nG2H CTB (all sizes in DW):\n"); @@ -1280,7 +1429,7 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, drm_printf(p, "\tg2h outstanding: %d\n", snapshot->g2h_outstanding); } else { - drm_puts(p, "\nCT disabled\n"); + drm_puts(p, "CT disabled\n"); } } diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index f15f8a4857e0..5083e099064f 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -13,6 +13,7 @@ struct drm_printer; int xe_guc_ct_init(struct xe_guc_ct *ct); int xe_guc_ct_enable(struct xe_guc_ct *ct); void xe_guc_ct_disable(struct xe_guc_ct *ct); +void xe_guc_ct_stop(struct xe_guc_ct *ct); void xe_guc_ct_fast_path(struct xe_guc_ct *ct); struct xe_guc_ct_snapshot * @@ -22,11 +23,18 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot); void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic); +static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct) +{ + return ct->state == XE_GUC_CT_STATE_ENABLED; +} + static inline void 
xe_guc_ct_irq_handler(struct xe_guc_ct *ct) { + if (!xe_guc_ct_enabled(ct)) + return; + wake_up_all(&ct->wq); - if (ct->enabled) - queue_work(system_unbound_wq, &ct->g2h_worker); + queue_work(system_unbound_wq, &ct->g2h_worker); xe_guc_ct_fast_path(ct); } diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h index d814d4ee3fc6..d29144c9f20b 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct_types.h +++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h @@ -73,6 +73,20 @@ struct xe_guc_ct_snapshot { }; /** + * enum xe_guc_ct_state - CT state + * @XE_GUC_CT_STATE_NOT_INITIALIZED: CT not initialized, messages not expected in this state + * @XE_GUC_CT_STATE_DISABLED: CT disabled, messages not expected in this state + * @XE_GUC_CT_STATE_STOPPED: CT stopped, drop messages without errors + * @XE_GUC_CT_STATE_ENABLED: CT enabled, messages sent / received in this state + */ +enum xe_guc_ct_state { + XE_GUC_CT_STATE_NOT_INITIALIZED = 0, + XE_GUC_CT_STATE_DISABLED, + XE_GUC_CT_STATE_STOPPED, + XE_GUC_CT_STATE_ENABLED, +}; + +/** * struct xe_guc_ct - GuC command transport (CT) layer * * Includes a pair of CT buffers for bi-directional communication and tracking @@ -87,17 +101,17 @@ struct xe_guc_ct { spinlock_t fast_lock; /** @ctbs: buffers for sending and receiving commands */ struct { - /** @send: Host to GuC (H2G, send) channel */ + /** @ctbs.send: Host to GuC (H2G, send) channel */ struct guc_ctb h2g; - /** @recv: GuC to Host (G2H, receive) channel */ + /** @ctbs.recv: GuC to Host (G2H, receive) channel */ struct guc_ctb g2h; } ctbs; /** @g2h_outstanding: number of outstanding G2H */ u32 g2h_outstanding; /** @g2h_worker: worker to process G2H messages */ struct work_struct g2h_worker; - /** @enabled: CT enabled */ - bool enabled; + /** @state: CT state */ + enum xe_guc_ct_state state; /** @fence_seqno: G2H fence seqno - 16 bits used by CT */ u32 fence_seqno; /** @fence_lookup: G2H fence lookup */ diff --git a/drivers/gpu/drm/xe/xe_guc_db_mgr.c b/drivers/gpu/drm/xe/xe_guc_db_mgr.c new file mode 100644 index 000000000000..8d9a0287df6b --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_db_mgr.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <linux/bitmap.h> +#include <linux/mutex.h> + +#include <drm/drm_managed.h> + +#include "regs/xe_guc_regs.h" + +#include "xe_assert.h" +#include "xe_gt_printk.h" +#include "xe_guc.h" +#include "xe_guc_db_mgr.h" +#include "xe_guc_types.h" + +/** + * DOC: GuC Doorbells + * + * The GFX doorbell solution provides a mechanism for submission of workloads + * to the graphics hardware by a ring3 application without the penalty of + * a ring transition for each workload submission. + * + * In SR-IOV mode, the doorbells are treated as a shared resource and the PF + * must be able to provision an exclusive range of IDs across VFs, which may + * want to use this feature.
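 + *
 + * An illustrative PF-side provisioning flow (editor's sketch, not code from
 + * this patch; the counts are invented for the example)::
 + *
 + *	xe_guc_db_mgr_init(dbm, ~0);	/* manage all HW doorbells */
 + *	start = xe_guc_db_mgr_reserve_range(dbm, 16, 32);	/* 16 IDs for a VF, keep 32 spare */
 + *	...
 + *	xe_guc_db_mgr_release_range(dbm, start, 16);	/* VF gone, reclaim its IDs */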
+ */ + +static struct xe_guc *dbm_to_guc(struct xe_guc_db_mgr *dbm) +{ + return container_of(dbm, struct xe_guc, dbm); +} + +static struct xe_gt *dbm_to_gt(struct xe_guc_db_mgr *dbm) +{ + return guc_to_gt(dbm_to_guc(dbm)); +} + +static struct xe_device *dbm_to_xe(struct xe_guc_db_mgr *dbm) +{ + return gt_to_xe(dbm_to_gt(dbm)); +} + +#define dbm_assert(_dbm, _cond) xe_gt_assert(dbm_to_gt(_dbm), _cond) +#define dbm_mutex(_dbm) (&dbm_to_guc(_dbm)->submission_state.lock) + +static void dbm_print_locked(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent); + +static void __fini_dbm(struct drm_device *drm, void *arg) +{ + struct xe_guc_db_mgr *dbm = arg; + unsigned int weight; + + mutex_lock(dbm_mutex(dbm)); + + weight = bitmap_weight(dbm->bitmap, dbm->count); + if (weight) { + struct drm_printer p = xe_gt_info_printer(dbm_to_gt(dbm)); + + xe_gt_err(dbm_to_gt(dbm), "GuC doorbells manager unclean (%u/%u)\n", + weight, dbm->count); + dbm_print_locked(dbm, &p, 1); + } + + bitmap_free(dbm->bitmap); + dbm->bitmap = NULL; + dbm->count = 0; + + mutex_unlock(dbm_mutex(dbm)); +} + +/** + * xe_guc_db_mgr_init() - Initialize GuC Doorbells Manager. + * @dbm: the &xe_guc_db_mgr to initialize + * @count: number of doorbells to manage + * + * The bare-metal or PF driver can pass ~0 as &count to indicate that all + * doorbells supported by the hardware are available for use. + * + * Only VF drivers will have to provide an explicit number of doorbell IDs + * that they can use. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_guc_db_mgr_init(struct xe_guc_db_mgr *dbm, unsigned int count) +{ + int ret; + + if (count == ~0) + count = GUC_NUM_DOORBELLS; + + dbm_assert(dbm, !dbm->bitmap); + dbm_assert(dbm, count <= GUC_NUM_DOORBELLS); + + if (!count) + goto done; + + dbm->bitmap = bitmap_zalloc(count, GFP_KERNEL); + if (!dbm->bitmap) + return -ENOMEM; + dbm->count = count; + + ret = drmm_add_action_or_reset(&dbm_to_xe(dbm)->drm, __fini_dbm, dbm); + if (ret) + return ret; +done: + xe_gt_dbg(dbm_to_gt(dbm), "using %u doorbell(s)\n", dbm->count); + return 0; +} + +static int dbm_reserve_chunk_locked(struct xe_guc_db_mgr *dbm, + unsigned int count, unsigned int spare) +{ + unsigned int used; + int index; + + dbm_assert(dbm, count); + dbm_assert(dbm, count <= GUC_NUM_DOORBELLS); + dbm_assert(dbm, dbm->count <= GUC_NUM_DOORBELLS); + lockdep_assert_held(dbm_mutex(dbm)); + + if (!dbm->count) + return -ENODATA; + + if (spare) { + used = bitmap_weight(dbm->bitmap, dbm->count); + if (used + count + spare > dbm->count) + return -EDQUOT; + } + + index = bitmap_find_next_zero_area(dbm->bitmap, dbm->count, 0, count, 0); + if (index >= dbm->count) + return -ENOSPC; + + bitmap_set(dbm->bitmap, index, count); + + return index; +} + +static void dbm_release_chunk_locked(struct xe_guc_db_mgr *dbm, + unsigned int start, unsigned int count) +{ + dbm_assert(dbm, count); + dbm_assert(dbm, count <= GUC_NUM_DOORBELLS); + dbm_assert(dbm, dbm->count); + dbm_assert(dbm, dbm->count <= GUC_NUM_DOORBELLS); + lockdep_assert_held(dbm_mutex(dbm)); + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + unsigned int n; + + for (n = 0; n < count; n++) + dbm_assert(dbm, test_bit(start + n, dbm->bitmap)); + } + bitmap_clear(dbm->bitmap, start, count); +} + +/** + * xe_guc_db_mgr_reserve_id_locked() - Reserve a single GuC Doorbell ID. + * @dbm: the &xe_guc_db_mgr + * + * This function expects that the submission lock is already taken. + * + * Return: ID of the allocated GuC doorbell or a negative error code on failure.
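 + *
 + * Illustrative pairing with the release call (editor's sketch, not from this
 + * patch; the lock is the GuC submission_state lock that dbm_mutex() above
 + * resolves to)::
 + *
 + *	mutex_lock(&guc->submission_state.lock);
 + *	id = xe_guc_db_mgr_reserve_id_locked(dbm);
 + *	if (id >= 0) {
 + *		/* ... use doorbell 'id' ... */
 + *		xe_guc_db_mgr_release_id_locked(dbm, id);
 + *	}
 + *	mutex_unlock(&guc->submission_state.lock);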
+ */ +int xe_guc_db_mgr_reserve_id_locked(struct xe_guc_db_mgr *dbm) +{ + return dbm_reserve_chunk_locked(dbm, 1, 0); +} + +/** + * xe_guc_db_mgr_release_id_locked() - Release a single GuC Doorbell ID. + * @dbm: the &xe_guc_db_mgr + * @id: the GuC Doorbell ID to release + * + * This function expects that the submission lock is already taken. + */ +void xe_guc_db_mgr_release_id_locked(struct xe_guc_db_mgr *dbm, unsigned int id) +{ + return dbm_release_chunk_locked(dbm, id, 1); +} + +/** + * xe_guc_db_mgr_reserve_range() - Reserve a range of GuC Doorbell IDs. + * @dbm: the &xe_guc_db_mgr + * @count: number of GuC doorbell IDs to reserve + * @spare: number of GuC doorbell IDs to keep available + * + * This function is dedicated for use by the PF, which expects that the + * allocated range for the VF will be contiguous and that there will be at + * least &spare IDs still available for the PF's use after this reservation. + * + * Return: starting ID of the allocated GuC doorbell ID range or + * a negative error code on failure. + */ +int xe_guc_db_mgr_reserve_range(struct xe_guc_db_mgr *dbm, + unsigned int count, unsigned int spare) +{ + int ret; + + mutex_lock(dbm_mutex(dbm)); + ret = dbm_reserve_chunk_locked(dbm, count, spare); + mutex_unlock(dbm_mutex(dbm)); + + return ret; +} + +/** + * xe_guc_db_mgr_release_range() - Release a range of Doorbell IDs. + * @dbm: the &xe_guc_db_mgr + * @start: the starting ID of GuC doorbell ID range to release + * @count: number of GuC doorbell IDs to release + */ +void xe_guc_db_mgr_release_range(struct xe_guc_db_mgr *dbm, + unsigned int start, unsigned int count) +{ + mutex_lock(dbm_mutex(dbm)); + dbm_release_chunk_locked(dbm, start, count); + mutex_unlock(dbm_mutex(dbm)); +} + +static void dbm_print_locked(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent) +{ + unsigned int rs, re; + unsigned int total; + + drm_printf_indent(p, indent, "count: %u\n", dbm->count); + if (!dbm->bitmap) + return; + + total = 0; + for_each_clear_bitrange(rs, re, dbm->bitmap, dbm->count) { + drm_printf_indent(p, indent, "available range: %u..%u (%u)\n", + rs, re - 1, re - rs); + total += re - rs; + } + drm_printf_indent(p, indent, "available total: %u\n", total); + + total = 0; + for_each_set_bitrange(rs, re, dbm->bitmap, dbm->count) { + drm_printf_indent(p, indent, "reserved range: %u..%u (%u)\n", + rs, re - 1, re - rs); + total += re - rs; + } + drm_printf_indent(p, indent, "reserved total: %u\n", total); +} + +/** + * xe_guc_db_mgr_print() - Print status of GuC Doorbells Manager.
+ * @dbm: the &xe_guc_db_mgr to print + * @p: the &drm_printer to print to + * @indent: tab indentation level + */ +void xe_guc_db_mgr_print(struct xe_guc_db_mgr *dbm, + struct drm_printer *p, int indent) +{ + mutex_lock(dbm_mutex(dbm)); + dbm_print_locked(dbm, p, indent); + mutex_unlock(dbm_mutex(dbm)); +} + +#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST) +#include "tests/xe_guc_db_mgr_test.c" +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_db_mgr.h b/drivers/gpu/drm/xe/xe_guc_db_mgr.h new file mode 100644 index 000000000000..c250fa0ca9d6 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_db_mgr.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_GUC_DB_MGR_H_ +#define _XE_GUC_DB_MGR_H_ + +struct drm_printer; +struct xe_guc_db_mgr; + +int xe_guc_db_mgr_init(struct xe_guc_db_mgr *dbm, unsigned int count); + +int xe_guc_db_mgr_reserve_id_locked(struct xe_guc_db_mgr *dbm); +void xe_guc_db_mgr_release_id_locked(struct xe_guc_db_mgr *dbm, unsigned int id); + +int xe_guc_db_mgr_reserve_range(struct xe_guc_db_mgr *dbm, unsigned int count, unsigned int spare); +void xe_guc_db_mgr_release_range(struct xe_guc_db_mgr *dbm, unsigned int start, unsigned int count); + +void xe_guc_db_mgr_print(struct xe_guc_db_mgr *dbm, struct drm_printer *p, int indent); + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h index 4dd5a88a7826..c281fdbfd2d6 100644 --- a/drivers/gpu/drm/xe/xe_guc_fwif.h +++ b/drivers/gpu/drm/xe/xe_guc_fwif.h @@ -97,6 +97,7 @@ struct guc_update_exec_queue_policy { #define GUC_WA_POLLCS BIT(18) #define GUC_WA_RENDER_RST_RC6_EXIT BIT(19) #define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21) +#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22) #define GUC_CTL_FEATURE 2 #define GUC_CTL_ENABLE_SLPC BIT(2) diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c index 2a13a00917f8..ea49f3885c10 100644 --- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c +++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c @@ -78,7 +78,7 @@ int xe_guc_hwconfig_init(struct xe_guc *guc) return -EINVAL; bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size), - XE_BO_CREATE_VRAM_IF_DGFX(tile) | + XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT); if (IS_ERR(bo)) return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h b/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h new file mode 100644 index 000000000000..aeeb573c6842 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_hxg_helpers.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_GUC_HXG_HELPERS_H_ +#define _XE_GUC_HXG_HELPERS_H_ + +#include <linux/bitfield.h> +#include <linux/types.h> + +#include "abi/guc_messages_abi.h" + +/** + * hxg_sizeof - Queries size of the object or type (in HXG units). + * @T: the object or type + * + * Force a compilation error if actual size is not aligned to HXG unit (u32). + * + * Return: size in dwords (u32). 
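 + *
 + * Worked example (illustrative only)::
 + *
 + *	struct example_msg { u32 header; u32 data; };	/* 8 bytes */
 + *	len = hxg_sizeof(struct example_msg);		/* 2 dwords */
 + *
 + * A type whose size is not a multiple of sizeof(u32) trips the
 + * BUILD_BUG_ON_ZERO() and fails to compile.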
+ */ +#define hxg_sizeof(T) (sizeof(T) / sizeof(u32) + BUILD_BUG_ON_ZERO(sizeof(T) % sizeof(u32))) + +static inline const char *guc_hxg_type_to_string(unsigned int type) +{ + switch (type) { + case GUC_HXG_TYPE_REQUEST: + return "request"; + case GUC_HXG_TYPE_FAST_REQUEST: + return "fast-request"; + case GUC_HXG_TYPE_EVENT: + return "event"; + case GUC_HXG_TYPE_NO_RESPONSE_BUSY: + return "busy"; + case GUC_HXG_TYPE_NO_RESPONSE_RETRY: + return "retry"; + case GUC_HXG_TYPE_RESPONSE_FAILURE: + return "failure"; + case GUC_HXG_TYPE_RESPONSE_SUCCESS: + return "response"; + default: + return "<invalid>"; + } +} + +static inline bool guc_hxg_type_is_action(unsigned int type) +{ + switch (type) { + case GUC_HXG_TYPE_REQUEST: + case GUC_HXG_TYPE_FAST_REQUEST: + case GUC_HXG_TYPE_EVENT: + return true; + default: + return false; + } +} + +static inline bool guc_hxg_type_is_reply(unsigned int type) +{ + switch (type) { + case GUC_HXG_TYPE_NO_RESPONSE_BUSY: + case GUC_HXG_TYPE_NO_RESPONSE_RETRY: + case GUC_HXG_TYPE_RESPONSE_FAILURE: + case GUC_HXG_TYPE_RESPONSE_SUCCESS: + return true; + default: + return false; + } +} + +static inline u32 guc_hxg_msg_encode_success(u32 *msg, u32 data0) +{ + msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) | + FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, data0); + + return GUC_HXG_RESPONSE_MSG_MIN_LEN; +} + +static inline u32 guc_hxg_msg_encode_failure(u32 *msg, u32 error, u32 hint) +{ + msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) | + FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) | + FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error); + + return GUC_HXG_FAILURE_MSG_LEN; +} + +static inline u32 guc_hxg_msg_encode_busy(u32 *msg, u32 counter) +{ + msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_NO_RESPONSE_BUSY) | + FIELD_PREP(GUC_HXG_BUSY_MSG_0_COUNTER, counter); + + return GUC_HXG_BUSY_MSG_LEN; +} + +static inline u32 guc_hxg_msg_encode_retry(u32 *msg, u32 reason) +{ + msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_NO_RESPONSE_RETRY) | + FIELD_PREP(GUC_HXG_RETRY_MSG_0_REASON, reason); + + return GUC_HXG_RETRY_MSG_LEN; +} + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c index bcd2f4d34081..45135c3520e5 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.c +++ b/drivers/gpu/drm/xe/xe_guc_log.c @@ -84,7 +84,7 @@ int xe_guc_log_init(struct xe_guc_log *log) struct xe_bo *bo; bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(), - XE_BO_CREATE_VRAM_IF_DGFX(tile) | + XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT); if (IS_ERR(bo)) return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index d91702592520..2839d685631b 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -956,10 +956,12 @@ out: /** * xe_guc_pc_fini - Finalize GuC's Power Conservation component - * @pc: Xe_GuC_PC instance + * @drm: DRM device + * @arg: opaque pointer that should point to Xe_GuC_PC instance */ -void xe_guc_pc_fini(struct xe_guc_pc *pc) +static void xe_guc_pc_fini(struct drm_device *drm, void *arg) { + struct xe_guc_pc *pc = arg; struct xe_device *xe = pc_to_xe(pc); if (xe->info.skip_guc_pc) { @@ -969,9 +971,10 @@ void xe_guc_pc_fini(struct xe_guc_pc *pc) return; } + xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), 
XE_FORCEWAKE_ALL); XE_WARN_ON(xe_guc_pc_gucrc_disable(pc)); XE_WARN_ON(xe_guc_pc_stop(pc)); - mutex_destroy(&pc->freq_lock); + xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); } /** @@ -985,11 +988,14 @@ int xe_guc_pc_init(struct xe_guc_pc *pc) struct xe_device *xe = gt_to_xe(gt); struct xe_bo *bo; u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + int err; if (xe->info.skip_guc_pc) return 0; - mutex_init(&pc->freq_lock); + err = drmm_mutex_init(&xe->drm, &pc->freq_lock); + if (err) + return err; bo = xe_managed_bo_create_pin_map(xe, tile, size, XE_BO_CREATE_VRAM_IF_DGFX(tile) | @@ -998,5 +1004,10 @@ int xe_guc_pc_init(struct xe_guc_pc *pc) return PTR_ERR(bo); pc->bo = bo; + + err = drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc); + if (err) + return err; + return 0; } diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h index cecad8e9300b..d3680d89490e 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.h +++ b/drivers/gpu/drm/xe/xe_guc_pc.h @@ -9,7 +9,6 @@ #include "xe_guc_pc_types.h" int xe_guc_pc_init(struct xe_guc_pc *pc); -void xe_guc_pc_fini(struct xe_guc_pc *pc); int xe_guc_pc_start(struct xe_guc_pc *pc); int xe_guc_pc_stop(struct xe_guc_pc *pc); int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc); diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c new file mode 100644 index 000000000000..c0a2d8d5d3b3 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_relay.c @@ -0,0 +1,941 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> + +#include <drm/drm_managed.h> + +#include <kunit/static_stub.h> +#include <kunit/test-bug.h> + +#include "abi/guc_actions_sriov_abi.h" +#include "abi/guc_relay_actions_abi.h" +#include "abi/guc_relay_communication_abi.h" + +#include "xe_assert.h" +#include "xe_device.h" +#include "xe_gt.h" +#include "xe_gt_sriov_printk.h" +#include "xe_guc.h" +#include "xe_guc_ct.h" +#include "xe_guc_hxg_helpers.h" +#include "xe_guc_relay.h" +#include "xe_guc_relay_types.h" +#include "xe_sriov.h" + +/* + * How long should we wait for the response? + * XXX this value is subject to profiling. + */ +#define RELAY_TIMEOUT_MSEC (2500) + +static void relays_worker_fn(struct work_struct *w); + +static struct xe_guc *relay_to_guc(struct xe_guc_relay *relay) +{ + return container_of(relay, struct xe_guc, relay); +} + +static struct xe_guc_ct *relay_to_ct(struct xe_guc_relay *relay) +{ + return &relay_to_guc(relay)->ct; +} + +static struct xe_gt *relay_to_gt(struct xe_guc_relay *relay) +{ + return guc_to_gt(relay_to_guc(relay)); +} + +static struct xe_device *relay_to_xe(struct xe_guc_relay *relay) +{ + return gt_to_xe(relay_to_gt(relay)); +} + +#define relay_assert(relay, condition) xe_gt_assert(relay_to_gt(relay), condition) +#define relay_notice(relay, msg...) xe_gt_sriov_notice(relay_to_gt(relay), "relay: " msg) +#define relay_debug(relay, msg...) xe_gt_sriov_dbg_verbose(relay_to_gt(relay), "relay: " msg) + +static int relay_get_totalvfs(struct xe_guc_relay *relay) +{ + struct xe_device *xe = relay_to_xe(relay); + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + + KUNIT_STATIC_STUB_REDIRECT(relay_get_totalvfs, relay); + return IS_SRIOV_VF(xe) ?
0 : pci_sriov_get_totalvfs(pdev); +} + +static bool relay_is_ready(struct xe_guc_relay *relay) +{ + return mempool_initialized(&relay->pool); +} + +static u32 relay_get_next_rid(struct xe_guc_relay *relay) +{ + u32 rid; + + spin_lock(&relay->lock); + rid = ++relay->last_rid; + spin_unlock(&relay->lock); + + return rid; +} + +/** + * struct relay_transaction - internal data used to handle transactions + * + * Relation between struct relay_transaction members:: + * + * <-------------------- GUC_CTB_MAX_DWORDS --------------> + * <-------- GUC_RELAY_MSG_MAX_LEN ---> + * <--- offset ---> <--- request_len -------> + * +----------------+-------------------------+----------+--+ + * | | | | | + * +----------------+-------------------------+----------+--+ + * ^ ^ + * / / + * request_buf request + * + * <-------------------- GUC_CTB_MAX_DWORDS --------------> + * <-------- GUC_RELAY_MSG_MAX_LEN ---> + * <--- offset ---> <--- response_len ---> + * +----------------+----------------------+-------------+--+ + * | | | | | + * +----------------+----------------------+-------------+--+ + * ^ ^ + * / / + * response_buf response + */ +struct relay_transaction { + /** + * @incoming: indicates whether this transaction represents an incoming + * request from the remote VF/PF or this transaction + * represents outgoing request to the remote VF/PF. + */ + bool incoming; + + /** + * @remote: PF/VF identifier of the origin (or target) of the relay + * request message. + */ + u32 remote; + + /** @rid: identifier of the VF/PF relay message. */ + u32 rid; + + /** + * @request: points to the inner VF/PF request message, copied to the + * #response_buf starting at #offset. + */ + u32 *request; + + /** @request_len: length of the inner VF/PF request message. */ + u32 request_len; + + /** + * @response: points to the placeholder buffer where inner VF/PF + * response will be located, for outgoing transaction + * this could be caller's buffer (if provided) otherwise + * it points to the #response_buf starting at #offset. + */ + u32 *response; + + /** + * @response_len: length of the inner VF/PF response message (only + * if #status is 0), initially set to the size of the + * placeholder buffer where response message will be + * copied. + */ + u32 response_len; + + /** + * @offset: offset to the start of the inner VF/PF relay message inside + * buffers; this offset is equal the length of the outer GuC + * relay header message. + */ + u32 offset; + + /** + * @request_buf: buffer with VF/PF request message including outer + * transport message. + */ + u32 request_buf[GUC_CTB_MAX_DWORDS]; + + /** + * @response_buf: buffer with VF/PF response message including outer + * transport message. + */ + u32 response_buf[GUC_CTB_MAX_DWORDS]; + + /** + * @reply: status of the reply, 0 means that data pointed by the + * #response is valid. + */ + int reply; + + /** @done: completion of the outgoing transaction. 
*/ + struct completion done; + + /** @link: transaction list link */ + struct list_head link; +}; + +static u32 prepare_pf2guc(u32 *msg, u32 target, u32 rid) +{ + msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_PF2GUC_RELAY_TO_VF); + msg[1] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, target); + msg[2] = FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, rid); + + return PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN; +} + +static u32 prepare_vf2guc(u32 *msg, u32 rid) +{ + msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, XE_GUC_ACTION_VF2GUC_RELAY_TO_PF); + msg[1] = FIELD_PREP(VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID, rid); + + return VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN; +} + +static struct relay_transaction * +__relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u32 rid, + const u32 *action, u32 action_len, u32 *resp, u32 resp_size) +{ + struct relay_transaction *txn; + + relay_assert(relay, action_len >= GUC_RELAY_MSG_MIN_LEN); + relay_assert(relay, action_len <= GUC_RELAY_MSG_MAX_LEN); + relay_assert(relay, !(!!resp ^ !!resp_size)); + relay_assert(relay, resp_size <= GUC_RELAY_MSG_MAX_LEN); + relay_assert(relay, resp_size == 0 || resp_size >= GUC_RELAY_MSG_MIN_LEN); + + if (unlikely(!relay_is_ready(relay))) + return ERR_PTR(-ENODEV); + + /* + * For incoming requests we can't use GFP_KERNEL as those are delivered + * with the CTB lock held, which is marked as used in the reclaim path. + * Btw, that's one of the reasons why we use a mempool here! + */ + txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL); + if (!txn) + return ERR_PTR(-ENOMEM); + + txn->incoming = incoming; + txn->remote = remote; + txn->rid = rid; + txn->offset = remote ? + prepare_pf2guc(incoming ? txn->response_buf : txn->request_buf, remote, rid) : + prepare_vf2guc(incoming ?
txn->response_buf : txn->request_buf, rid); + + relay_assert(relay, txn->offset); + relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->request_buf)); + relay_assert(relay, txn->offset + GUC_RELAY_MSG_MAX_LEN <= ARRAY_SIZE(txn->response_buf)); + + txn->request = txn->request_buf + txn->offset; + memcpy(&txn->request_buf[txn->offset], action, sizeof(u32) * action_len); + txn->request_len = action_len; + + txn->response = resp ?: txn->response_buf + txn->offset; + txn->response_len = resp_size ?: GUC_RELAY_MSG_MAX_LEN; + txn->reply = -ENOMSG; + INIT_LIST_HEAD(&txn->link); + init_completion(&txn->done); + + return txn; +} + +static struct relay_transaction * +relay_new_transaction(struct xe_guc_relay *relay, u32 target, const u32 *action, u32 len, + u32 *resp, u32 resp_size) +{ + u32 rid = relay_get_next_rid(relay); + + return __relay_get_transaction(relay, false, target, rid, action, len, resp, resp_size); +} + +static struct relay_transaction * +relay_new_incoming_transaction(struct xe_guc_relay *relay, u32 origin, u32 rid, + const u32 *action, u32 len) +{ + return __relay_get_transaction(relay, true, origin, rid, action, len, NULL, 0); +} + +static void relay_release_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn) +{ + relay_assert(relay, list_empty(&txn->link)); + + txn->offset = 0; + txn->response = NULL; + txn->reply = -ESTALE; + mempool_free(txn, &relay->pool); +} + +static int relay_send_transaction(struct xe_guc_relay *relay, struct relay_transaction *txn) +{ + u32 len = txn->incoming ? txn->response_len : txn->request_len; + u32 *buf = txn->incoming ? txn->response_buf : txn->request_buf; + u32 *msg = buf + txn->offset; + int ret; + + relay_assert(relay, txn->offset); + relay_assert(relay, txn->offset + len <= GUC_CTB_MAX_DWORDS); + relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN); + relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN); + + relay_debug(relay, "sending %s.%u to %u = %*ph\n", + guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])), + txn->rid, txn->remote, (int)sizeof(u32) * len, msg); + + ret = xe_guc_ct_send_block(relay_to_ct(relay), buf, len + txn->offset); + + if (unlikely(ret > 0)) { + relay_notice(relay, "Unexpected data=%d from GuC, wrong ABI?\n", ret); + ret = -EPROTO; + } + if (unlikely(ret < 0)) { + relay_notice(relay, "Failed to send %s.%x to GuC (%pe) %*ph ...\n", + guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, buf[0])), + FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, buf[0]), + ERR_PTR(ret), (int)sizeof(u32) * txn->offset, buf); + relay_notice(relay, "Failed to send %s.%u to %u (%pe) %*ph\n", + guc_hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])), + txn->rid, txn->remote, ERR_PTR(ret), (int)sizeof(u32) * len, msg); + } + + return ret; +} + +static void __fini_relay(struct drm_device *drm, void *arg) +{ + struct xe_guc_relay *relay = arg; + + mempool_exit(&relay->pool); +} + +/** + * xe_guc_relay_init - Initialize a &xe_guc_relay + * @relay: the &xe_guc_relay to initialize + * + * Initialize remaining members of &xe_guc_relay that may depend + * on the SR-IOV mode. + * + * Return: 0 on success or a negative error code on failure. 
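 + *
 + * Once initialized, a VF-side caller might use the relay like this (editor's
 + * sketch, not from this patch; see xe_guc_relay_send_to_pf() below for the
 + * exact contract)::
 + *
 + *	u32 req[GUC_RELAY_MSG_MIN_LEN];		/* holds an HXG request */
 + *	u32 resp[GUC_RELAY_MSG_MAX_LEN];
 + *
 + *	ret = xe_guc_relay_send_to_pf(relay, req, ARRAY_SIZE(req),
 + *				      resp, ARRAY_SIZE(resp));
 + *	/* ret >= 0: response length in dwords, ret < 0: errno */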
+ */ +int xe_guc_relay_init(struct xe_guc_relay *relay) +{ + const int XE_RELAY_MEMPOOL_MIN_NUM = 1; + struct xe_device *xe = relay_to_xe(relay); + int err; + + relay_assert(relay, !relay_is_ready(relay)); + + if (!IS_SRIOV(xe)) + return 0; + + spin_lock_init(&relay->lock); + INIT_WORK(&relay->worker, relays_worker_fn); + INIT_LIST_HEAD(&relay->pending_relays); + INIT_LIST_HEAD(&relay->incoming_actions); + + err = mempool_init_kmalloc_pool(&relay->pool, XE_RELAY_MEMPOOL_MIN_NUM + + relay_get_totalvfs(relay), + sizeof(struct relay_transaction)); + if (err) + return err; + + relay_debug(relay, "using mempool with %d elements\n", relay->pool.min_nr); + + return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay); +} + +static u32 to_relay_error(int err) +{ + /* XXX: assume that relay errors match errno codes */ + return err < 0 ? -err : GUC_RELAY_ERROR_UNDISCLOSED; +} + +static int from_relay_error(u32 error) +{ + /* XXX: assume that relay errors match errno codes */ + return error ? -error : -ENODATA; +} + +static u32 sanitize_relay_error(u32 error) +{ + /* XXX TBD if generic error codes will be allowed */ + if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG)) + error = GUC_RELAY_ERROR_UNDISCLOSED; + return error; +} + +static u32 sanitize_relay_error_hint(u32 hint) +{ + /* XXX TBD if generic error codes will be allowed */ + if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG)) + hint = 0; + return hint; +} + +static u32 prepare_error_reply(u32 *msg, u32 error, u32 hint) +{ + msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) | + FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) | + FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error); + + XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_ERROR, error)); + XE_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_HINT, hint)); + + return GUC_HXG_FAILURE_MSG_LEN; +} + +static void relay_testonly_nop(struct xe_guc_relay *relay) +{ + KUNIT_STATIC_STUB_REDIRECT(relay_testonly_nop, relay); +} + +static int relay_send_message_and_wait(struct xe_guc_relay *relay, + struct relay_transaction *txn, + u32 *buf, u32 buf_size) +{ + unsigned long timeout = msecs_to_jiffies(RELAY_TIMEOUT_MSEC); + u32 *msg = &txn->request_buf[txn->offset]; + u32 len = txn->request_len; + u32 type, action, data0; + int ret; + long n; + + type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]); + action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]); + data0 = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]); + + relay_debug(relay, "%s.%u to %u action %#x:%u\n", + guc_hxg_type_to_string(type), + txn->rid, txn->remote, action, data0); + + /* list ordering does not need to match RID ordering */ + spin_lock(&relay->lock); + list_add_tail(&txn->link, &relay->pending_relays); + spin_unlock(&relay->lock); + +resend: + ret = relay_send_transaction(relay, txn); + if (unlikely(ret < 0)) + goto unlink; + +wait: + n = wait_for_completion_timeout(&txn->done, timeout); + if (unlikely(n == 0 && txn->reply)) { + ret = -ETIME; + goto unlink; + } + + relay_debug(relay, "%u.%u reply %d after %u msec\n", + txn->remote, txn->rid, txn->reply, jiffies_to_msecs(timeout - n)); + if (unlikely(txn->reply)) { + reinit_completion(&txn->done); + if (txn->reply == -EAGAIN) + goto resend; + if (txn->reply == -EBUSY) { + relay_testonly_nop(relay); + goto wait; + } + if (txn->reply > 0) + ret = from_relay_error(txn->reply); + else + ret = txn->reply; + goto unlink; + } + + relay_debug(relay, "%u.%u response %*ph\n", txn->remote, txn->rid, + (int)sizeof(u32) * txn->response_len, txn->response); + 
relay_assert(relay, txn->response_len >= GUC_RELAY_MSG_MIN_LEN); + ret = txn->response_len; + +unlink: + spin_lock(&relay->lock); + list_del_init(&txn->link); + spin_unlock(&relay->lock); + + if (unlikely(ret < 0)) { + relay_notice(relay, "Unsuccessful %s.%u %#x:%u to %u (%pe) %*ph\n", + guc_hxg_type_to_string(type), txn->rid, + action, data0, txn->remote, ERR_PTR(ret), + (int)sizeof(u32) * len, msg); + } + + return ret; +} + +static int relay_send_to(struct xe_guc_relay *relay, u32 target, + const u32 *msg, u32 len, u32 *buf, u32 buf_size) +{ + struct relay_transaction *txn; + int ret; + + relay_assert(relay, len >= GUC_RELAY_MSG_MIN_LEN); + relay_assert(relay, len <= GUC_RELAY_MSG_MAX_LEN); + relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_HOST); + relay_assert(relay, guc_hxg_type_is_action(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]))); + + if (unlikely(!relay_is_ready(relay))) + return -ENODEV; + + txn = relay_new_transaction(relay, target, msg, len, buf, buf_size); + if (IS_ERR(txn)) + return PTR_ERR(txn); + + switch (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])) { + case GUC_HXG_TYPE_REQUEST: + ret = relay_send_message_and_wait(relay, txn, buf, buf_size); + break; + case GUC_HXG_TYPE_FAST_REQUEST: + relay_assert(relay, !GUC_HXG_TYPE_FAST_REQUEST); + fallthrough; + case GUC_HXG_TYPE_EVENT: + ret = relay_send_transaction(relay, txn); + break; + default: + ret = -EINVAL; + break; + } + + relay_release_transaction(relay, txn); + return ret; +} + +#ifdef CONFIG_PCI_IOV +/** + * xe_guc_relay_send_to_vf - Send a message to the VF. + * @relay: the &xe_guc_relay which will send the message + * @target: target VF number + * @msg: request message to be sent + * @len: length of the request message (in dwords, can't be 0) + * @buf: placeholder for the response message + * @buf_size: size of the response message placeholder (in dwords) + * + * This function can only be used by the driver running in the SR-IOV PF mode. + * + * Return: Non-negative response length (in dwords) or + * a negative error code on failure. + */ +int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target, + const u32 *msg, u32 len, u32 *buf, u32 buf_size) +{ + relay_assert(relay, IS_SRIOV_PF(relay_to_xe(relay))); + + return relay_send_to(relay, target, msg, len, buf, buf_size); +} +#endif + +/** + * xe_guc_relay_send_to_pf - Send a message to the PF. + * @relay: the &xe_guc_relay which will send the message + * @msg: request message to be sent + * @len: length of the message (in dwords, can't be 0) + * @buf: placeholder for the response message + * @buf_size: size of the response message placeholder (in dwords) + * + * This function can only be used by driver running in SR-IOV VF mode. + * + * Return: Non-negative response length (in dwords) or + * a negative error code on failure. + */ +int xe_guc_relay_send_to_pf(struct xe_guc_relay *relay, + const u32 *msg, u32 len, u32 *buf, u32 buf_size) +{ + relay_assert(relay, IS_SRIOV_VF(relay_to_xe(relay))); + + return relay_send_to(relay, PFID, msg, len, buf, buf_size); +} + +static int relay_handle_reply(struct xe_guc_relay *relay, u32 origin, + u32 rid, int reply, const u32 *msg, u32 len) +{ + struct relay_transaction *pending; + int err = -ESRCH; + + spin_lock(&relay->lock); + list_for_each_entry(pending, &relay->pending_relays, link) { + if (pending->remote != origin || pending->rid != rid) { + relay_debug(relay, "%u.%u still awaits response\n", + pending->remote, pending->rid); + continue; + } + err = 0; /* found! 
*/ + if (reply == 0) { + if (len > pending->response_len) { + reply = -ENOBUFS; + err = -ENOBUFS; + } else { + memcpy(pending->response, msg, 4 * len); + pending->response_len = len; + } + } + pending->reply = reply; + complete_all(&pending->done); + break; + } + spin_unlock(&relay->lock); + + return err; +} + +static int relay_handle_failure(struct xe_guc_relay *relay, u32 origin, + u32 rid, const u32 *msg, u32 len) +{ + int error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[0]); + u32 hint __maybe_unused = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[0]); + + relay_assert(relay, len); + relay_debug(relay, "%u.%u error %#x (%pe) hint %u debug %*ph\n", + origin, rid, error, ERR_PTR(-error), hint, 4 * (len - 1), msg + 1); + + return relay_handle_reply(relay, origin, rid, error ?: -EREMOTEIO, NULL, 0); +} + +static int relay_testloop_action_handler(struct xe_guc_relay *relay, u32 origin, + const u32 *msg, u32 len, u32 *response, u32 size) +{ + static ktime_t last_reply = 0; + u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]); + u32 action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]); + u32 opcode = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]); + ktime_t now = ktime_get(); + bool busy; + int ret; + + relay_assert(relay, guc_hxg_type_is_action(type)); + relay_assert(relay, action == GUC_RELAY_ACTION_VFXPF_TESTLOOP); + + if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) + return -ECONNREFUSED; + + if (!last_reply) + last_reply = now; + busy = ktime_before(now, ktime_add_ms(last_reply, 2 * RELAY_TIMEOUT_MSEC)); + if (!busy) + last_reply = now; + + switch (opcode) { + case VFXPF_TESTLOOP_OPCODE_NOP: + if (type == GUC_HXG_TYPE_EVENT) + return 0; + return guc_hxg_msg_encode_success(response, 0); + case VFXPF_TESTLOOP_OPCODE_BUSY: + if (type == GUC_HXG_TYPE_EVENT) + return -EPROTO; + msleep(RELAY_TIMEOUT_MSEC / 8); + if (busy) + return -EINPROGRESS; + return guc_hxg_msg_encode_success(response, 0); + case VFXPF_TESTLOOP_OPCODE_RETRY: + if (type == GUC_HXG_TYPE_EVENT) + return -EPROTO; + msleep(RELAY_TIMEOUT_MSEC / 8); + if (busy) + return guc_hxg_msg_encode_retry(response, 0); + return guc_hxg_msg_encode_success(response, 0); + case VFXPF_TESTLOOP_OPCODE_ECHO: + if (type == GUC_HXG_TYPE_EVENT) + return -EPROTO; + if (size < len) + return -ENOBUFS; + ret = guc_hxg_msg_encode_success(response, len); + memcpy(response + ret, msg + ret, (len - ret) * sizeof(u32)); + return len; + case VFXPF_TESTLOOP_OPCODE_FAIL: + return -EHWPOISON; + default: + break; + } + + relay_notice(relay, "Unexpected action %#x opcode %#x\n", action, opcode); + return -EBADRQC; +} + +static int relay_action_handler(struct xe_guc_relay *relay, u32 origin, + const u32 *msg, u32 len, u32 *response, u32 size) +{ + u32 type; + int ret; + + relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN); + + if (FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]) == GUC_RELAY_ACTION_VFXPF_TESTLOOP) + return relay_testloop_action_handler(relay, origin, msg, len, response, size); + + type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]); + + /* XXX: PF services will be added later */ + ret = -EOPNOTSUPP; + + if (type == GUC_HXG_TYPE_EVENT) + relay_assert(relay, ret <= 0); + + return ret; +} + +static struct relay_transaction *relay_dequeue_transaction(struct xe_guc_relay *relay) +{ + struct relay_transaction *txn; + + spin_lock(&relay->lock); + txn = list_first_entry_or_null(&relay->incoming_actions, struct relay_transaction, link); + if (txn) + list_del_init(&txn->link); + spin_unlock(&relay->lock); + + return txn; +} + +static void 
relay_process_incoming_action(struct xe_guc_relay *relay) +{ + struct relay_transaction *txn; + bool again = false; + u32 type; + int ret; + + txn = relay_dequeue_transaction(relay); + if (!txn) + return; + + type = FIELD_GET(GUC_HXG_MSG_0_TYPE, txn->request_buf[txn->offset]); + + ret = relay_action_handler(relay, txn->remote, + txn->request_buf + txn->offset, txn->request_len, + txn->response_buf + txn->offset, + ARRAY_SIZE(txn->response_buf) - txn->offset); + + if (ret == -EINPROGRESS) { + again = true; + ret = guc_hxg_msg_encode_busy(txn->response_buf + txn->offset, 0); + } + + if (ret > 0) { + txn->response_len = ret; + ret = relay_send_transaction(relay, txn); + } + + if (ret < 0) { + u32 error = to_relay_error(ret); + + relay_notice(relay, "Failed to handle %s.%u from %u (%pe) %*ph\n", + guc_hxg_type_to_string(type), txn->rid, txn->remote, + ERR_PTR(ret), 4 * txn->request_len, txn->request_buf + txn->offset); + + txn->response_len = prepare_error_reply(txn->response_buf + txn->offset, + txn->remote ? + sanitize_relay_error(error) : error, + txn->remote ? + sanitize_relay_error_hint(-ret) : -ret); + ret = relay_send_transaction(relay, txn); + again = false; + } + + if (again) { + spin_lock(&relay->lock); + list_add(&txn->link, &relay->incoming_actions); + spin_unlock(&relay->lock); + return; + } + + if (unlikely(ret < 0)) + relay_notice(relay, "Failed to process action.%u (%pe) %*ph\n", + txn->rid, ERR_PTR(ret), 4 * txn->request_len, + txn->request_buf + txn->offset); + + relay_release_transaction(relay, txn); +} + +static bool relay_needs_worker(struct xe_guc_relay *relay) +{ + return !list_empty(&relay->incoming_actions); +} + +static void relay_kick_worker(struct xe_guc_relay *relay) +{ + KUNIT_STATIC_STUB_REDIRECT(relay_kick_worker, relay); + queue_work(relay_to_xe(relay)->sriov.wq, &relay->worker); +} + +static void relays_worker_fn(struct work_struct *w) +{ + struct xe_guc_relay *relay = container_of(w, struct xe_guc_relay, worker); + + relay_process_incoming_action(relay); + + if (relay_needs_worker(relay)) + relay_kick_worker(relay); +} + +static int relay_queue_action_msg(struct xe_guc_relay *relay, u32 origin, u32 rid, + const u32 *msg, u32 len) +{ + struct relay_transaction *txn; + + txn = relay_new_incoming_transaction(relay, origin, rid, msg, len); + if (IS_ERR(txn)) + return PTR_ERR(txn); + + spin_lock(&relay->lock); + list_add_tail(&txn->link, &relay->incoming_actions); + spin_unlock(&relay->lock); + + relay_kick_worker(relay); + return 0; +} + +static int relay_process_msg(struct xe_guc_relay *relay, u32 origin, u32 rid, + const u32 *msg, u32 len) +{ + u32 type; + int err; + + if (unlikely(len < GUC_HXG_MSG_MIN_LEN)) + return -EPROTO; + + if (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_HOST) + return -EPROTO; + + type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]); + relay_debug(relay, "received %s.%u from %u = %*ph\n", + guc_hxg_type_to_string(type), rid, origin, 4 * len, msg); + + switch (type) { + case GUC_HXG_TYPE_REQUEST: + case GUC_HXG_TYPE_FAST_REQUEST: + case GUC_HXG_TYPE_EVENT: + err = relay_queue_action_msg(relay, origin, rid, msg, len); + break; + case GUC_HXG_TYPE_RESPONSE_SUCCESS: + err = relay_handle_reply(relay, origin, rid, 0, msg, len); + break; + case GUC_HXG_TYPE_NO_RESPONSE_BUSY: + err = relay_handle_reply(relay, origin, rid, -EBUSY, NULL, 0); + break; + case GUC_HXG_TYPE_NO_RESPONSE_RETRY: + err = relay_handle_reply(relay, origin, rid, -EAGAIN, NULL, 0); + break; + case GUC_HXG_TYPE_RESPONSE_FAILURE: + err = relay_handle_failure(relay, 
origin, rid, msg, len); + break; + default: + err = -EBADRQC; + } + + if (unlikely(err)) + relay_notice(relay, "Failed to process %s.%u from %u (%pe) %*ph\n", + guc_hxg_type_to_string(type), rid, origin, + ERR_PTR(err), 4 * len, msg); + + return err; +} + +/** + * xe_guc_relay_process_guc2vf - Handle relay notification message from the GuC. + * @relay: the &xe_guc_relay which will handle the message + * @msg: message to be handled + * @len: length of the message (in dwords) + * + * This function will handle relay messages received from the GuC. + * + * This function can only be used if the driver is running in SR-IOV VF mode. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_guc_relay_process_guc2vf(struct xe_guc_relay *relay, const u32 *msg, u32 len) +{ + u32 rid; + + relay_assert(relay, len >= GUC_HXG_MSG_MIN_LEN); + relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC); + relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT); + relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) == + XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF); + + if (unlikely(!IS_SRIOV_VF(relay_to_xe(relay)) && !kunit_get_current_test())) + return -EPERM; + + if (unlikely(!relay_is_ready(relay))) + return -ENODEV; + + if (unlikely(len < GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN)) + return -EPROTO; + + if (unlikely(len > GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN)) + return -EMSGSIZE; + + if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0]))) + return -EPFNOSUPPORT; + + rid = FIELD_GET(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, msg[1]); + + return relay_process_msg(relay, PFID, rid, + msg + GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN, + len - GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN); +} + +#ifdef CONFIG_PCI_IOV +/** + * xe_guc_relay_process_guc2pf - Handle relay notification message from the GuC. + * @relay: the &xe_guc_relay which will handle the message + * @msg: message to be handled + * @len: length of the message (in dwords) + * + * This function will handle relay messages received from the GuC. + * + * This function can only be used if the driver is running in SR-IOV PF mode. + * + * Return: 0 on success or a negative error code on failure.
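+ * + * A minimal caller sketch (illustrative only; assumes @msg/@len were taken + * from a GuC CT event and that guc/xe are in scope): + * + * err = xe_guc_relay_process_guc2pf(&guc->relay, msg, len); + * if (err) + * drm_dbg(&xe->drm, "GUC2PF relay failed (%pe)\n", ERR_PTR(err));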
+ */ +int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len) +{ + u32 origin, rid; + int err; + + relay_assert(relay, len >= GUC_HXG_EVENT_MSG_MIN_LEN); + relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC); + relay_assert(relay, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT); + relay_assert(relay, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) == + XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF); + + if (unlikely(!IS_SRIOV_PF(relay_to_xe(relay)) && !kunit_get_current_test())) + return -EPERM; + + if (unlikely(!relay_is_ready(relay))) + return -ENODEV; + + if (unlikely(len < GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN)) + return -EPROTO; + + if (unlikely(len > GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN)) + return -EMSGSIZE; + + if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0]))) + return -EPFNOSUPPORT; + + origin = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, msg[1]); + rid = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, msg[2]); + + if (unlikely(origin > relay_get_totalvfs(relay))) + return -ENOENT; + + err = relay_process_msg(relay, origin, rid, + msg + GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN, + len - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN); + + return err; +} +#endif + +#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST) +#include "tests/xe_guc_relay_test.c" +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_relay.h b/drivers/gpu/drm/xe/xe_guc_relay.h new file mode 100644 index 000000000000..385429aa188a --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_relay.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_GUC_RELAY_H_ +#define _XE_GUC_RELAY_H_ + +#include <linux/types.h> +#include <linux/errno.h> + +struct xe_guc_relay; + +int xe_guc_relay_init(struct xe_guc_relay *relay); + +int xe_guc_relay_send_to_pf(struct xe_guc_relay *relay, + const u32 *msg, u32 len, u32 *buf, u32 buf_size); + +int xe_guc_relay_process_guc2vf(struct xe_guc_relay *relay, const u32 *msg, u32 len); + +#ifdef CONFIG_PCI_IOV +int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target, + const u32 *msg, u32 len, u32 *buf, u32 buf_size); +int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len); +#else +static inline int xe_guc_relay_send_to_vf(struct xe_guc_relay *relay, u32 target, + const u32 *msg, u32 len, u32 *buf, u32 buf_size) +{ + return -ENODEV; +} +static inline int xe_guc_relay_process_guc2pf(struct xe_guc_relay *relay, const u32 *msg, u32 len) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_relay_types.h b/drivers/gpu/drm/xe/xe_guc_relay_types.h new file mode 100644 index 000000000000..5999fcb77e96 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_relay_types.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_GUC_RELAY_TYPES_H_ +#define _XE_GUC_RELAY_TYPES_H_ + +#include <linux/mempool.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +/** + * struct xe_guc_relay - Data used by the VF-PF Relay Communication over GuC. + */ +struct xe_guc_relay { + /** @lock: protects all internal data. */ + spinlock_t lock; + + /** @worker: dispatches incoming action messages. */ + struct work_struct worker; + + /** @pending_relays: list of sent requests that await a response. */ + struct list_head pending_relays; + + /** @incoming_actions: list of incoming relay action messages to process.
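+ * Queued from relay_queue_action_msg() and drained by @worker.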
*/ + struct list_head incoming_actions; + + /** @pool: pool of the relay message buffers. */ + mempool_t pool; + + /** @last_rid: last Relay-ID used while sending a message. */ + u32 last_rid; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 54ffcfcdd41f..ff77bc8da1b2 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -23,6 +23,7 @@ #include "xe_force_wake.h" #include "xe_gpu_scheduler.h" #include "xe_gt.h" +#include "xe_gt_printk.h" #include "xe_guc.h" #include "xe_guc_ct.h" #include "xe_guc_exec_queue_types.h" @@ -311,7 +312,7 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa q->guc->id - GUC_ID_START_MLRC, order_base_2(q->width)); else - ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id); + ida_free(&guc->submission_state.guc_ids, q->guc->id); } static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) @@ -335,8 +336,8 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC, order_base_2(q->width)); } else { - ret = ida_simple_get(&guc->submission_state.guc_ids, 0, - GUC_ID_NUMBER_SLRC, GFP_NOWAIT); + ret = ida_alloc_max(&guc->submission_state.guc_ids, + GUC_ID_NUMBER_SLRC - 1, GFP_NOWAIT); } if (ret < 0) return ret; @@ -811,7 +812,8 @@ static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) static void simple_error_capture(struct xe_exec_queue *q) { struct xe_guc *guc = exec_queue_to_guc(q); - struct drm_printer p = drm_err_printer(""); + struct xe_device *xe = guc_to_xe(guc); + struct drm_printer p = drm_err_printer(&xe->drm, NULL); struct xe_hw_engine *hwe; enum xe_hw_engine_id id; u32 adj_logical_mask = q->logical_mask; @@ -928,13 +930,15 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) int i = 0; if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) { - xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_KERNEL)); - xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q))); - drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx", xe_sched_job_seqno(job), q->guc->id, q->flags); + xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL, + "Kernel-submitted job timed out\n"); + xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q), + "VM job timed out on non-killed execqueue\n"); + simple_error_capture(q); - xe_devcoredump(q); + xe_devcoredump(job); } else { drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx", xe_sched_job_seqno(job), q->guc->id, q->flags); @@ -1028,8 +1032,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w) if (xe_exec_queue_is_lr(q)) cancel_work_sync(&ge->lr_tdr); - if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT) - xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q); release_guc_id(guc, q); xe_sched_entity_fini(&ge->entity); xe_sched_fini(&ge->sched); @@ -1218,7 +1220,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) init_waitqueue_head(&ge->suspend_wait); timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? 
MAX_SCHEDULE_TIMEOUT : - q->hwe->eclass->sched_props.job_timeout_ms; + q->sched_props.job_timeout_ms; err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, get_submit_wq(guc), q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64, @@ -1350,21 +1352,6 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, return 0; } -static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms) -{ - struct xe_gpu_scheduler *sched = &q->guc->sched; - struct xe_guc *guc = exec_queue_to_guc(q); - struct xe_device *xe = guc_to_xe(guc); - - xe_assert(xe, !exec_queue_registered(q)); - xe_assert(xe, !exec_queue_banned(q)); - xe_assert(xe, !exec_queue_killed(q)); - - sched->base.timeout = job_timeout_ms; - - return 0; -} - static int guc_exec_queue_suspend(struct xe_exec_queue *q) { struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; @@ -1415,7 +1402,6 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = { .set_priority = guc_exec_queue_set_priority, .set_timeslice = guc_exec_queue_set_timeslice, .set_preempt_timeout = guc_exec_queue_set_preempt_timeout, - .set_job_timeout = guc_exec_queue_set_job_timeout, .suspend = guc_exec_queue_suspend, .suspend_wait = guc_exec_queue_suspend_wait, .resume = guc_exec_queue_resume, @@ -1796,7 +1782,7 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps /** * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine. - * @q: Xe exec queue. + * @job: faulty Xe scheduled job. * * This can be printed out in a later stage like during dev_coredump * analysis. @@ -1805,21 +1791,17 @@ guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps * caller, using `xe_guc_exec_queue_snapshot_free`. */ struct xe_guc_submit_exec_queue_snapshot * -xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q) +xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job) { - struct xe_guc *guc = exec_queue_to_guc(q); - struct xe_device *xe = guc_to_xe(guc); + struct xe_exec_queue *q = job->q; struct xe_gpu_scheduler *sched = &q->guc->sched; - struct xe_sched_job *job; struct xe_guc_submit_exec_queue_snapshot *snapshot; int i; snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC); - if (!snapshot) { - drm_err(&xe->drm, "Skipping GuC Engine snapshot entirely.\n"); + if (!snapshot) return NULL; - } snapshot->guc.id = q->guc->id; memcpy(&snapshot->name, &q->name, sizeof(snapshot->name)); @@ -1835,9 +1817,7 @@ xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q) snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot), GFP_ATOMIC); - if (!snapshot->lrc) { - drm_err(&xe->drm, "Skipping GuC Engine LRC snapshot.\n"); - } else { + if (snapshot->lrc) { for (i = 0; i < q->width; ++i) { struct xe_lrc *lrc = q->lrc + i; @@ -1865,17 +1845,17 @@ xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q) sizeof(struct pending_list_snapshot), GFP_ATOMIC); - if (!snapshot->pending_list) { - drm_err(&xe->drm, "Skipping GuC Engine pending_list snapshot.\n"); - } else { + if (snapshot->pending_list) { + struct xe_sched_job *job_iter; + i = 0; - list_for_each_entry(job, &sched->base.pending_list, drm.list) { + list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) { snapshot->pending_list[i].seqno = - xe_sched_job_seqno(job); + xe_sched_job_seqno(job_iter); snapshot->pending_list[i].fence = - dma_fence_is_signaled(job->fence) ? 1 : 0; + dma_fence_is_signaled(job_iter->fence) ? 
1 : 0; snapshot->pending_list[i].finished = - dma_fence_is_signaled(&job->drm.s_fence->finished) + dma_fence_is_signaled(&job_iter->drm.s_fence->finished) ? 1 : 0; i++; } @@ -1961,10 +1941,28 @@ void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *s static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) { struct xe_guc_submit_exec_queue_snapshot *snapshot; + struct xe_gpu_scheduler *sched = &q->guc->sched; + struct xe_sched_job *job; + bool found = false; - snapshot = xe_guc_exec_queue_snapshot_capture(q); + spin_lock(&sched->base.job_list_lock); + list_for_each_entry(job, &sched->base.pending_list, drm.list) { + if (job->q == q) { + xe_sched_job_get(job); + found = true; + break; + } + } + spin_unlock(&sched->base.job_list_lock); + + if (!found) + return; + + snapshot = xe_guc_exec_queue_snapshot_capture(job); xe_guc_exec_queue_snapshot_print(snapshot, p); xe_guc_exec_queue_snapshot_free(snapshot); + + xe_sched_job_put(job); } /** diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h index fc97869c5b86..723dc2bd8df9 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.h +++ b/drivers/gpu/drm/xe/xe_guc_submit.h @@ -9,8 +9,8 @@ #include <linux/types.h> struct drm_printer; -struct xe_exec_queue; struct xe_guc; +struct xe_sched_job; int xe_guc_submit_init(struct xe_guc *guc); @@ -27,7 +27,7 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len); struct xe_guc_submit_exec_queue_snapshot * -xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q); +xe_guc_exec_queue_snapshot_capture(struct xe_sched_job *job); void xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot, struct drm_printer *p); diff --git a/drivers/gpu/drm/xe/xe_guc_submit_types.h b/drivers/gpu/drm/xe/xe_guc_submit_types.h index 649b0a852692..72fc0f42b0a5 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit_types.h +++ b/drivers/gpu/drm/xe/xe_guc_submit_types.h @@ -102,9 +102,9 @@ struct xe_guc_submit_exec_queue_snapshot { /** @sched_props: scheduling properties */ struct { - /** @timeslice_us: timeslice period in micro-seconds */ + /** @sched_props.timeslice_us: timeslice period in micro-seconds */ u32 timeslice_us; - /** @preempt_timeout_us: preemption timeout in micro-seconds */ + /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */ u32 preempt_timeout_us; } sched_props; @@ -118,11 +118,11 @@ struct xe_guc_submit_exec_queue_snapshot { /** @guc: GuC Engine Snapshot */ struct { - /** @wqi_head: work queue item head */ + /** @guc.wqi_head: work queue item head */ u32 wqi_head; - /** @wqi_tail: work queue item tail */ + /** @guc.wqi_tail: work queue item tail */ u32 wqi_tail; - /** @id: GuC id for this exec_queue */ + /** @guc.id: GuC id for this exec_queue */ u16 id; } guc; @@ -133,13 +133,13 @@ struct xe_guc_submit_exec_queue_snapshot { bool parallel_execution; /** @parallel: snapshot of the useful parallel scratch */ struct { - /** @wq_desc: Workqueue description */ + /** @parallel.wq_desc: Workqueue description */ struct { - /** @head: Workqueue Head */ + /** @parallel.wq_desc.head: Workqueue Head */ u32 head; - /** @tail: Workqueue Tail */ + /** @parallel.wq_desc.tail: Workqueue Tail */ u32 tail; - /** @status: Workqueue Status */ + /** @parallel.wq_desc.status: Workqueue Status */ u32 status; } wq_desc; /** @wq: Workqueue Items */ diff --git a/drivers/gpu/drm/xe/xe_guc_types.h 
b/drivers/gpu/drm/xe/xe_guc_types.h index cd80802e8918..edcd1a950bd3 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -15,9 +15,23 @@ #include "xe_guc_fwif.h" #include "xe_guc_log_types.h" #include "xe_guc_pc_types.h" +#include "xe_guc_relay_types.h" #include "xe_uc_fw_types.h" /** + * struct xe_guc_db_mgr - GuC Doorbells Manager. + * + * Note: GuC Doorbells Manager is relying on &xe_guc::submission_state.lock + * to protect its members. + */ +struct xe_guc_db_mgr { + /** @count: number of doorbells to manage */ + unsigned int count; + /** @bitmap: bitmap to track allocated doorbells */ + unsigned long *bitmap; +}; + +/** * struct xe_guc - Graphic micro controller */ struct xe_guc { @@ -31,45 +45,50 @@ struct xe_guc { struct xe_guc_ct ct; /** @pc: GuC Power Conservation */ struct xe_guc_pc pc; + /** @dbm: GuC Doorbell Manager */ + struct xe_guc_db_mgr dbm; /** @submission_state: GuC submission state */ struct { - /** @exec_queue_lookup: Lookup an xe_engine from guc_id */ + /** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */ struct xarray exec_queue_lookup; - /** @guc_ids: used to allocate new guc_ids, single-lrc */ + /** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */ struct ida guc_ids; - /** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */ + /** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */ unsigned long *guc_ids_bitmap; - /** @stopped: submissions are stopped */ + /** @submission_state.stopped: submissions are stopped */ atomic_t stopped; - /** @lock: protects submission state */ + /** @submission_state.lock: protects submission state */ struct mutex lock; - /** @suspend: suspend fence state */ + /** @submission_state.suspend: suspend fence state */ struct { - /** @lock: suspend fences lock */ + /** @submission_state.suspend.lock: suspend fences lock */ spinlock_t lock; - /** @context: suspend fences context */ + /** @submission_state.suspend.context: suspend fences context */ u64 context; - /** @seqno: suspend fences seqno */ + /** @submission_state.suspend.seqno: suspend fences seqno */ u32 seqno; } suspend; #ifdef CONFIG_PROVE_LOCKING #define NUM_SUBMIT_WQ 256 - /** @submit_wq_pool: submission ordered workqueues pool */ + /** @submission_state.submit_wq_pool: submission ordered workqueues pool */ struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ]; - /** @submit_wq_idx: submission ordered workqueue index */ + /** @submission_state.submit_wq_idx: submission ordered workqueue index */ int submit_wq_idx; #endif - /** @enabled: submission is enabled */ + /** @submission_state.enabled: submission is enabled */ bool enabled; } submission_state; /** @hwconfig: Hardware config state */ struct { - /** @bo: buffer object of the hardware config */ + /** @hwconfig.bo: buffer object of the hardware config */ struct xe_bo *bo; - /** @size: size of the hardware config */ + /** @hwconfig.size: size of the hardware config */ u32 size; } hwconfig; + /** @relay: GuC Relay Communication used in SR-IOV */ + struct xe_guc_relay relay; + /** * @notify_reg: Register which is written to notify GuC of H2G messages */ diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c index bfdd33b9b23b..1c9d38b6f5f1 100644 --- a/drivers/gpu/drm/xe/xe_heci_gsc.c +++ b/drivers/gpu/drm/xe/xe_heci_gsc.c @@ -29,7 +29,7 @@ static void heci_gsc_irq_unmask(struct irq_data *d) /* generic irq handling */ } -static struct irq_chip heci_gsc_irq_chip = { +static const struct 
irq_chip heci_gsc_irq_chip = { .name = "gsc_irq_chip", .irq_mask = heci_gsc_irq_mask, .irq_unmask = heci_gsc_irq_unmask, diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c index eca109791c6a..b545f850087c 100644 --- a/drivers/gpu/drm/xe/xe_huc.c +++ b/drivers/gpu/drm/xe/xe_huc.c @@ -112,6 +112,25 @@ out: return ret; } +int xe_huc_init_post_hwconfig(struct xe_huc *huc) +{ + struct xe_tile *tile = gt_to_tile(huc_to_gt(huc)); + struct xe_device *xe = huc_to_xe(huc); + int ret; + + if (!IS_DGFX(huc_to_xe(huc))) + return 0; + + if (!xe_uc_fw_is_loadable(&huc->fw)) + return 0; + + ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo); + if (ret) + return ret; + + return 0; +} + int xe_huc_upload(struct xe_huc *huc) { if (!xe_uc_fw_is_loadable(&huc->fw)) diff --git a/drivers/gpu/drm/xe/xe_huc.h b/drivers/gpu/drm/xe/xe_huc.h index 532017230287..3ab56cc14b00 100644 --- a/drivers/gpu/drm/xe/xe_huc.h +++ b/drivers/gpu/drm/xe/xe_huc.h @@ -17,6 +17,7 @@ enum xe_huc_auth_types { }; int xe_huc_init(struct xe_huc *huc); +int xe_huc_init_post_hwconfig(struct xe_huc *huc); int xe_huc_upload(struct xe_huc *huc); int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type); bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type); diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 1fa5cf5eea97..b5e83ea172f3 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -25,6 +25,7 @@ #include "xe_reg_sr.h" #include "xe_rtp.h" #include "xe_sched_job.h" +#include "xe_sriov.h" #include "xe_tuning.h" #include "xe_uc_fw.h" #include "xe_wa.h" @@ -34,6 +35,7 @@ struct engine_info { const char *name; unsigned int class : 8; unsigned int instance : 8; + unsigned int irq_offset : 8; enum xe_force_wake_domains domain; u32 mmio_base; }; @@ -43,6 +45,7 @@ static const struct engine_info engine_infos[] = { .name = "rcs0", .class = XE_ENGINE_CLASS_RENDER, .instance = 0, + .irq_offset = ilog2(INTR_RCS0), .domain = XE_FW_RENDER, .mmio_base = RENDER_RING_BASE, }, @@ -50,6 +53,7 @@ static const struct engine_info engine_infos[] = { .name = "bcs0", .class = XE_ENGINE_CLASS_COPY, .instance = 0, + .irq_offset = ilog2(INTR_BCS(0)), .domain = XE_FW_RENDER, .mmio_base = BLT_RING_BASE, }, @@ -57,6 +61,7 @@ static const struct engine_info engine_infos[] = { .name = "bcs1", .class = XE_ENGINE_CLASS_COPY, .instance = 1, + .irq_offset = ilog2(INTR_BCS(1)), .domain = XE_FW_RENDER, .mmio_base = XEHPC_BCS1_RING_BASE, }, @@ -64,6 +69,7 @@ static const struct engine_info engine_infos[] = { .name = "bcs2", .class = XE_ENGINE_CLASS_COPY, .instance = 2, + .irq_offset = ilog2(INTR_BCS(2)), .domain = XE_FW_RENDER, .mmio_base = XEHPC_BCS2_RING_BASE, }, @@ -71,6 +77,7 @@ static const struct engine_info engine_infos[] = { .name = "bcs3", .class = XE_ENGINE_CLASS_COPY, .instance = 3, + .irq_offset = ilog2(INTR_BCS(3)), .domain = XE_FW_RENDER, .mmio_base = XEHPC_BCS3_RING_BASE, }, @@ -78,6 +85,7 @@ static const struct engine_info engine_infos[] = { .name = "bcs4", .class = XE_ENGINE_CLASS_COPY, .instance = 4, + .irq_offset = ilog2(INTR_BCS(4)), .domain = XE_FW_RENDER, .mmio_base = XEHPC_BCS4_RING_BASE, }, @@ -85,6 +93,7 @@ static const struct engine_info engine_infos[] = { .name = "bcs5", .class = XE_ENGINE_CLASS_COPY, .instance = 5, + .irq_offset = ilog2(INTR_BCS(5)), .domain = XE_FW_RENDER, .mmio_base = XEHPC_BCS5_RING_BASE, }, @@ -92,12 +101,14 @@ static const struct engine_info engine_infos[] = { .name = "bcs6", .class = 
XE_ENGINE_CLASS_COPY, .instance = 6, + .irq_offset = ilog2(INTR_BCS(6)), .domain = XE_FW_RENDER, .mmio_base = XEHPC_BCS6_RING_BASE, }, [XE_HW_ENGINE_BCS7] = { .name = "bcs7", .class = XE_ENGINE_CLASS_COPY, + .irq_offset = ilog2(INTR_BCS(7)), .instance = 7, .domain = XE_FW_RENDER, .mmio_base = XEHPC_BCS7_RING_BASE, @@ -106,6 +117,7 @@ static const struct engine_info engine_infos[] = { .name = "bcs8", .class = XE_ENGINE_CLASS_COPY, .instance = 8, + .irq_offset = ilog2(INTR_BCS8), .domain = XE_FW_RENDER, .mmio_base = XEHPC_BCS8_RING_BASE, }, @@ -114,6 +126,7 @@ static const struct engine_info engine_infos[] = { .name = "vcs0", .class = XE_ENGINE_CLASS_VIDEO_DECODE, .instance = 0, + .irq_offset = 32 + ilog2(INTR_VCS(0)), .domain = XE_FW_MEDIA_VDBOX0, .mmio_base = BSD_RING_BASE, }, @@ -121,6 +134,7 @@ static const struct engine_info engine_infos[] = { .name = "vcs1", .class = XE_ENGINE_CLASS_VIDEO_DECODE, .instance = 1, + .irq_offset = 32 + ilog2(INTR_VCS(1)), .domain = XE_FW_MEDIA_VDBOX1, .mmio_base = BSD2_RING_BASE, }, @@ -128,6 +142,7 @@ static const struct engine_info engine_infos[] = { .name = "vcs2", .class = XE_ENGINE_CLASS_VIDEO_DECODE, .instance = 2, + .irq_offset = 32 + ilog2(INTR_VCS(2)), .domain = XE_FW_MEDIA_VDBOX2, .mmio_base = BSD3_RING_BASE, }, @@ -135,6 +150,7 @@ static const struct engine_info engine_infos[] = { .name = "vcs3", .class = XE_ENGINE_CLASS_VIDEO_DECODE, .instance = 3, + .irq_offset = 32 + ilog2(INTR_VCS(3)), .domain = XE_FW_MEDIA_VDBOX3, .mmio_base = BSD4_RING_BASE, }, @@ -142,6 +158,7 @@ static const struct engine_info engine_infos[] = { .name = "vcs4", .class = XE_ENGINE_CLASS_VIDEO_DECODE, .instance = 4, + .irq_offset = 32 + ilog2(INTR_VCS(4)), .domain = XE_FW_MEDIA_VDBOX4, .mmio_base = XEHP_BSD5_RING_BASE, }, @@ -149,6 +166,7 @@ static const struct engine_info engine_infos[] = { .name = "vcs5", .class = XE_ENGINE_CLASS_VIDEO_DECODE, .instance = 5, + .irq_offset = 32 + ilog2(INTR_VCS(5)), .domain = XE_FW_MEDIA_VDBOX5, .mmio_base = XEHP_BSD6_RING_BASE, }, @@ -156,6 +174,7 @@ static const struct engine_info engine_infos[] = { .name = "vcs6", .class = XE_ENGINE_CLASS_VIDEO_DECODE, .instance = 6, + .irq_offset = 32 + ilog2(INTR_VCS(6)), .domain = XE_FW_MEDIA_VDBOX6, .mmio_base = XEHP_BSD7_RING_BASE, }, @@ -163,6 +182,7 @@ static const struct engine_info engine_infos[] = { .name = "vcs7", .class = XE_ENGINE_CLASS_VIDEO_DECODE, .instance = 7, + .irq_offset = 32 + ilog2(INTR_VCS(7)), .domain = XE_FW_MEDIA_VDBOX7, .mmio_base = XEHP_BSD8_RING_BASE, }, @@ -170,6 +190,7 @@ static const struct engine_info engine_infos[] = { .name = "vecs0", .class = XE_ENGINE_CLASS_VIDEO_ENHANCE, .instance = 0, + .irq_offset = 32 + ilog2(INTR_VECS(0)), .domain = XE_FW_MEDIA_VEBOX0, .mmio_base = VEBOX_RING_BASE, }, @@ -177,6 +198,7 @@ static const struct engine_info engine_infos[] = { .name = "vecs1", .class = XE_ENGINE_CLASS_VIDEO_ENHANCE, .instance = 1, + .irq_offset = 32 + ilog2(INTR_VECS(1)), .domain = XE_FW_MEDIA_VEBOX1, .mmio_base = VEBOX2_RING_BASE, }, @@ -184,6 +206,7 @@ static const struct engine_info engine_infos[] = { .name = "vecs2", .class = XE_ENGINE_CLASS_VIDEO_ENHANCE, .instance = 2, + .irq_offset = 32 + ilog2(INTR_VECS(2)), .domain = XE_FW_MEDIA_VEBOX2, .mmio_base = XEHP_VEBOX3_RING_BASE, }, @@ -191,6 +214,7 @@ static const struct engine_info engine_infos[] = { .name = "vecs3", .class = XE_ENGINE_CLASS_VIDEO_ENHANCE, .instance = 3, + .irq_offset = 32 + ilog2(INTR_VECS(3)), .domain = XE_FW_MEDIA_VEBOX3, .mmio_base = XEHP_VEBOX4_RING_BASE, }, @@ -198,6 +222,7 @@ static const 
struct engine_info engine_infos[] = { .name = "ccs0", .class = XE_ENGINE_CLASS_COMPUTE, .instance = 0, + .irq_offset = ilog2(INTR_CCS(0)), .domain = XE_FW_RENDER, .mmio_base = COMPUTE0_RING_BASE, }, @@ -205,6 +230,7 @@ static const struct engine_info engine_infos[] = { .name = "ccs1", .class = XE_ENGINE_CLASS_COMPUTE, .instance = 1, + .irq_offset = ilog2(INTR_CCS(1)), .domain = XE_FW_RENDER, .mmio_base = COMPUTE1_RING_BASE, }, @@ -212,6 +238,7 @@ static const struct engine_info engine_infos[] = { .name = "ccs2", .class = XE_ENGINE_CLASS_COMPUTE, .instance = 2, + .irq_offset = ilog2(INTR_CCS(2)), .domain = XE_FW_RENDER, .mmio_base = COMPUTE2_RING_BASE, }, @@ -219,6 +246,7 @@ static const struct engine_info engine_infos[] = { .name = "ccs3", .class = XE_ENGINE_CLASS_COMPUTE, .instance = 3, + .irq_offset = ilog2(INTR_CCS(3)), .domain = XE_FW_RENDER, .mmio_base = COMPUTE3_RING_BASE, }, @@ -289,6 +317,19 @@ static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt, xe_rtp_match_first_render_or_compute(gt, hwe); } +static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt, + const struct xe_hw_engine *hwe) +{ + if (GRAPHICS_VER(gt_to_xe(gt)) < 20) + return false; + + if (hwe->class != XE_ENGINE_CLASS_COMPUTE && + hwe->class != XE_ENGINE_CLASS_RENDER) + return false; + + return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE; +} + void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe) { @@ -319,6 +360,14 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe) XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE)) }, + /* Disable WMTP if HW doesn't support it */ + { XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"), + XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)), + XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0), + PREEMPT_GPGPU_LEVEL_MASK, + PREEMPT_GPGPU_THREAD_GROUP_LEVEL)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE) + }, {} }; @@ -397,6 +446,7 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe, hwe->class = info->class; hwe->instance = info->instance; hwe->mmio_base = info->mmio_base; + hwe->irq_offset = info->irq_offset; hwe->domain = info->domain; hwe->name = info->name; hwe->fence_irq = &gt->fence_irq[info->class]; @@ -700,7 +750,7 @@ struct xe_hw_engine_snapshot * xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) { struct xe_hw_engine_snapshot *snapshot; - int len; + u64 val; if (!xe_hw_engine_is_valid(hwe)) return NULL; @@ -710,11 +760,7 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) if (!snapshot) return NULL; - len = strlen(hwe->name) + 1; - snapshot->name = kzalloc(len, GFP_ATOMIC); - if (snapshot->name) - strscpy(snapshot->name, hwe->name, len); - + snapshot->name = kstrdup(hwe->name, GFP_ATOMIC); snapshot->class = hwe->class; snapshot->logical_instance = hwe->logical_instance; snapshot->forcewake.domain = hwe->domain; @@ -722,19 +768,35 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) hwe->domain); snapshot->mmio_base = hwe->mmio_base; - snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0)); - snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe, - RING_HWS_PGA(0)); - snapshot->reg.ring_execlist_status_lo = + /* no more VF accessible data below this point */ + if (IS_SRIOV_VF(gt_to_xe(hwe->gt))) + return snapshot; + + snapshot->reg.ring_execlist_status = hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0)); - snapshot->reg.ring_execlist_status_hi = - hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0)); -
snapshot->reg.ring_execlist_sq_contents_lo = - hw_engine_mmio_read32(hwe, - RING_EXECLIST_SQ_CONTENTS_LO(0)); - snapshot->reg.ring_execlist_sq_contents_hi = - hw_engine_mmio_read32(hwe, - RING_EXECLIST_SQ_CONTENTS_HI(0)); + val = hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0)); + snapshot->reg.ring_execlist_status |= val << 32; + + snapshot->reg.ring_execlist_sq_contents = + hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0)); + val = hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0)); + snapshot->reg.ring_execlist_sq_contents |= val << 32; + + snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0)); + val = hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0)); + snapshot->reg.ring_acthd |= val << 32; + + snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0)); + val = hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0)); + snapshot->reg.ring_bbaddr |= val << 32; + + snapshot->reg.ring_dma_fadd = + hw_engine_mmio_read32(hwe, RING_DMA_FADD(0)); + val = hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0)); + snapshot->reg.ring_dma_fadd |= val << 32; + + snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0)); + snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe, RING_HWS_PGA(0)); snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0)); snapshot->reg.ring_head = hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR; @@ -748,16 +810,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0)); snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0)); snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0)); - snapshot->reg.ring_acthd_udw = - hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0)); - snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0)); - snapshot->reg.ring_bbaddr_udw = - hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0)); - snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0)); - snapshot->reg.ring_dma_fadd_udw = - hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0)); - snapshot->reg.ring_dma_fadd = - hw_engine_mmio_read32(hwe, RING_DMA_FADD(0)); snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0)); if (snapshot->class == XE_ENGINE_CLASS_COMPUTE) @@ -786,33 +838,25 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, snapshot->forcewake.domain, snapshot->forcewake.ref); drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam); drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga); - drm_printf(p, "\tRING_EXECLIST_STATUS_LO: 0x%08x\n", - snapshot->reg.ring_execlist_status_lo); - drm_printf(p, "\tRING_EXECLIST_STATUS_HI: 0x%08x\n", - snapshot->reg.ring_execlist_status_hi); - drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_LO: 0x%08x\n", - snapshot->reg.ring_execlist_sq_contents_lo); - drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_HI: 0x%08x\n", - snapshot->reg.ring_execlist_sq_contents_hi); + drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n", + snapshot->reg.ring_execlist_status); + drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n", + snapshot->reg.ring_execlist_sq_contents); drm_printf(p, "\tRING_START: 0x%08x\n", snapshot->reg.ring_start); - drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head); - drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail); + drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head); + drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail); drm_printf(p, "\tRING_CTL: 0x%08x\n", 
snapshot->reg.ring_ctl); drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode); drm_printf(p, "\tRING_MODE: 0x%08x\n", snapshot->reg.ring_mode); - drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr); - drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr); - drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr); - drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir); - drm_printf(p, "\tACTHD: 0x%08x_%08x\n", snapshot->reg.ring_acthd_udw, - snapshot->reg.ring_acthd); - drm_printf(p, "\tBBADDR: 0x%08x_%08x\n", snapshot->reg.ring_bbaddr_udw, - snapshot->reg.ring_bbaddr); - drm_printf(p, "\tDMA_FADDR: 0x%08x_%08x\n", - snapshot->reg.ring_dma_fadd_udw, - snapshot->reg.ring_dma_fadd); - drm_printf(p, "\tIPEHR: 0x%08x\n\n", snapshot->reg.ipehr); + drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr); + drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr); + drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr); + drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir); + drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd); + drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr); + drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd); + drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr); if (snapshot->class == XE_ENGINE_CLASS_COMPUTE) drm_printf(p, "\tRCU_MODE: 0x%08x\n", snapshot->reg.rcu_mode); diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c index e49bc14f0ecf..2345fb42fa39 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c @@ -73,7 +73,7 @@ static ssize_t job_timeout_max_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_max); } -static struct kobj_attribute job_timeout_max_attr = +static const struct kobj_attribute job_timeout_max_attr = __ATTR(job_timeout_max, 0644, job_timeout_max_show, job_timeout_max_store); static ssize_t job_timeout_min_store(struct kobject *kobj, @@ -109,7 +109,7 @@ static ssize_t job_timeout_min_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_min); } -static struct kobj_attribute job_timeout_min_attr = +static const struct kobj_attribute job_timeout_min_attr = __ATTR(job_timeout_min, 0644, job_timeout_min_show, job_timeout_min_store); static ssize_t job_timeout_store(struct kobject *kobj, @@ -142,7 +142,7 @@ static ssize_t job_timeout_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_ms); } -static struct kobj_attribute job_timeout_attr = +static const struct kobj_attribute job_timeout_attr = __ATTR(job_timeout_ms, 0644, job_timeout_show, job_timeout_store); static ssize_t job_timeout_default(struct kobject *kobj, @@ -153,7 +153,7 @@ static ssize_t job_timeout_default(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->defaults.job_timeout_ms); } -static struct kobj_attribute job_timeout_def = +static const struct kobj_attribute job_timeout_def = __ATTR(job_timeout_ms, 0444, job_timeout_default, NULL); static ssize_t job_timeout_min_default(struct kobject *kobj, @@ -164,7 +164,7 @@ static ssize_t job_timeout_min_default(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->defaults.job_timeout_min); } -static struct kobj_attribute job_timeout_min_def = +static const struct kobj_attribute job_timeout_min_def = __ATTR(job_timeout_min, 0444, job_timeout_min_default, NULL); static ssize_t 
job_timeout_max_default(struct kobject *kobj, @@ -175,7 +175,7 @@ static ssize_t job_timeout_max_default(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->defaults.job_timeout_max); } -static struct kobj_attribute job_timeout_max_def = +static const struct kobj_attribute job_timeout_max_def = __ATTR(job_timeout_max, 0444, job_timeout_max_default, NULL); static ssize_t timeslice_duration_store(struct kobject *kobj, @@ -234,7 +234,7 @@ static ssize_t timeslice_duration_max_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.timeslice_max); } -static struct kobj_attribute timeslice_duration_max_attr = +static const struct kobj_attribute timeslice_duration_max_attr = __ATTR(timeslice_duration_max, 0644, timeslice_duration_max_show, timeslice_duration_max_store); @@ -272,7 +272,7 @@ static ssize_t timeslice_duration_min_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.timeslice_min); } -static struct kobj_attribute timeslice_duration_min_attr = +static const struct kobj_attribute timeslice_duration_min_attr = __ATTR(timeslice_duration_min, 0644, timeslice_duration_min_show, timeslice_duration_min_store); @@ -284,7 +284,7 @@ static ssize_t timeslice_duration_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.timeslice_us); } -static struct kobj_attribute timeslice_duration_attr = +static const struct kobj_attribute timeslice_duration_attr = __ATTR(timeslice_duration_us, 0644, timeslice_duration_show, timeslice_duration_store); @@ -296,7 +296,7 @@ static ssize_t timeslice_default(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->defaults.timeslice_us); } -static struct kobj_attribute timeslice_duration_def = +static const struct kobj_attribute timeslice_duration_def = __ATTR(timeslice_duration_us, 0444, timeslice_default, NULL); static ssize_t timeslice_min_default(struct kobject *kobj, @@ -307,7 +307,7 @@ static ssize_t timeslice_min_default(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->defaults.timeslice_min); } -static struct kobj_attribute timeslice_duration_min_def = +static const struct kobj_attribute timeslice_duration_min_def = __ATTR(timeslice_duration_min, 0444, timeslice_min_default, NULL); static ssize_t timeslice_max_default(struct kobject *kobj, @@ -318,7 +318,7 @@ static ssize_t timeslice_max_default(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->defaults.timeslice_max); } -static struct kobj_attribute timeslice_duration_max_def = +static const struct kobj_attribute timeslice_duration_max_def = __ATTR(timeslice_duration_max, 0444, timeslice_max_default, NULL); static ssize_t preempt_timeout_store(struct kobject *kobj, @@ -351,7 +351,7 @@ static ssize_t preempt_timeout_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_us); } -static struct kobj_attribute preempt_timeout_attr = +static const struct kobj_attribute preempt_timeout_attr = __ATTR(preempt_timeout_us, 0644, preempt_timeout_show, preempt_timeout_store); static ssize_t preempt_timeout_default(struct kobject *kobj, @@ -363,7 +363,7 @@ static ssize_t preempt_timeout_default(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_us); } -static struct kobj_attribute preempt_timeout_def = +static const struct kobj_attribute preempt_timeout_def = __ATTR(preempt_timeout_us, 0444, preempt_timeout_default, NULL); static ssize_t preempt_timeout_min_default(struct kobject *kobj, @@ -375,7 +375,7 @@ static ssize_t preempt_timeout_min_default(struct kobject 
*kobj, return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_min); } -static struct kobj_attribute preempt_timeout_min_def = +static const struct kobj_attribute preempt_timeout_min_def = __ATTR(preempt_timeout_min, 0444, preempt_timeout_min_default, NULL); static ssize_t preempt_timeout_max_default(struct kobject *kobj, @@ -387,7 +387,7 @@ static ssize_t preempt_timeout_max_default(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_max); } -static struct kobj_attribute preempt_timeout_max_def = +static const struct kobj_attribute preempt_timeout_max_def = __ATTR(preempt_timeout_max, 0444, preempt_timeout_max_default, NULL); static ssize_t preempt_timeout_max_store(struct kobject *kobj, @@ -423,7 +423,7 @@ static ssize_t preempt_timeout_max_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_max); } -static struct kobj_attribute preempt_timeout_max_attr = +static const struct kobj_attribute preempt_timeout_max_attr = __ATTR(preempt_timeout_max, 0644, preempt_timeout_max_show, preempt_timeout_max_store); @@ -460,7 +460,7 @@ static ssize_t preempt_timeout_min_show(struct kobject *kobj, return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_min); } -static struct kobj_attribute preempt_timeout_min_attr = +static const struct kobj_attribute preempt_timeout_min_attr = __ATTR(preempt_timeout_min, 0644, preempt_timeout_min_show, preempt_timeout_min_store); @@ -477,7 +477,7 @@ static const struct attribute *defaults[] = { NULL }; -static const struct attribute *files[] = { +static const struct attribute * const files[] = { &job_timeout_attr.attr, &job_timeout_min_attr.attr, &job_timeout_max_attr.attr, diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h index 39908dec042a..d7f828c76cc5 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_types.h +++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h @@ -79,23 +79,23 @@ struct xe_hw_engine_class_intf { * @defaults: default scheduling properties */ struct { - /** @set_job_timeout: Set job timeout in ms for engine */ + /** @sched_props.set_job_timeout: Set job timeout in ms for engine */ u32 job_timeout_ms; - /** @job_timeout_min: Min job timeout in ms for engine */ + /** @sched_props.job_timeout_min: Min job timeout in ms for engine */ u32 job_timeout_min; - /** @job_timeout_max: Max job timeout in ms for engine */ + /** @sched_props.job_timeout_max: Max job timeout in ms for engine */ u32 job_timeout_max; - /** @timeslice_us: timeslice period in micro-seconds */ + /** @sched_props.timeslice_us: timeslice period in micro-seconds */ u32 timeslice_us; - /** @timeslice_min: min timeslice period in micro-seconds */ + /** @sched_props.timeslice_min: min timeslice period in micro-seconds */ u32 timeslice_min; - /** @timeslice_max: max timeslice period in micro-seconds */ + /** @sched_props.timeslice_max: max timeslice period in micro-seconds */ u32 timeslice_max; - /** @preempt_timeout_us: preemption timeout in micro-seconds */ + /** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */ u32 preempt_timeout_us; - /** @preempt_timeout_min: min preemption timeout in micro-seconds */ + /** @sched_props.preempt_timeout_min: min preemption timeout in micro-seconds */ u32 preempt_timeout_min; - /** @preempt_timeout_max: max preemption timeout in micro-seconds */ + /** @sched_props.preempt_timeout_max: max preemption timeout in micro-seconds */ u32 preempt_timeout_max; } sched_props, defaults; }; @@ -116,6 +116,8 @@ struct xe_hw_engine 
{ u16 instance; /** @logical_instance: logical instance of this hw engine */ u16 logical_instance; + /** @irq_offset: IRQ offset of this hw engine */ + u16 irq_offset; /** @mmio_base: MMIO base address of this hw engine*/ u32 mmio_base; /** @@ -162,62 +164,52 @@ struct xe_hw_engine_snapshot { u16 logical_instance; /** @forcewake: Force Wake information snapshot */ struct { - /** @domain: force wake domain of this hw engine */ + /** @forcewake.domain: force wake domain of this hw engine */ enum xe_force_wake_domains domain; - /** @ref: Forcewake ref for the above domain */ + /** @forcewake.ref: Forcewake ref for the above domain */ int ref; } forcewake; /** @mmio_base: MMIO base address of this hw engine*/ u32 mmio_base; /** @reg: Useful MMIO register snapshot */ struct { - /** @ring_hwstam: RING_HWSTAM */ + /** @reg.ring_execlist_status: RING_EXECLIST_STATUS */ + u64 ring_execlist_status; + /** @reg.ring_execlist_sq_contents: RING_EXECLIST_SQ_CONTENTS */ + u64 ring_execlist_sq_contents; + /** @reg.ring_acthd: RING_ACTHD */ + u64 ring_acthd; + /** @reg.ring_bbaddr: RING_BBADDR */ + u64 ring_bbaddr; + /** @reg.ring_dma_fadd: RING_DMA_FADD */ + u64 ring_dma_fadd; + /** @reg.ring_hwstam: RING_HWSTAM */ u32 ring_hwstam; - /** @ring_hws_pga: RING_HWS_PGA */ + /** @reg.ring_hws_pga: RING_HWS_PGA */ u32 ring_hws_pga; - /** @ring_execlist_status_lo: RING_EXECLIST_STATUS_LO */ - u32 ring_execlist_status_lo; - /** @ring_execlist_status_hi: RING_EXECLIST_STATUS_HI */ - u32 ring_execlist_status_hi; - /** @ring_execlist_sq_contents_lo: RING_EXECLIST_SQ_CONTENTS */ - u32 ring_execlist_sq_contents_lo; - /** @ring_execlist_sq_contents_hi: RING_EXECLIST_SQ_CONTENTS + 4 */ - u32 ring_execlist_sq_contents_hi; - /** @ring_start: RING_START */ + /** @reg.ring_start: RING_START */ u32 ring_start; - /** @ring_head: RING_HEAD */ + /** @reg.ring_head: RING_HEAD */ u32 ring_head; - /** @ring_tail: RING_TAIL */ + /** @reg.ring_tail: RING_TAIL */ u32 ring_tail; - /** @ring_ctl: RING_CTL */ + /** @reg.ring_ctl: RING_CTL */ u32 ring_ctl; - /** @ring_mi_mode: RING_MI_MODE */ + /** @reg.ring_mi_mode: RING_MI_MODE */ u32 ring_mi_mode; - /** @ring_mode: RING_MODE */ + /** @reg.ring_mode: RING_MODE */ u32 ring_mode; - /** @ring_imr: RING_IMR */ + /** @reg.ring_imr: RING_IMR */ u32 ring_imr; - /** @ring_esr: RING_ESR */ + /** @reg.ring_esr: RING_ESR */ u32 ring_esr; - /** @ring_emr: RING_EMR */ + /** @reg.ring_emr: RING_EMR */ u32 ring_emr; - /** @ring_eir: RING_EIR */ + /** @reg.ring_eir: RING_EIR */ u32 ring_eir; - /** @ring_acthd_udw: RING_ACTHD_UDW */ - u32 ring_acthd_udw; - /** @ring_acthd: RING_ACTHD */ - u32 ring_acthd; - /** @ring_bbaddr_udw: RING_BBADDR_UDW */ - u32 ring_bbaddr_udw; - /** @ring_bbaddr: RING_BBADDR */ - u32 ring_bbaddr; - /** @ring_dma_fadd_udw: RING_DMA_FADD_UDW */ - u32 ring_dma_fadd_udw; - /** @ring_dma_fadd: RING_DMA_FADD */ - u32 ring_dma_fadd; - /** @ipehr: IPEHR */ + /** @reg.ipehr: IPEHR */ u32 ipehr; - /** @rcu_mode: RCU_MODE */ + /** @reg.rcu_mode: RCU_MODE */ u32 rcu_mode; } reg; }; diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index 174ed2185481..b82233a41606 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -10,12 +10,14 @@ #include <drm/drm_managed.h> #include "regs/xe_gt_regs.h" #include "regs/xe_mchbar_regs.h" +#include "regs/xe_pcode_regs.h" #include "xe_device.h" #include "xe_gt.h" #include "xe_hwmon.h" #include "xe_mmio.h" #include "xe_pcode.h" #include "xe_pcode_api.h" +#include "xe_sriov.h" enum xe_hwmon_reg { 
REG_PKG_RAPL_LIMIT, @@ -77,32 +79,32 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg) switch (hwmon_reg) { case REG_PKG_RAPL_LIMIT: - if (xe->info.platform == XE_DG2) - reg = PCU_CR_PACKAGE_RAPL_LIMIT; - else if (xe->info.platform == XE_PVC) + if (xe->info.platform == XE_PVC) reg = PVC_GT0_PACKAGE_RAPL_LIMIT; + else if (xe->info.platform == XE_DG2) + reg = PCU_CR_PACKAGE_RAPL_LIMIT; break; case REG_PKG_POWER_SKU: - if (xe->info.platform == XE_DG2) - reg = PCU_CR_PACKAGE_POWER_SKU; - else if (xe->info.platform == XE_PVC) + if (xe->info.platform == XE_PVC) reg = PVC_GT0_PACKAGE_POWER_SKU; + else if (xe->info.platform == XE_DG2) + reg = PCU_CR_PACKAGE_POWER_SKU; break; case REG_PKG_POWER_SKU_UNIT: - if (xe->info.platform == XE_DG2) - reg = PCU_CR_PACKAGE_POWER_SKU_UNIT; - else if (xe->info.platform == XE_PVC) + if (xe->info.platform == XE_PVC) reg = PVC_GT0_PACKAGE_POWER_SKU_UNIT; + else if (xe->info.platform == XE_DG2) + reg = PCU_CR_PACKAGE_POWER_SKU_UNIT; break; case REG_GT_PERF_STATUS: if (xe->info.platform == XE_DG2) reg = GT_PERF_STATUS; break; case REG_PKG_ENERGY_STATUS: - if (xe->info.platform == XE_DG2) - reg = PCU_CR_PACKAGE_ENERGY_STATUS; - else if (xe->info.platform == XE_PVC) + if (xe->info.platform == XE_PVC) reg = PVC_GT0_PLATFORM_ENERGY_STATUS; + else if (xe->info.platform == XE_DG2) + reg = PCU_CR_PACKAGE_ENERGY_STATUS; break; default: drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg); @@ -402,7 +404,7 @@ static const struct attribute_group *hwmon_groups[] = { NULL }; -static const struct hwmon_channel_info *hwmon_info[] = { +static const struct hwmon_channel_info * const hwmon_info[] = { HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT), HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT), HWMON_CHANNEL_INFO(in, HWMON_I_INPUT), @@ -745,6 +747,10 @@ void xe_hwmon_register(struct xe_device *xe) if (!IS_DGFX(xe)) return; + /* hwmon is not available on VFs */ + if (IS_SRIOV_VF(xe)) + return; + hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL); if (!hwmon) return; diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index d1f5ba4bb745..2f5d179e0d00 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -9,15 +9,18 @@ #include <drm/drm_managed.h> +#include "display/xe_display.h" #include "regs/xe_gt_regs.h" #include "regs/xe_regs.h" #include "xe_device.h" -#include "xe_display.h" #include "xe_drv.h" +#include "xe_gsc_proxy.h" #include "xe_gt.h" #include "xe_guc.h" #include "xe_hw_engine.h" +#include "xe_memirq.h" #include "xe_mmio.h" +#include "xe_sriov.h" /* * Interrupt registers for a unit are always consecutive and ordered @@ -129,6 +132,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt) u32 ccs_mask, bcs_mask; u32 irqs, dmask, smask; u32 gsc_mask = 0; + u32 heci_mask = 0; if (xe_device_uc_enabled(xe)) { irqs = GT_RENDER_USER_INTERRUPT | @@ -178,14 +182,23 @@ void xe_irq_enable_hwe(struct xe_gt *gt) xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask); xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask); - if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) + /* + * the heci2 interrupt is enabled via the same register as the + * GSCCS interrupts, but it has its own mask register. 
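+ * (GUNIT_GSC_INTR_ENABLE carries both enable bits; HECI2_RSVD_INTR_MASK + * unmasks only the heci2 bit.)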
+ */ + if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) { gsc_mask = irqs; - else if (HAS_HECI_GSCFI(xe)) + heci_mask = GSC_IRQ_INTF(1); + } else if (HAS_HECI_GSCFI(xe)) { gsc_mask = GSC_IRQ_INTF(1); + } + if (gsc_mask) { - xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask); + xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask); xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask); } + if (heci_mask) + xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16)); } } @@ -232,6 +245,8 @@ gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir) return xe_guc_irq_handler(&gt->uc.guc, iir); if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt)) return xe_guc_irq_handler(&gt->uc.guc, iir); + if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt)) + return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir); if (instance != OTHER_GUC_INSTANCE && instance != OTHER_MEDIA_GUC_INSTANCE) { @@ -249,15 +264,23 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile, if (MEDIA_VER(xe) < 13) return tile->primary_gt; - if (class == XE_ENGINE_CLASS_VIDEO_DECODE || - class == XE_ENGINE_CLASS_VIDEO_ENHANCE) + switch (class) { + case XE_ENGINE_CLASS_VIDEO_DECODE: + case XE_ENGINE_CLASS_VIDEO_ENHANCE: return tile->media_gt; - - if (class == XE_ENGINE_CLASS_OTHER && - (instance == OTHER_MEDIA_GUC_INSTANCE || instance == OTHER_GSC_INSTANCE)) - return tile->media_gt; - - return tile->primary_gt; + case XE_ENGINE_CLASS_OTHER: + switch (instance) { + case OTHER_MEDIA_GUC_INSTANCE: + case OTHER_GSC_INSTANCE: + case OTHER_GSC_HECI2_INSTANCE: + return tile->media_gt; + default: + break; + }; + fallthrough; + default: + return tile->primary_gt; + } } static void gt_irq_handler(struct xe_tile *tile, @@ -419,7 +442,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) * irq as device is inaccessible.
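* (an all-ones read is the usual signature of a PCI device that has dropped off the bus)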
*/ if (master_ctl == REG_GENMASK(31, 0)) { - dev_dbg(tile_to_xe(tile)->drm.dev, + drm_dbg(&tile_to_xe(tile)->drm, "Ignore this IRQ as device might be in DPC containment.\n"); return IRQ_HANDLED; } @@ -484,6 +507,7 @@ static void gt_irq_reset(struct xe_tile *tile) HAS_HECI_GSCFI(tile_to_xe(tile))) { xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0); xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0); + xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0); } xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0); @@ -498,6 +522,9 @@ static void xelp_irq_reset(struct xe_tile *tile) gt_irq_reset(tile); + if (IS_SRIOV_VF(tile_to_xe(tile))) + return; + mask_and_disable(tile, PCU_IRQ_OFFSET); } @@ -508,6 +535,9 @@ static void dg1_irq_reset(struct xe_tile *tile) gt_irq_reset(tile); + if (IS_SRIOV_VF(tile_to_xe(tile))) + return; + mask_and_disable(tile, PCU_IRQ_OFFSET); } @@ -518,11 +548,34 @@ static void dg1_irq_reset_mstr(struct xe_tile *tile) xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0); } +static void vf_irq_reset(struct xe_device *xe) +{ + struct xe_tile *tile; + unsigned int id; + + xe_assert(xe, IS_SRIOV_VF(xe)); + + if (GRAPHICS_VERx100(xe) < 1210) + xelp_intr_disable(xe); + else + xe_assert(xe, xe_device_has_memirq(xe)); + + for_each_tile(tile, xe, id) { + if (xe_device_has_memirq(xe)) + xe_memirq_reset(&tile->sriov.vf.memirq); + else + gt_irq_reset(tile); + } +} + static void xe_irq_reset(struct xe_device *xe) { struct xe_tile *tile; u8 id; + if (IS_SRIOV_VF(xe)) + return vf_irq_reset(xe); + for_each_tile(tile, xe, id) { if (GRAPHICS_VERx100(xe) >= 1210) dg1_irq_reset(tile); @@ -545,8 +598,26 @@ static void xe_irq_reset(struct xe_device *xe) } } +static void vf_irq_postinstall(struct xe_device *xe) +{ + struct xe_tile *tile; + unsigned int id; + + for_each_tile(tile, xe, id) + if (xe_device_has_memirq(xe)) + xe_memirq_postinstall(&tile->sriov.vf.memirq); + + if (GRAPHICS_VERx100(xe) < 1210) + xelp_intr_enable(xe, true); + else + xe_assert(xe, xe_device_has_memirq(xe)); +} + static void xe_irq_postinstall(struct xe_device *xe) { + if (IS_SRIOV_VF(xe)) + return vf_irq_postinstall(xe); + xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe)); /* @@ -563,8 +634,30 @@ static void xe_irq_postinstall(struct xe_device *xe) xelp_intr_enable(xe, true); } +static irqreturn_t vf_mem_irq_handler(int irq, void *arg) +{ + struct xe_device *xe = arg; + struct xe_tile *tile; + unsigned int id; + + spin_lock(&xe->irq.lock); + if (!xe->irq.enabled) { + spin_unlock(&xe->irq.lock); + return IRQ_NONE; + } + spin_unlock(&xe->irq.lock); + + for_each_tile(tile, xe, id) + xe_memirq_handler(&tile->sriov.vf.memirq); + + return IRQ_HANDLED; +} + static irq_handler_t xe_irq_handler(struct xe_device *xe) { + if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) + return vf_mem_irq_handler; + if (GRAPHICS_VERx100(xe) >= 1210) return dg1_irq_handler; else @@ -590,8 +683,9 @@ static void irq_uninstall(struct drm_device *drm, void *arg) int xe_irq_install(struct xe_device *xe) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + unsigned int irq_flags = PCI_IRQ_MSIX; irq_handler_t irq_handler; - int err, irq; + int err, irq, nvec; irq_handler = xe_irq_handler(xe); if (!irq_handler) { @@ -601,7 +695,19 @@ int xe_irq_install(struct xe_device *xe) xe_irq_reset(xe); - err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX); + nvec = pci_msix_vec_count(pdev); + if (nvec <= 0) { + if (nvec == -EINVAL) { + /* MSIX capability is not supported in the device, using MSI */ + irq_flags = PCI_IRQ_MSI; + nvec = 1; + } else { + drm_err(&xe->drm, "MSIX: 
Failed getting count\n"); + return nvec; + } + } + + err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags); if (err < 0) { drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err); return err; diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 0ec5ad2539f1..8c85e90220de 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -19,6 +19,8 @@ #include "xe_gt_printk.h" #include "xe_hw_fence.h" #include "xe_map.h" +#include "xe_memirq.h" +#include "xe_sriov.h" #include "xe_vm.h" #define LRC_VALID (1 << 0) @@ -532,6 +534,27 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe) /* TODO: Timestamp */ } +static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe) +{ + struct xe_memirq *memirq = >_to_tile(hwe->gt)->sriov.vf.memirq; + struct xe_device *xe = gt_to_xe(hwe->gt); + + if (!IS_SRIOV_VF(xe) || !xe_device_has_memirq(xe)) + return; + + regs[CTX_LRM_INT_MASK_ENABLE] = MI_LOAD_REGISTER_MEM | + MI_LRI_LRM_CS_MMIO | MI_LRM_USE_GGTT; + regs[CTX_INT_MASK_ENABLE_REG] = RING_IMR(0).addr; + regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq); + + regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | + MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED; + regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr; + regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq); + regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr; + regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq); +} + static int lrc_ring_mi_mode(struct xe_hw_engine *hwe) { struct xe_device *xe = gt_to_xe(hwe->gt); @@ -667,6 +690,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe) regs = data + LRC_PPHWSP_SIZE; set_offsets(regs, reg_offsets(xe, hwe->class), hwe); set_context_control(regs, hwe); + set_memory_based_intr(regs, hwe); reset_stop_ring(regs, hwe); return data; @@ -964,6 +988,20 @@ static int dump_mi_command(struct drm_printer *p, drm_printf(p, " - %#6x = %#010x\n", dw[i], dw[i + 1]); return numdw; + case MI_LOAD_REGISTER_MEM & MI_OPCODE: + drm_printf(p, "[%#010x] MI_LOAD_REGISTER_MEM: %s%s\n", + inst_header, + dw[0] & MI_LRI_LRM_CS_MMIO ? "CS_MMIO " : "", + dw[0] & MI_LRM_USE_GGTT ? "USE_GGTT " : ""); + if (numdw == 4) + drm_printf(p, " - %#6x = %#010llx\n", + dw[1], ((u64)(dw[3]) << 32 | (u64)(dw[2]))); + else + drm_printf(p, " - %*ph (%s)\n", + (int)sizeof(u32) * (numdw - 1), dw + 1, + numdw < 4 ? 
"truncated" : "malformed"); + return numdw; + case MI_FORCE_WAKEUP: drm_printf(p, "[%#010x] MI_FORCE_WAKEUP\n", inst_header); return numdw; diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h index 78220336062c..24f20ed66fd1 100644 --- a/drivers/gpu/drm/xe/xe_lrc_types.h +++ b/drivers/gpu/drm/xe/xe_lrc_types.h @@ -28,11 +28,11 @@ struct xe_lrc { /** @ring: submission ring state */ struct { - /** @size: size of submission ring */ + /** @ring.size: size of submission ring */ u32 size; - /** @tail: tail of submission ring */ + /** @ring.tail: tail of submission ring */ u32 tail; - /** @old_tail: shadow of tail */ + /** @ring.old_tail: shadow of tail */ u32 old_tail; } ring; diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c new file mode 100644 index 000000000000..76e95535d7f6 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_memirq.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/drm_managed.h> + +#include "regs/xe_gt_regs.h" +#include "regs/xe_guc_regs.h" +#include "regs/xe_regs.h" + +#include "xe_assert.h" +#include "xe_bo.h" +#include "xe_device.h" +#include "xe_device_types.h" +#include "xe_gt.h" +#include "xe_gt_printk.h" +#include "xe_guc.h" +#include "xe_hw_engine.h" +#include "xe_map.h" +#include "xe_memirq.h" +#include "xe_sriov.h" +#include "xe_sriov_printk.h" + +#define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition) +#define memirq_debug(m, msg...) xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg) + +static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq) +{ + return container_of(memirq, struct xe_tile, sriov.vf.memirq); +} + +static struct xe_device *memirq_to_xe(struct xe_memirq *memirq) +{ + return tile_to_xe(memirq_to_tile(memirq)); +} + +static const char *guc_name(struct xe_guc *guc) +{ + return xe_gt_is_media_type(guc_to_gt(guc)) ? "media GuC" : "GuC"; +} + +/** + * DOC: Memory Based Interrupts + * + * MMIO register based interrupts infrastructure used for non-virtualized mode + * or SRIOV-8 (which supports 8 Virtual Functions) does not scale efficiently + * to allow delivering interrupts to a large number of Virtual machines or + * containers. Memory based interrupt status reporting provides an efficient + * and scalable infrastructure. + * + * For memory based interrupt status reporting hardware sequence is: + * * Engine writes the interrupt event to memory + * (Pointer to memory location is provided by SW. This memory surface must + * be mapped to system memory and must be marked as un-cacheable (UC) on + * Graphics IP Caches) + * * Engine triggers an interrupt to host. + */ + +/** + * DOC: Memory Based Interrupts Page Layout + * + * `Memory Based Interrupts`_ requires three different objects, which are + * called "page" in the specs, even if they aren't page-sized or aligned. + * + * To simplify the code we allocate a single page size object and then use + * offsets to embedded "pages". The address of those "pages" are then + * programmed in the HW via LRI and LRM in the context image. + * + * - _`Interrupt Status Report Page`: this page contains the interrupt + * status vectors for each unit. Each bit in the interrupt vectors is + * converted to a byte, with the byte being set to 0xFF when an + * interrupt is triggered; interrupt vectors are 16b big so each unit + * gets 16B. One space is reserved for each bit in one of the + * GT_INTR_DWx registers, so this object needs a total of 1024B. 
+ * This object needs to be 4KiB aligned. + * + * - _`Interrupt Source Report Page`: this is the equivalent of the + * GEN11_GT_INTR_DWx registers, with each bit in those registers being + * mapped to a byte here. The offsets are the same, just bytes instead + * of bits. This object needs to be cacheline aligned. + * + * - Interrupt Mask: the HW needs a location to fetch the interrupt + * mask vector to be used by the LRM in the context, so we just use + * the next available space in the interrupt page. + * + * :: + * + * 0x0000 +===========+ <== Interrupt Status Report Page + * | | + * | | ____ +----+----------------+ + * | | / | 0 | USER INTERRUPT | + * +-----------+ __/ | 1 | | + * | HWE(n) | __ | | CTX SWITCH | + * +-----------+ \ | | WAIT SEMAPHORE | + * | | \____ | 15 | | + * | | +----+----------------+ + * | | + * 0x0400 +===========+ <== Interrupt Source Report Page + * | HWE(0) | + * | HWE(1) | + * | | + * | HWE(x) | + * 0x0440 +===========+ <== Interrupt Enable Mask + * | | + * | | + * +-----------+ + */ + +static void __release_xe_bo(struct drm_device *drm, void *arg) +{ + struct xe_bo *bo = arg; + + xe_bo_unpin_map_no_vm(bo); +} + +static int memirq_alloc_pages(struct xe_memirq *memirq) +{ + struct xe_device *xe = memirq_to_xe(memirq); + struct xe_tile *tile = memirq_to_tile(memirq); + struct xe_bo *bo; + int err; + + BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64)); + BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K)); + + /* XXX: convert to managed bo */ + bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, + ttm_bo_type_kernel, + XE_BO_CREATE_SYSTEM_BIT | + XE_BO_CREATE_GGTT_BIT | + XE_BO_NEEDS_UC | + XE_BO_NEEDS_CPU_ACCESS); + if (IS_ERR(bo)) { + err = PTR_ERR(bo); + goto out; + } + + memirq_assert(memirq, !xe_bo_is_vram(bo)); + memirq_assert(memirq, !memirq->bo); + + iosys_map_memset(&bo->vmap, 0, 0, SZ_4K); + + memirq->bo = bo; + memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET); + memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET); + memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET); + + memirq_assert(memirq, !memirq->source.is_iomem); + memirq_assert(memirq, !memirq->status.is_iomem); + memirq_assert(memirq, !memirq->mask.is_iomem); + + memirq_debug(memirq, "page offsets: source %#x status %#x\n", + xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq)); + + return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo); + +out: + xe_sriov_err(memirq_to_xe(memirq), + "Failed to allocate memirq page (%pe)\n", ERR_PTR(err)); + return err; +} + +static void memirq_set_enable(struct xe_memirq *memirq, bool enable) +{ + iosys_map_wr(&memirq->mask, 0, u32, enable ? GENMASK(15, 0) : 0); + + memirq->enabled = enable; +} + +/** + * xe_memirq_init - Initialize data used by `Memory Based Interrupts`_. + * @memirq: the &xe_memirq to initialize + * + * Allocate `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_ + * used by `Memory Based Interrupts`_. + * + * These allocations are managed and will be implicitly released on unload. + * + * Note: This function shall be called only by the VF driver. + * + * If this function fails then VF driver won't be able to operate correctly. + * If `Memory Based Interrupts`_ are not used this function will return 0. + * + * Return: 0 on success or a negative error code on failure. 
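 *
 * Editor's note (not part of the patch): two quick sanity checks on the
 * pieces above. First, the sizing in the page-layout DOC comment: two
 * GT_INTR_DWx registers of 32 bits each give 64 interrupt sources, and
 * each source gets a 16-byte status vector (one byte per bit of a 16-bit
 * vector), so the status page indeed needs 64 * 16 = 1024 bytes. Second,
 * a minimal sketch of how a VF probe path might call this initializer
 * (hypothetical caller -- the real call site is not in this hunk):
 *
 *   struct xe_tile *tile;
 *   unsigned int id;
 *   int err;
 *
 *   for_each_tile(tile, xe, id) {
 *           err = xe_memirq_init(&tile->sriov.vf.memirq);
 *           if (err)
 *                   return err;
 *   }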
+ */ +int xe_memirq_init(struct xe_memirq *memirq) +{ + struct xe_device *xe = memirq_to_xe(memirq); + int err; + + memirq_assert(memirq, IS_SRIOV_VF(xe)); + + if (!xe_device_has_memirq(xe)) + return 0; + + err = memirq_alloc_pages(memirq); + if (unlikely(err)) + return err; + + /* we need to start with all irqs enabled */ + memirq_set_enable(memirq, true); + + return 0; +} + +/** + * xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_. + * @memirq: the &xe_memirq to query + * + * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * and xe_memirq_init() didn't fail. + * + * Return: GGTT's offset of the `Interrupt Source Report Page`_. + */ +u32 xe_memirq_source_ptr(struct xe_memirq *memirq) +{ + memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, memirq->bo); + + return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET; +} + +/** + * xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_. + * @memirq: the &xe_memirq to query + * + * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * and xe_memirq_init() didn't fail. + * + * Return: GGTT's offset of the `Interrupt Status Report Page`_. + */ +u32 xe_memirq_status_ptr(struct xe_memirq *memirq) +{ + memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, memirq->bo); + + return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET; +} + +/** + * xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask. + * @memirq: the &xe_memirq to query + * + * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * and xe_memirq_init() didn't fail. + * + * Return: GGTT's offset of the Interrupt Enable Mask. + */ +u32 xe_memirq_enable_ptr(struct xe_memirq *memirq) +{ + memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, memirq->bo); + + return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET; +} + +/** + * xe_memirq_init_guc - Prepare GuC for `Memory Based Interrupts`_. + * @memirq: the &xe_memirq + * @guc: the &xe_guc to setup + * + * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_ + * to be used by the GuC when `Memory Based Interrupts`_ are required. + * + * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * and xe_memirq_init() didn't fail. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc) +{ + bool is_media = xe_gt_is_media_type(guc_to_gt(guc)); + u32 offset = is_media ? 
ilog2(INTR_MGUC) : ilog2(INTR_GUC); + u32 source, status; + int err; + + memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, memirq->bo); + + source = xe_memirq_source_ptr(memirq) + offset; + status = xe_memirq_status_ptr(memirq) + offset * SZ_16; + + err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY, + source); + if (unlikely(err)) + goto failed; + + err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY, + status); + if (unlikely(err)) + goto failed; + + return 0; + +failed: + xe_sriov_err(memirq_to_xe(memirq), + "Failed to setup report pages in %s (%pe)\n", + guc_name(guc), ERR_PTR(err)); + return err; +} + +/** + * xe_memirq_reset - Disable processing of `Memory Based Interrupts`_. + * @memirq: struct xe_memirq + * + * This is part of the driver IRQ setup flow. + * + * This function shall only be used by the VF driver on platforms that use + * `Memory Based Interrupts`_. + */ +void xe_memirq_reset(struct xe_memirq *memirq) +{ + memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + + if (memirq->bo) + memirq_set_enable(memirq, false); +} + +/** + * xe_memirq_postinstall - Enable processing of `Memory Based Interrupts`_. + * @memirq: the &xe_memirq + * + * This is part of the driver IRQ setup flow. + * + * This function shall only be used by the VF driver on platforms that use + * `Memory Based Interrupts`_. + */ +void xe_memirq_postinstall(struct xe_memirq *memirq) +{ + memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + + if (memirq->bo) + memirq_set_enable(memirq, true); +} + +static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector, + u16 offset, const char *name) +{ + u8 value; + + value = iosys_map_rd(vector, offset, u8); + if (value) { + if (value != 0xff) + xe_sriov_err_ratelimited(memirq_to_xe(memirq), + "Unexpected memirq value %#x from %s at %u\n", + value, name, offset); + iosys_map_wr(vector, offset, u8, 0x00); + } + + return value; +} + +static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status, + struct xe_hw_engine *hwe) +{ + memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr); + + if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name)) + xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT); +} + +static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status, + struct xe_guc *guc) +{ + const char *name = guc_name(guc); + + memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr); + + if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name)) + xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST); +} + +/** + * xe_memirq_handler - The `Memory Based Interrupts`_ Handler. + * @memirq: the &xe_memirq + * + * This function reads and dispatches `Memory Based Interrupts`. 
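 *
 * Editor's note -- the dispatch below reduces to the following shape
 * (illustrative pseudo-code, not part of the patch): for every hw engine
 * on this tile, test the engine's byte in the source page; if it is set
 * (0xff, written by HW), clear it and scan the engine's 16-byte status
 * vector at hwe->irq_offset * 16, forwarding each set bit to
 * xe_hw_engine_handle_irq(). The GuC (and the media GuC, when present)
 * is handled the same way, using ilog2(INTR_GUC) / ilog2(INTR_MGUC) as
 * the offset instead of an engine's irq_offset.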
+ */ +void xe_memirq_handler(struct xe_memirq *memirq) +{ + struct xe_device *xe = memirq_to_xe(memirq); + struct xe_tile *tile = memirq_to_tile(memirq); + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + struct iosys_map map; + unsigned int gtid; + struct xe_gt *gt; + + if (!memirq->bo) + return; + + memirq_assert(memirq, !memirq->source.is_iomem); + memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr); + memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32); + + for_each_gt(gt, xe, gtid) { + if (gt->tile != tile) + continue; + + for_each_hw_engine(hwe, gt, id) { + if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) { + map = IOSYS_MAP_INIT_OFFSET(&memirq->status, + hwe->irq_offset * SZ_16); + memirq_dispatch_engine(memirq, &map, hwe); + } + } + } + + /* GuC and media GuC (if present) must be checked separately */ + + if (memirq_received(memirq, &memirq->source, ilog2(INTR_GUC), "SRC")) { + map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_GUC) * SZ_16); + memirq_dispatch_guc(memirq, &map, &tile->primary_gt->uc.guc); + } + + if (!tile->media_gt) + return; + + if (memirq_received(memirq, &memirq->source, ilog2(INTR_MGUC), "SRC")) { + map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_MGUC) * SZ_16); + memirq_dispatch_guc(memirq, &map, &tile->media_gt->uc.guc); + } +} diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h new file mode 100644 index 000000000000..2d40d03c3095 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_memirq.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_MEMIRQ_H_ +#define _XE_MEMIRQ_H_ + +#include <linux/types.h> + +struct xe_guc; +struct xe_memirq; + +int xe_memirq_init(struct xe_memirq *memirq); + +u32 xe_memirq_source_ptr(struct xe_memirq *memirq); +u32 xe_memirq_status_ptr(struct xe_memirq *memirq); +u32 xe_memirq_enable_ptr(struct xe_memirq *memirq); + +void xe_memirq_reset(struct xe_memirq *memirq); +void xe_memirq_postinstall(struct xe_memirq *memirq); +void xe_memirq_handler(struct xe_memirq *memirq); + +int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc); + +#endif diff --git a/drivers/gpu/drm/xe/xe_memirq_types.h b/drivers/gpu/drm/xe/xe_memirq_types.h new file mode 100644 index 000000000000..625b6b8736cc --- /dev/null +++ b/drivers/gpu/drm/xe/xe_memirq_types.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_MEMIRQ_TYPES_H_ +#define _XE_MEMIRQ_TYPES_H_ + +#include <linux/iosys-map.h> + +struct xe_bo; + +/* ISR */ +#define XE_MEMIRQ_STATUS_OFFSET 0x0 +/* IIR */ +#define XE_MEMIRQ_SOURCE_OFFSET 0x400 +/* IMR */ +#define XE_MEMIRQ_ENABLE_OFFSET 0x440 + +/** + * struct xe_memirq - Data used by the `Memory Based Interrupts`_. + * + * @bo: buffer object with `Memory Based Interrupts Page Layout`_. + * @source: iosys pointer to `Interrupt Source Report Page`_. + * @status: iosys pointer to `Interrupt Status Report Page`_. + * @mask: iosys pointer to Interrupt Enable Mask. + * @enabled: internal flag used to control processing of the interrupts. 
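 *
 * Editor's note: the three iosys_map members are fixed offsets into the
 * single 4 KiB @bo -- status at 0x000, source at 0x400, mask at 0x440
 * per the XE_MEMIRQ_*_OFFSET defines above -- mirroring the classic
 * ISR/IIR/IMR register triple in memory.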
+ */ +struct xe_memirq { + struct xe_bo *bo; + struct iosys_map source; + struct iosys_map status; + struct iosys_map mask; + bool enabled; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 5c6c54624252..a66fdf2d2991 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -12,7 +12,8 @@ #include <drm/ttm/ttm_tt.h> #include <drm/xe_drm.h> -#include "generated/xe_wa_oob.h" +#include <generated/xe_wa_oob.h> + #include "instructions/xe_mi_commands.h" #include "regs/xe_gpu_commands.h" #include "tests/xe_test.h" @@ -71,6 +72,16 @@ struct xe_migrate { #define NUM_KERNEL_PDE 17 #define NUM_PT_SLOTS 32 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M +#define MAX_NUM_PTE 512 + +/* + * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest + * legal value accepted. Since that instruction field is always stored in + * (val-2) format, this translates to 0x400 dwords for the true maximum length + * of the instruction. Subtracting the instruction header (1 dword) and + * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values. + */ +#define MAX_PTE_PER_SDI 0x1FE /** * xe_tile_migrate_engine() - Get this tile's migrate engine. @@ -170,11 +181,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, if (!IS_DGFX(xe)) { /* Write out batch too */ m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; - if (xe->info.has_usm) { - batch = tile->primary_gt->usm.bb_pool->bo; - m->usm_batch_base_ofs = m->batch_base_ofs; - } - for (i = 0; i < batch->size; i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : XE_PAGE_SIZE) { @@ -185,6 +191,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, entry); level++; } + if (xe->info.has_usm) { + xe_tile_assert(tile, batch->size == SZ_1M); + + batch = tile->primary_gt->usm.bb_pool->bo; + m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M; + xe_tile_assert(tile, batch->size == SZ_512K); + + for (i = 0; i < batch->size; + i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : + XE_PAGE_SIZE) { + entry = vm->pt_ops->pte_encode_bo(batch, i, + pat_index, 0); + + xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, + entry); + level++; + } + } } else { u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); @@ -347,7 +371,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe, EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_PERMANENT | - EXEC_QUEUE_FLAG_HIGH_PRIORITY); + EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0); } else { m->q = xe_exec_queue_create_class(xe, primary_gt, vm, XE_ENGINE_CLASS_COPY, @@ -444,7 +468,7 @@ static u32 pte_update_size(struct xe_migrate *m, *L0_ofs = xe_migrate_vm_addr(pt_ofs, 0); /* MI_STORE_DATA_IMM */ - cmds += 3 * DIV_ROUND_UP(num_4k_pages, 0x1ff); + cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI); /* PDE qwords */ cmds += num_4k_pages * 2; @@ -479,7 +503,7 @@ static void emit_pte(struct xe_migrate *m, ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE); while (ptes) { - u32 chunk = min(0x1ffU, ptes); + u32 chunk = min(MAX_PTE_PER_SDI, ptes); bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); bb->cs[bb->len++] = ofs; @@ -1098,7 +1122,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, * This shouldn't be possible in practice.. might change when 16K * pages are used. Hence the assert. 
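 *
 * Editor's note -- restating the MAX_PTE_PER_SDI derivation from the top
 * of this file as arithmetic (illustrative only): the largest accepted
 * length encoding is 0x3FE, stored as (len - 2), so the instruction
 * spans 0x3FE + 2 = 0x400 dwords; subtracting 1 dword of header and
 * 2 dwords of address leaves 0x3FD dwords, i.e. 0x3FD / 2 = 0x1FE whole
 * qword PTEs per MI_STORE_DATA_IMM (rounding down), which is why the
 * 0x1ff literals below are replaced.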
*/ - xe_tile_assert(tile, update->qwords <= 0x1ff); + xe_tile_assert(tile, update->qwords < MAX_NUM_PTE); if (!ppgtt_ofs) ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile), xe_bo_addr(update->pt_bo, 0, @@ -1107,7 +1131,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, do { u64 addr = ppgtt_ofs + ofs * 8; - chunk = min(update->qwords, 0x1ffU); + chunk = min(size, MAX_PTE_PER_SDI); /* Ensure populatefn can do memset64 by aligning bb->cs */ if (!(bb->len & 1)) @@ -1204,8 +1228,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q, } if (q) { fence = xe_exec_queue_last_fence_get(q, vm); - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + dma_fence_put(fence); return false; + } + dma_fence_put(fence); } return true; @@ -1283,7 +1310,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, batch_size = 6 + num_updates * 2; for (i = 0; i < num_updates; i++) { - u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, 0x1ff); + u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI); /* align noop + MI_STORE_DATA_IMM cmd prefix */ batch_size += 4 * num_cmds + updates[i].qwords * 2; diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 5f6b53ea5528..e3db3a178760 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -20,6 +20,7 @@ #include "xe_gt_mcr.h" #include "xe_macros.h" #include "xe_module.h" +#include "xe_sriov.h" #include "xe_tile.h" #define XEHP_MTCFG_ADDR XE_REG(0x101800) @@ -363,13 +364,19 @@ static int xe_verify_lmem_ready(struct xe_device *xe) { struct xe_gt *gt = xe_root_mmio_gt(xe); + if (!IS_DGFX(xe)) + return 0; + + if (IS_SRIOV_VF(xe)) + return 0; + /* * The boot firmware initializes local memory and assesses its health. * If memory training fails, the punit will have been instructed to * keep the GT powered down; we won't be able to communicate with it * and we should not continue with driver initialization. 
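 *
 * Editor's note (equivalent restatement, not part of the patch): after
 * this hunk the function is a chain of guard clauses -- integrated parts
 * and SR-IOV VFs return 0 early, and only bare-metal discrete devices
 * reach the GU_CNTL readback, i.e. roughly:
 *
 *   if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
 *           return 0;
 *   if (!(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT))
 *           return -ENODEV;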
*/ - if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) { + if (!(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) { drm_err(&xe->drm, "VRAM not initialized by firmware\n"); return -ENODEV; } diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c index ef79552e4f2f..609d997b3e9b 100644 --- a/drivers/gpu/drm/xe/xe_mocs.c +++ b/drivers/gpu/drm/xe/xe_mocs.c @@ -13,6 +13,7 @@ #include "xe_gt_mcr.h" #include "xe_mmio.h" #include "xe_platform_types.h" +#include "xe_sriov.h" #include "xe_step_types.h" #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) @@ -290,18 +291,6 @@ static const struct xe_mocs_entry dg2_mocs_desc[] = { MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)), }; -static const struct xe_mocs_entry dg2_mocs_desc_g10_ax[] = { - /* Wa_14011441408: Set Go to Memory for MOCS#0 */ - MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)), - /* UC - Coherent; GO:Memory */ - MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)), - /* UC - Non-Coherent; GO:Memory */ - MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)), - - /* WB - LC */ - MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)), -}; - static const struct xe_mocs_entry pvc_mocs_desc[] = { /* Error */ MOCS_ENTRY(0, 0, L3_3_WB), @@ -409,15 +398,8 @@ static unsigned int get_mocs_settings(struct xe_device *xe, info->unused_entries_index = 1; break; case XE_DG2: - if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G10 && - xe->info.step.graphics >= STEP_A0 && - xe->info.step.graphics <= STEP_B0) { - info->size = ARRAY_SIZE(dg2_mocs_desc_g10_ax); - info->table = dg2_mocs_desc_g10_ax; - } else { - info->size = ARRAY_SIZE(dg2_mocs_desc); - info->table = dg2_mocs_desc; - } + info->size = ARRAY_SIZE(dg2_mocs_desc); + info->table = dg2_mocs_desc; info->uc_index = 1; info->n_entries = XELP_NUM_MOCS_ENTRIES; info->unused_entries_index = 3; @@ -558,6 +540,9 @@ void xe_mocs_init(struct xe_gt *gt) struct xe_mocs_info table; unsigned int flags; + if (IS_SRIOV_VF(gt_to_xe(gt))) + return; + /* * MOCS settings are split between "GLOB_MOCS" and/or "LNCFCMOCS" * registers depending on platform. 
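/*
 * Editor's note on the xe_mocs.c hunks above (a sketch, not part of the
 * patch): the DG2 G10 A0..B0 workaround table (Wa_14011441408) is
 * dropped, so every DG2 part now uses dg2_mocs_desc, and xe_mocs_init()
 * bails out early on SR-IOV VFs, presumably because the MOCS registers
 * are owned by the PF. The VF guard matches the pattern used throughout
 * this series:
 *
 *   if (IS_SRIOV_VF(gt_to_xe(gt)))
 *           return;
 */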
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c index 1ff6bc79e7d4..e148934d554b 100644 --- a/drivers/gpu/drm/xe/xe_pat.c +++ b/drivers/gpu/drm/xe/xe_pat.c @@ -13,6 +13,7 @@ #include "xe_gt.h" #include "xe_gt_mcr.h" #include "xe_mmio.h" +#include "xe_sriov.h" #define _PAT_ATS 0x47fc #define _PAT_INDEX(index) _PICK_EVEN_2RANGES(index, 8, \ @@ -433,6 +434,10 @@ void xe_pat_init_early(struct xe_device *xe) drm_err(&xe->drm, "Missing PAT table for platform with graphics version %d.%02d!\n", GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100); } + + /* VFs can't program nor dump PAT settings */ + if (IS_SRIOV_VF(xe)) + xe->pat.ops = NULL; } void xe_pat_init(struct xe_gt *gt) diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index dcc5ded1558e..557f2d88a8c1 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -15,9 +15,9 @@ #include <drm/drm_drv.h> #include <drm/xe_pciids.h> +#include "display/xe_display.h" #include "regs/xe_gt_regs.h" #include "xe_device.h" -#include "xe_display.h" #include "xe_drv.h" #include "xe_gt.h" #include "xe_macros.h" @@ -165,7 +165,7 @@ static const struct xe_graphics_desc graphics_xelpg = { .has_asid = 1, \ .has_flat_ccs = 1, \ .has_range_tlb_invalidation = 1, \ - .has_usm = 0 /* FIXME: implementation missing */, \ + .has_usm = 1, \ .va_bits = 48, \ .vm_max_level = 4, \ .hw_engine_mask = \ @@ -340,14 +340,14 @@ static const struct xe_device_desc lnl_desc = { __diag_pop(); /* Map of GMD_ID values to graphics IP */ -static struct gmdid_map graphics_ip_map[] = { +static const struct gmdid_map graphics_ip_map[] = { { 1270, &graphics_xelpg }, { 1271, &graphics_xelpg }, { 2004, &graphics_xe2 }, }; /* Map of GMD_ID values to media IP */ -static struct gmdid_map media_ip_map[] = { +static const struct gmdid_map media_ip_map[] = { { 1300, &media_xelpmp }, { 2000, &media_xe2 }, }; @@ -774,6 +774,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) str_yes_no(xe_device_has_sriov(xe)), xe_sriov_mode_to_string(xe_device_sriov_mode(xe))); + xe_pm_init_early(xe); + err = xe_device_probe(xe); if (err) return err; diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h index 5935cfe30204..f153ce96f69a 100644 --- a/drivers/gpu/drm/xe/xe_pcode_api.h +++ b/drivers/gpu/drm/xe/xe_pcode_api.h @@ -42,6 +42,13 @@ #define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */ #define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0) +#define PCODE_FREQUENCY_CONFIG 0x6e +/* Frequency Config Sub Commands (param1) */ +#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0 +#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1 +/* Domain IDs (param2) */ +#define PCODE_MBOX_DOMAIN_HBM 0x2 + struct pcode_err_decode { int errno; const char *str; diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index b429c2876a76..ab283e9a8b4e 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -10,11 +10,11 @@ #include <drm/drm_managed.h> #include <drm/ttm/ttm_placement.h> +#include "display/xe_display.h" #include "xe_bo.h" #include "xe_bo_evict.h" #include "xe_device.h" #include "xe_device_sysfs.h" -#include "xe_display.h" #include "xe_ggtt.h" #include "xe_gt.h" #include "xe_guc.h" @@ -125,17 +125,26 @@ int xe_pm_resume(struct xe_device *xe) return 0; } -static bool xe_pm_pci_d3cold_capable(struct pci_dev *pdev) +static bool xe_pm_pci_d3cold_capable(struct xe_device *xe) { + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); struct pci_dev *root_pdev; root_pdev = 
pcie_find_root_port(pdev); if (!root_pdev) return false; - /* D3Cold requires PME capability and _PR3 power resource */ - if (!pci_pme_capable(root_pdev, PCI_D3cold) || !pci_pr3_present(root_pdev)) + /* D3Cold requires PME capability */ + if (!pci_pme_capable(root_pdev, PCI_D3cold)) { + drm_dbg(&xe->drm, "d3cold: PME# not supported\n"); return false; + } + + /* D3Cold requires _PR3 power resource */ + if (!pci_pr3_present(root_pdev)) { + drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n"); + return false; + } return true; } @@ -163,17 +172,21 @@ static void xe_pm_runtime_init(struct xe_device *xe) pm_runtime_put(dev); } -void xe_pm_init(struct xe_device *xe) +void xe_pm_init_early(struct xe_device *xe) { - struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list); + drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock); +} +void xe_pm_init(struct xe_device *xe) +{ /* For now suspend/resume is only allowed with GuC */ if (!xe_device_uc_enabled(xe)) return; drmm_mutex_init(&xe->drm, &xe->d3cold.lock); - xe->d3cold.capable = xe_pm_pci_d3cold_capable(pdev); + xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe); if (xe->d3cold.capable) { xe_device_sysfs_init(xe); @@ -214,6 +227,7 @@ struct task_struct *xe_pm_read_callback_task(struct xe_device *xe) int xe_pm_runtime_suspend(struct xe_device *xe) { + struct xe_bo *bo, *on; struct xe_gt *gt; u8 id; int err = 0; @@ -247,6 +261,16 @@ int xe_pm_runtime_suspend(struct xe_device *xe) */ lock_map_acquire(&xe_device_mem_access_lockdep_map); + /* + * Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify + * also checks and delets bo entry from user fault list. + */ + mutex_lock(&xe->mem_access.vram_userfault.lock); + list_for_each_entry_safe(bo, on, + &xe->mem_access.vram_userfault.list, vram_userfault_link) + xe_bo_runtime_pm_release_mmap_offset(bo); + mutex_unlock(&xe->mem_access.vram_userfault.lock); + if (xe->d3cold.allowed) { err = xe_bo_evict_all(xe); if (err) diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h index 6b9031f7af24..64a97c6726a7 100644 --- a/drivers/gpu/drm/xe/xe_pm.h +++ b/drivers/gpu/drm/xe/xe_pm.h @@ -20,6 +20,7 @@ struct xe_device; int xe_pm_suspend(struct xe_device *xe); int xe_pm_resume(struct xe_device *xe); +void xe_pm_init_early(struct xe_device *xe); void xe_pm_init(struct xe_device *xe); void xe_pm_runtime_fini(struct xe_device *xe); int xe_pm_runtime_suspend(struct xe_device *xe); diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index e45b37c3f0c2..7f54bc3e389d 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -20,8 +20,8 @@ struct xe_pt_dir { struct xe_pt pt; - /** @dir: Directory structure for the xe_pt_walk functionality */ - struct xe_ptw_dir dir; + /** @children: Array of page-table child nodes */ + struct xe_ptw *children[XE_PDES]; }; #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) @@ -44,7 +44,7 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt) static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index) { - return container_of(pt_dir->dir.entries[index], struct xe_pt, base); + return container_of(pt_dir->children[index], struct xe_pt, base); } static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, @@ -65,6 +65,14 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, XE_PTE_NULL; } +static void xe_pt_free(struct xe_pt *pt) +{ + if (pt->level) + kfree(as_xe_pt_dir(pt)); + else + kfree(pt); +} + /** * xe_pt_create() - Create a 
page-table. * @vm: The vm to create for. @@ -85,15 +93,19 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, { struct xe_pt *pt; struct xe_bo *bo; - size_t size; int err; - size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) + - XE_PDES * sizeof(struct xe_ptw *); - pt = kzalloc(size, GFP_KERNEL); + if (level) { + struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL); + + pt = (dir) ? &dir->pt : NULL; + } else { + pt = kzalloc(sizeof(*pt), GFP_KERNEL); + } if (!pt) return ERR_PTR(-ENOMEM); + pt->level = level; bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K, ttm_bo_type_kernel, XE_BO_CREATE_VRAM_IF_DGFX(tile) | @@ -106,8 +118,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, goto err_kfree; } pt->bo = bo; - pt->level = level; - pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL; + pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL; if (vm->xef) xe_drm_client_add_bo(vm->xef->client, pt->bo); @@ -116,7 +127,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, return pt; err_kfree: - kfree(pt); + xe_pt_free(pt); return ERR_PTR(err); } @@ -193,7 +204,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred) deferred); } } - kfree(pt); + xe_pt_free(pt); } /** @@ -358,7 +369,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent, struct iosys_map *map = &parent->bo->vmap; if (unlikely(xe_child)) - parent->base.dir->entries[offset] = &xe_child->base; + parent->base.children[offset] = &xe_child->base; xe_pt_write(xe_walk->vm->xe, map, offset, pte); parent->num_live++; @@ -488,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, * this device *requires* 64K PTE size for VRAM, fail. */ if (level == 0 && !xe_parent->is_compact) { - if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) + if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) { + xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K; pte |= XE_PTE_PS64; - else if (XE_WARN_ON(xe_walk->needs_64K)) + } else if (XE_WARN_ON(xe_walk->needs_64K)) { return -EINVAL; + } } ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte); @@ -534,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, *child = &xe_child->base; /* - * Prefer the compact pagetable layout for L0 if possible. + * Prefer the compact pagetable layout for L0 if possible. Only + * possible if VMA covers entire 2MB region as compact 64k and + * 4k pages cannot be mixed within a 2MB region. * TODO: Suballocate the pt bo to avoid wasting a lot of * memory. 
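 *
 * Editor's note (illustrative): "covers" below means the VMA spans the
 * entire 2 MiB range addressed by this level-1 entry; only then may the
 * child table switch to the compact 64 KiB shifts, because 64K and 4K
 * PTEs cannot be mixed under one PDE. The new XE_VMA_PTE_64K and
 * XE_VMA_PTE_COMPACT gpuva flags record which encoding was chosen,
 * presumably so later (re)binds can detect a mismatch.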
*/ if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 && covers && xe_pt_scan_64K(addr, next, xe_walk)) { walk->shifts = xe_compact_pt_shifts; + xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT; flags |= XE_PDE_64K; xe_child->is_compact = true; } @@ -853,7 +869,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma, xe_pt_destroy(xe_pt_entry(pt_dir, j_), xe_vma_vm(vma)->flags, deferred); - pt_dir->dir.entries[j_] = &newpte->base; + pt_dir->children[j_] = &newpte->base; } kfree(entries[i].pt_entries); } @@ -861,8 +877,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma, static int xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, - struct xe_vm_pgtable_update *entries, u32 *num_entries, - bool rebind) + struct xe_vm_pgtable_update *entries, u32 *num_entries) { int err; @@ -1218,7 +1233,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue "Preparing bind, with range [%llx...%llx) engine %p.\n", xe_vma_start(vma), xe_vma_end(vma), q); - err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind); + err = xe_pt_prepare_bind(tile, vma, entries, &num_entries); if (err) goto err; xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries)); @@ -1507,7 +1522,7 @@ xe_pt_commit_unbind(struct xe_vma *vma, xe_pt_destroy(xe_pt_entry(pt_dir, i), xe_vma_vm(vma)->flags, deferred); - pt_dir->dir.entries[i] = NULL; + pt_dir->children[i] = NULL; } } } diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c index 8f6c8d063f39..b8b3d2aea492 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.c +++ b/drivers/gpu/drm/xe/xe_pt_walk.c @@ -74,7 +74,7 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level, u64 addr, u64 end, struct xe_pt_walk *walk) { pgoff_t offset = xe_pt_offset(addr, level, walk); - struct xe_ptw **entries = parent->dir ? parent->dir->entries : NULL; + struct xe_ptw **entries = parent->children ? parent->children : NULL; const struct xe_pt_walk_ops *ops = walk->ops; enum page_walk_action action; struct xe_ptw *child; diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h index ec3d1e9efa6d..5ecc4d2f0f65 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.h +++ b/drivers/gpu/drm/xe/xe_pt_walk.h @@ -8,28 +8,15 @@ #include <linux/pagewalk.h> #include <linux/types.h> -struct xe_ptw_dir; - /** * struct xe_ptw - base class for driver pagetable subclassing. - * @dir: Pointer to an array of children if any. + * @children: Pointer to an array of children if any. * * Drivers could subclass this, and if it's a page-directory, typically - * embed the xe_ptw_dir::entries array in the same allocation. + * embed an array of xe_ptw pointers. */ struct xe_ptw { - struct xe_ptw_dir *dir; -}; - -/** - * struct xe_ptw_dir - page directory structure - * @entries: Array holding page directory children. - * - * It is the responsibility of the user to ensure @entries is - * correctly sized. 
- */ -struct xe_ptw_dir { - struct xe_ptw *entries[0]; + struct xe_ptw **children; }; /** diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 7e924faeeea0..92bb06c0586e 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -198,7 +198,7 @@ static int query_engines(struct xe_device *xe, return -EINVAL; } - engines = kmalloc(size, GFP_KERNEL); + engines = kzalloc(size, GFP_KERNEL); if (!engines) return -ENOMEM; @@ -212,14 +212,10 @@ static int query_engines(struct xe_device *xe, engines->engines[i].instance.engine_instance = hwe->logical_instance; engines->engines[i].instance.gt_id = gt->info.id; - engines->engines[i].instance.pad = 0; - memset(engines->engines[i].reserved, 0, - sizeof(engines->engines[i].reserved)); i++; } - engines->pad = 0; engines->num_engines = i; if (copy_to_user(query_ptr, engines, size)) { @@ -520,6 +516,49 @@ static int query_gt_topology(struct xe_device *xe, return 0; } +static int +query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query) +{ + struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data); + size_t size = sizeof(struct drm_xe_query_uc_fw_version); + struct drm_xe_query_uc_fw_version resp; + struct xe_uc_fw_version *version = NULL; + + if (query->size == 0) { + query->size = size; + return 0; + } else if (XE_IOCTL_DBG(xe, query->size != size)) { + return -EINVAL; + } + + if (copy_from_user(&resp, query_ptr, size)) + return -EFAULT; + + if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved)) + return -EINVAL; + + switch (resp.uc_type) { + case XE_QUERY_UC_TYPE_GUC_SUBMISSION: { + struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc; + + version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY]; + break; + } + default: + return -EINVAL; + } + + resp.branch_ver = 0; + resp.major_ver = version->major; + resp.minor_ver = version->minor; + resp.patch_ver = version->patch; + + if (copy_to_user(query_ptr, &resp, size)) + return -EFAULT; + + return 0; +} + static int (* const xe_query_funcs[])(struct xe_device *xe, struct drm_xe_device_query *query) = { query_engines, @@ -529,6 +568,7 @@ static int (* const xe_query_funcs[])(struct xe_device *xe, query_hwconfig, query_gt_topology, query_engine_cycles, + query_uc_fw_version, }; int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file) diff --git a/drivers/gpu/drm/xe/xe_range_fence.c b/drivers/gpu/drm/xe/xe_range_fence.c index d35d9ec58e86..372378e89e98 100644 --- a/drivers/gpu/drm/xe/xe_range_fence.c +++ b/drivers/gpu/drm/xe/xe_range_fence.c @@ -151,6 +151,11 @@ xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last) return xe_range_fence_tree_iter_next(rfence, start, last); } +static void xe_range_fence_free(struct xe_range_fence *rfence) +{ + kfree(rfence); +} + const struct xe_range_fence_ops xe_range_fence_kfree_ops = { - .free = (void (*)(struct xe_range_fence *rfence)) kfree, + .free = xe_range_fence_free, }; diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c index 87adefb56024..440ac572f6e5 100644 --- a/drivers/gpu/drm/xe/xe_reg_sr.c +++ b/drivers/gpu/drm/xe/xe_reg_sr.c @@ -231,7 +231,7 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) if (err) goto err_force_wake; - p = drm_debug_printer(KBUILD_MODNAME); + p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL); xa_for_each(&sr->xa, reg, entry) { if (slot == RING_MAX_NONPRIV_SLOTS) { xe_gt_err(gt, diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c 
b/drivers/gpu/drm/xe/xe_reg_whitelist.c index e66ae1bdaf9c..3fa2ece7d228 100644 --- a/drivers/gpu/drm/xe/xe_reg_whitelist.c +++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c @@ -7,9 +7,11 @@ #include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" +#include "regs/xe_regs.h" #include "xe_gt_types.h" #include "xe_platform_types.h" #include "xe_rtp.h" +#include "xe_step.h" #undef XE_REG_MCR #define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1) @@ -56,6 +58,12 @@ static const struct xe_rtp_entry_sr register_whitelist[] = { RING_FORCE_TO_NONPRIV_DENY, XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, + { XE_RTP_NAME("16020183090"), + XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0), + ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(WHITELIST(CSBE_DEBUG_STATUS(RENDER_RING_BASE), 0)) + }, + {} }; diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index 1e4c06eacd98..c4edffcd4a32 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -5,7 +5,8 @@ #include "xe_ring_ops.h" -#include "generated/xe_wa_oob.h" +#include <generated/xe_wa_oob.h> + #include "instructions/xe_mi_commands.h" #include "regs/xe_engine_regs.h" #include "regs/xe_gpu_commands.h" @@ -113,6 +114,19 @@ static int emit_flush_invalidate(u32 flag, u32 *dw, int i) return i; } +static int +emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset, u32 value) +{ + dw[i++] = GFX_OP_PIPE_CONTROL(6) | bit_group_0; + dw[i++] = bit_group_1; + dw[i++] = offset; + dw[i++] = 0; + dw[i++] = value; + dw[i++] = 0; + + return i; +} + static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw, int i) { @@ -131,14 +145,7 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw, flags &= ~mask_flags; - dw[i++] = GFX_OP_PIPE_CONTROL(6); - dw[i++] = flags; - dw[i++] = LRC_PPHWSP_SCRATCH_ADDR; - dw[i++] = 0; - dw[i++] = 0; - dw[i++] = 0; - - return i; + return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_SCRATCH_ADDR, 0); } static int emit_store_imm_ppgtt_posted(u64 addr, u64 value, @@ -174,14 +181,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i) else if (job->q->class == XE_ENGINE_CLASS_COMPUTE) flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; - dw[i++] = GFX_OP_PIPE_CONTROL(6) | PIPE_CONTROL0_HDC_PIPELINE_FLUSH; - dw[i++] = flags; - dw[i++] = 0; - dw[i++] = 0; - dw[i++] = 0; - dw[i++] = 0; - - return i; + return emit_pipe_control(dw, i, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags, 0, 0); } static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int i) @@ -189,14 +189,9 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int if (hwe->class != XE_ENGINE_CLASS_RENDER) return i; - if (XE_WA(hwe->gt, 16020292621)) { - dw[i++] = GFX_OP_PIPE_CONTROL(6); - dw[i++] = PIPE_CONTROL_LRI_POST_SYNC; - dw[i++] = RING_NOPID(hwe->mmio_base).addr; - dw[i++] = 0; - dw[i++] = 0; - dw[i++] = 0; - } + if (XE_WA(hwe->gt, 16020292621)) + i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_LRI_POST_SYNC, + RING_NOPID(hwe->mmio_base).addr, 0); return i; } @@ -204,16 +199,13 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw, int i) { - dw[i++] = GFX_OP_PIPE_CONTROL(6); - dw[i++] = (stall_only ? 
PIPE_CONTROL_CS_STALL : - PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL) | - PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE; - dw[i++] = addr; - dw[i++] = 0; - dw[i++] = value; - dw[i++] = 0; /* We're thrashing one extra dword. */ + u32 flags = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_QW_WRITE; - return i; + if (!stall_only) + flags |= PIPE_CONTROL_FLUSH_ENABLE; + + return emit_pipe_control(dw, i, 0, flags, addr, value); } static u32 get_ppgtt_flag(struct xe_sched_job *job) diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index 01106a1156ad..8151ddafb940 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -274,7 +274,44 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) struct dma_fence *fence; fence = xe_exec_queue_last_fence_get(job->q, vm); - dma_fence_get(fence); return drm_sched_job_add_dependency(&job->drm, fence); } + +struct xe_sched_job_snapshot * +xe_sched_job_snapshot_capture(struct xe_sched_job *job) +{ + struct xe_exec_queue *q = job->q; + struct xe_device *xe = q->gt->tile->xe; + struct xe_sched_job_snapshot *snapshot; + size_t len = sizeof(*snapshot) + (sizeof(u64) * q->width); + u16 i; + + snapshot = kzalloc(len, GFP_ATOMIC); + if (!snapshot) + return NULL; + + snapshot->batch_addr_len = q->width; + for (i = 0; i < q->width; i++) + snapshot->batch_addr[i] = xe_device_uncanonicalize_addr(xe, job->batch_addr[i]); + + return snapshot; +} + +void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot) +{ + kfree(snapshot); +} + +void +xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, + struct drm_printer *p) +{ + u16 i; + + if (!snapshot) + return; + + for (i = 0; i < snapshot->batch_addr_len; i++) + drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]); +} diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h index 34f475ba7f50..f1a660648cf0 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.h +++ b/drivers/gpu/drm/xe/xe_sched_job.h @@ -8,6 +8,7 @@ #include "xe_sched_job_types.h" +struct drm_printer; struct xe_vm; #define XE_SCHED_HANG_LIMIT 1 @@ -77,4 +78,8 @@ xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags) bool xe_sched_job_is_migration(struct xe_exec_queue *q); +struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job *job); +void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot); +void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p); + #endif diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h index 71213ba9735b..b1d83da50a53 100644 --- a/drivers/gpu/drm/xe/xe_sched_job_types.h +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h @@ -30,11 +30,11 @@ struct xe_sched_job { struct dma_fence *fence; /** @user_fence: write back value when BB is complete */ struct { - /** @used: user fence is used */ + /** @user_fence.used: user fence is used */ bool used; - /** @addr: address to write to */ + /** @user_fence.addr: address to write to */ u64 addr; - /** @value: write back value */ + /** @user_fence.value: write back value */ u64 value; } user_fence; /** @migrate_flush_flags: Additional flush flags for migration jobs */ @@ -43,4 +43,9 @@ struct xe_sched_job { u64 batch_addr[]; }; +struct xe_sched_job_snapshot { + u16 batch_addr_len; + u64 batch_addr[]; +}; + #endif diff --git a/drivers/gpu/drm/xe/xe_sriov.c 
b/drivers/gpu/drm/xe/xe_sriov.c index 42a0e0c917a0..f295d91886b1 100644 --- a/drivers/gpu/drm/xe/xe_sriov.c +++ b/drivers/gpu/drm/xe/xe_sriov.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <drm/drm_managed.h> + #include "xe_assert.h" #include "xe_sriov.h" @@ -53,3 +55,33 @@ void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov) drm_info(&xe->drm, "Running in %s mode\n", xe_sriov_mode_to_string(xe_device_sriov_mode(xe))); } + +static void fini_sriov(struct drm_device *drm, void *arg) +{ + struct xe_device *xe = arg; + + destroy_workqueue(xe->sriov.wq); + xe->sriov.wq = NULL; +} + +/** + * xe_sriov_init - Initialize SR-IOV specific data. + * @xe: the &xe_device to initialize + * + * In this function we create dedicated workqueue that will be used + * by the SR-IOV specific workers. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_init(struct xe_device *xe) +{ + if (!IS_SRIOV(xe)) + return 0; + + xe_assert(xe, !xe->sriov.wq); + xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0); + if (!xe->sriov.wq) + return -ENOMEM; + + return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe); +} diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h index 5af73a3172b0..1545552162c9 100644 --- a/drivers/gpu/drm/xe/xe_sriov.h +++ b/drivers/gpu/drm/xe/xe_sriov.h @@ -13,6 +13,7 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode); void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov); +int xe_sriov_init(struct xe_device *xe); static inline enum xe_sriov_mode xe_device_sriov_mode(struct xe_device *xe) { diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h index 999a4311b98b..1a138108d139 100644 --- a/drivers/gpu/drm/xe/xe_sriov_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_types.h @@ -9,6 +9,18 @@ #include <linux/build_bug.h> /** + * VFID - Virtual Function Identifier + * @n: VF number + * + * Helper macro to represent Virtual Function (VF) Identifier. + * VFID(0) is used as alias to the PFID that represents Physical Function. + * + * Note: According to PCI spec, SR-IOV VF's numbers are 1-based (VF1, VF2, ...). + */ +#define VFID(n) (n) +#define PFID VFID(0) + +/** * enum xe_sriov_mode - SR-IOV mode * @XE_SRIOV_MODE_NONE: bare-metal mode (non-virtualized) * @XE_SRIOV_MODE_PF: SR-IOV Physical Function (PF) mode diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index e4c220cf9115..aab92bee1d7c 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -307,7 +307,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, /* Easy case... 
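 *
 * Editor's note (inferred from the surrounding hunks, not stated
 * explicitly in the patch): the dma_fence_get() calls are dropped here
 * and in xe_sched_job.c above because xe_exec_queue_last_fence_get()
 * now returns an already-referenced fence; callers own that reference,
 * which is also why no_in_syncs() in xe_migrate.c gains matching
 * dma_fence_put() calls.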
*/ if (!num_in_fence) { fence = xe_exec_queue_last_fence_get(q, vm); - dma_fence_get(fence); return fence; } @@ -322,7 +321,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, } } fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm); - dma_fence_get(fences[current_fence - 1]); cf = dma_fence_array_create(num_in_fence, fences, vm->composite_fence_ctx, vm->composite_fence_seqno++, diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c index 0f8d3e7fce46..0662968d7bcb 100644 --- a/drivers/gpu/drm/xe/xe_tile_sysfs.c +++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c @@ -9,6 +9,7 @@ #include "xe_tile.h" #include "xe_tile_sysfs.h" +#include "xe_vram_freq.h" static void xe_tile_sysfs_kobj_release(struct kobject *kobj) { @@ -50,6 +51,8 @@ void xe_tile_sysfs_init(struct xe_tile *tile) tile->sysfs = &kt->base; + xe_vram_freq_sysfs_init(tile); + err = drmm_add_action_or_reset(&xe->drm, tile_sysfs_fini, tile); if (err) drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n", diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h index 95163c303f3e..3b97633d81d8 100644 --- a/drivers/gpu/drm/xe/xe_trace.h +++ b/drivers/gpu/drm/xe/xe_trace.h @@ -12,6 +12,7 @@ #include <linux/tracepoint.h> #include <linux/types.h> +#include "xe_bo.h" #include "xe_bo_types.h" #include "xe_exec_queue_types.h" #include "xe_gpu_scheduler_types.h" @@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence, TP_ARGS(fence), TP_STRUCT__entry( - __field(u64, fence) + __field(struct xe_gt_tlb_invalidation_fence *, fence) __field(int, seqno) ), TP_fast_assign( - __entry->fence = (u64)fence; + __entry->fence = fence; __entry->seqno = fence->seqno; ), - TP_printk("fence=0x%016llx, seqno=%d", + TP_printk("fence=%p, seqno=%d", __entry->fence, __entry->seqno) ); @@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo, TP_STRUCT__entry( __field(size_t, size) __field(u32, flags) - __field(u64, vm) + __field(struct xe_vm *, vm) ), TP_fast_assign( __entry->size = bo->size; __entry->flags = bo->flags; - __entry->vm = (unsigned long)bo->vm; + __entry->vm = bo->vm; ), - TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx", + TP_printk("size=%zu, flags=0x%02x, vm=%p", __entry->size, __entry->flags, __entry->vm) ); @@ -100,9 +101,27 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault, TP_ARGS(bo) ); -DEFINE_EVENT(xe_bo, xe_bo_move, - TP_PROTO(struct xe_bo *bo), - TP_ARGS(bo) +TRACE_EVENT(xe_bo_move, + TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement), + TP_ARGS(bo, new_placement, old_placement), + TP_STRUCT__entry( + __field(struct xe_bo *, bo) + __field(size_t, size) + __field(u32, new_placement) + __field(u32, old_placement) + __array(char, device_id, 12) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->size = bo->size; + __entry->new_placement = new_placement; + __entry->old_placement = old_placement; + strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12); + ), + TP_printk("migrate object %p [size %zu] from %s to %s device_id:%s", + __entry->bo, __entry->size, xe_mem_type_to_name[__entry->old_placement], + xe_mem_type_to_name[__entry->new_placement], __entry->device_id) ); DECLARE_EVENT_CLASS(xe_exec_queue, @@ -327,16 +346,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence, TP_STRUCT__entry( __field(u64, ctx) __field(u32, seqno) - __field(u64, fence) + __field(struct xe_hw_fence *, fence) ), TP_fast_assign( __entry->ctx = fence->dma.context; __entry->seqno = fence->dma.seqno; - __entry->fence = (unsigned long)fence; + __entry->fence = fence; 
), - TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u", + TP_printk("ctx=0x%016llx, fence=%p, seqno=%u", __entry->ctx, __entry->fence, __entry->seqno) ); @@ -365,7 +384,7 @@ DECLARE_EVENT_CLASS(xe_vma, TP_ARGS(vma), TP_STRUCT__entry( - __field(u64, vma) + __field(struct xe_vma *, vma) __field(u32, asid) __field(u64, start) __field(u64, end) @@ -373,14 +392,14 @@ DECLARE_EVENT_CLASS(xe_vma, ), TP_fast_assign( - __entry->vma = (unsigned long)vma; + __entry->vma = vma; __entry->asid = xe_vma_vm(vma)->usm.asid; __entry->start = xe_vma_start(vma); __entry->end = xe_vma_end(vma) - 1; __entry->ptr = xe_vma_userptr(vma); ), - TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,", + TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,", __entry->vma, __entry->asid, __entry->start, __entry->end, __entry->ptr) ) @@ -465,16 +484,16 @@ DECLARE_EVENT_CLASS(xe_vm, TP_ARGS(vm), TP_STRUCT__entry( - __field(u64, vm) + __field(struct xe_vm *, vm) __field(u32, asid) ), TP_fast_assign( - __entry->vm = (unsigned long)vm; + __entry->vm = vm; __entry->asid = vm->usm.asid; ), - TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm, + TP_printk("vm=%p, asid=0x%05x", __entry->vm, __entry->asid) ); diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index e5d7d5e2bec1..3107d2a12426 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -11,7 +11,8 @@ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_range_manager.h> -#include "generated/xe_wa_oob.h" +#include <generated/xe_wa_oob.h> + #include "regs/xe_gt_regs.h" #include "regs/xe_regs.h" #include "xe_bo.h" @@ -19,6 +20,7 @@ #include "xe_gt.h" #include "xe_mmio.h" #include "xe_res_cursor.h" +#include "xe_sriov.h" #include "xe_ttm_stolen_mgr.h" #include "xe_ttm_vram_mgr.h" #include "xe_wa.h" @@ -205,7 +207,9 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe) u64 stolen_size, io_size, pgsize; int err; - if (IS_DGFX(xe)) + if (IS_SRIOV_VF(xe)) + stolen_size = 0; + else if (IS_DGFX(xe)) stolen_size = detect_bar2_dgfx(xe, mgr); else if (GRAPHICS_VERx100(xe) >= 1270) stolen_size = detect_bar2_integrated(xe, mgr); diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index 53ccd338fd8c..5c83c75bc497 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -37,7 +37,14 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { XE_RTP_ACTIONS(FIELD_SET(XE2LPM_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f))) }, - + { XE_RTP_NAME("Tuning: Compression Overfetch"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)), + XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX)), + }, + { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2004, XE_RTP_END_VERSION_UNDEFINED)), + XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN)) + }, {} }; diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c index 25e1ddfd2f86..7033f8c1b431 100644 --- a/drivers/gpu/drm/xe/xe_uc.c +++ b/drivers/gpu/drm/xe/xe_uc.c @@ -7,8 +7,10 @@ #include "xe_device.h" #include "xe_gsc.h" +#include "xe_gsc_proxy.h" #include "xe_gt.h" #include "xe_guc.h" +#include "xe_guc_db_mgr.h" #include "xe_guc_pc.h" #include "xe_guc_submit.h" #include "xe_huc.h" @@ -30,13 +32,15 @@ uc_to_xe(struct xe_uc *uc) /* Should be called once at driver load only */ int xe_uc_init(struct xe_uc *uc) { + struct 
xe_device *xe = uc_to_xe(uc); int ret; + xe_device_mem_access_get(xe); + /* * We call the GuC/HuC/GSC init functions even if GuC submission is off * to correctly move our tracking of the FW state to "disabled". */ - ret = xe_guc_init(&uc->guc); if (ret) goto err; @@ -50,7 +54,7 @@ int xe_uc_init(struct xe_uc *uc) goto err; if (!xe_device_uc_enabled(uc_to_xe(uc))) - return 0; + goto err; ret = xe_wopcm_init(&uc->wopcm); if (ret) @@ -60,9 +64,17 @@ int xe_uc_init(struct xe_uc *uc) if (ret) goto err; + ret = xe_guc_db_mgr_init(&uc->guc.dbm, ~0); + if (ret) + goto err; + + xe_device_mem_access_put(xe); + return 0; err: + xe_device_mem_access_put(xe); + return ret; } @@ -88,6 +100,10 @@ int xe_uc_init_post_hwconfig(struct xe_uc *uc) if (err) return err; + err = xe_huc_init_post_hwconfig(&uc->huc); + if (err) + return err; + return xe_gsc_init_post_hwconfig(&uc->gsc); } @@ -256,3 +272,16 @@ int xe_uc_suspend(struct xe_uc *uc) return xe_guc_suspend(&uc->guc); } + +/** + * xe_uc_remove() - Clean up the UC structures before driver removal + * @uc: the UC object + * + * This function should only act on objects/structures that must be cleaned + * before the driver removal callback is complete and therefore can't be + * deferred to a drmm action. + */ +void xe_uc_remove(struct xe_uc *uc) +{ + xe_gsc_remove(&uc->gsc); +} diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h index 5d5110c0c834..e4d4e3c99f0e 100644 --- a/drivers/gpu/drm/xe/xe_uc.h +++ b/drivers/gpu/drm/xe/xe_uc.h @@ -20,5 +20,6 @@ int xe_uc_stop(struct xe_uc *uc); int xe_uc_start(struct xe_uc *uc); int xe_uc_suspend(struct xe_uc *uc); int xe_uc_sanitize_reset(struct xe_uc *uc); +void xe_uc_remove(struct xe_uc *uc); #endif diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 9dff96dfe455..a9d25b3fa67c 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -92,6 +92,7 @@ struct uc_fw_entry { const char *path; u16 major; u16 minor; + u16 patch; bool full_ver_required; }; }; @@ -102,14 +103,15 @@ struct fw_blobs_by_type { }; #define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ - fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 7)) \ - fw_def(DG2, major_ver(i915, guc, dg2, 70, 5)) \ - fw_def(DG1, major_ver(i915, guc, dg1, 70, 5)) \ - fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 5)) \ - fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 5)) \ - fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 5)) \ - fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 5)) \ - fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 5)) + fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 19, 2)) \ + fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 19, 2)) \ + fw_def(DG2, major_ver(i915, guc, dg2, 70, 19, 2)) \ + fw_def(DG1, major_ver(i915, guc, dg1, 70, 19, 2)) \ + fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 19, 2)) \ + fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 19, 2)) \ + fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 19, 2)) \ + fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) \ + fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) #define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \ fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \ @@ -121,24 +123,24 @@ struct fw_blobs_by_type { /* for the GSC FW we match the compatibility version and not the release one */ #define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \ - fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0)) + fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0)) #define MAKE_FW_PATH(dir__, uc__, shortname__, 
version__) \ __stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin" #define fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c) \ MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a ## . ## b ## . ## c)) -#define fw_filename_major_ver(dir_, uc_, shortname_, a, b) \ +#define fw_filename_major_ver(dir_, uc_, shortname_, a, b, c) \ MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a)) #define fw_filename_no_ver(dir_, uc_, shortname_) \ MAKE_FW_PATH(dir_, uc_, shortname_, "") #define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \ { fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \ - a, b, true } -#define uc_fw_entry_major_ver(dir_, uc_, shortname_, a, b) \ - { fw_filename_major_ver(dir_, uc_, shortname_, a, b), \ - a, b } + a, b, c, true } +#define uc_fw_entry_major_ver(dir_, uc_, shortname_, a, b, c) \ + { fw_filename_major_ver(dir_, uc_, shortname_, a, b, c), \ + a, b, c } #define uc_fw_entry_no_ver(dir_, uc_, shortname_) \ { fw_filename_no_ver(dir_, uc_, shortname_), \ 0, 0 } @@ -221,6 +223,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw) uc_fw->path = entries[i].path; uc_fw->versions.wanted.major = entries[i].major; uc_fw->versions.wanted.minor = entries[i].minor; + uc_fw->versions.wanted.patch = entries[i].patch; uc_fw->full_ver_required = entries[i].full_ver_required; if (uc_fw->type == XE_UC_FW_TYPE_GSC) @@ -340,19 +343,22 @@ int xe_uc_fw_check_version_requirements(struct xe_uc_fw *uc_fw) * Otherwise, at least the major version. */ if (wanted->major != found->major || - (uc_fw->full_ver_required && wanted->minor != found->minor)) { - drm_notice(&xe->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", + (uc_fw->full_ver_required && + ((wanted->minor != found->minor) || + (wanted->patch != found->patch)))) { + drm_notice(&xe->drm, "%s firmware %s: unexpected version: %u.%u.%u != %u.%u.%u\n", xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, - found->major, found->minor, - wanted->major, wanted->minor); + found->major, found->minor, found->patch, + wanted->major, wanted->minor, wanted->patch); goto fail; } - if (wanted->minor > found->minor) { - drm_notice(&xe->drm, "%s firmware (%u.%u) is recommended, but only (%u.%u) was found in %s\n", + if (wanted->minor > found->minor || + (wanted->minor == found->minor && wanted->patch > found->patch)) { + drm_notice(&xe->drm, "%s firmware (%u.%u.%u) is recommended, but only (%u.%u.%u) was found in %s\n", xe_uc_fw_type_repr(uc_fw->type), - wanted->major, wanted->minor, - found->major, found->minor, + wanted->major, wanted->minor, wanted->patch, + found->major, found->minor, found->patch, uc_fw->path); drm_info(&xe->drm, "Consider updating your linux-firmware pkg or downloading from %s\n", XE_UC_FIRMWARE_URL); @@ -652,14 +658,18 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar xe_assert(xe, !uc_fw->path); uc_fw_auto_select(xe, uc_fw); + uc_fw_override(uc_fw); xe_uc_fw_change_status(uc_fw, uc_fw->path ? 
XE_UC_FIRMWARE_SELECTED : XE_UC_FIRMWARE_NOT_SUPPORTED); - if (!xe_uc_fw_is_supported(uc_fw)) + if (!xe_uc_fw_is_supported(uc_fw)) { + if (uc_fw->type == XE_UC_FW_TYPE_GUC) { + drm_err(&xe->drm, "No GuC firmware defined for platform\n"); + return -ENOENT; + } return 0; - - uc_fw_override(uc_fw); + } /* an empty path means the firmware is disabled */ if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) { diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h index ee914a5d8523..bc800b696866 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw_types.h +++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h @@ -124,11 +124,14 @@ struct xe_uc_fw { /** @versions: FW versions wanted and found */ struct { - /** @wanted: firmware version wanted by platform */ + /** @versions.wanted: firmware version wanted by platform */ struct xe_uc_fw_version wanted; - /** @wanted_type: type of firmware version wanted (release vs compatibility) */ + /** + * @versions.wanted_type: type of firmware version wanted + * (release vs compatibility) + */ enum xe_uc_fw_version_types wanted_type; - /** @found: fw versions found in firmware blob */ + /** @versions.found: fw versions found in firmware blob */ struct xe_uc_fw_version found[XE_UC_FW_VER_TYPE_COUNT]; } versions; diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 30db264d34a3..e3bde897f6e8 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -13,11 +13,14 @@ #include <drm/ttm/ttm_execbuf_util.h> #include <drm/ttm/ttm_tt.h> #include <drm/xe_drm.h> +#include <linux/ascii85.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/mm.h> #include <linux/swap.h> +#include <generated/xe_wa_oob.h> + #include "xe_assert.h" #include "xe_bo.h" #include "xe_device.h" @@ -34,11 +37,8 @@ #include "xe_res_cursor.h" #include "xe_sync.h" #include "xe_trace.h" -#include "generated/xe_wa_oob.h" #include "xe_wa.h" -#define TEST_VM_ASYNC_OPS_ERROR - static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) { return vm->gpuvm.r_obj; @@ -114,11 +114,8 @@ retry: num_pages - pinned, read_only ? 
0 : FOLL_WRITE, &pages[pinned]); - if (ret < 0) { - if (in_kthread) - ret = 0; + if (ret < 0) break; - } pinned += ret; ret = 0; @@ -797,6 +794,7 @@ static void xe_vma_free(struct xe_vma *vma) #define VMA_CREATE_FLAG_READ_ONLY BIT(0) #define VMA_CREATE_FLAG_IS_NULL BIT(1) +#define VMA_CREATE_FLAG_DUMPABLE BIT(2) static struct xe_vma *xe_vma_create(struct xe_vm *vm, struct xe_bo *bo, @@ -809,6 +807,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, u8 id; bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY); bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL); + bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE); xe_assert(vm->xe, start < end); xe_assert(vm->xe, end < vm->size); @@ -843,6 +842,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, vma->gpuva.va.range = end - start + 1; if (read_only) vma->gpuva.flags |= XE_VMA_READ_ONLY; + if (dumpable) + vma->gpuva.flags |= XE_VMA_DUMPABLE; for_each_tile(tile, vm->xe, id) vma->tile_mask |= 0x1 << id; @@ -1000,9 +1001,16 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma, int err; XE_WARN_ON(!vm); - err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared); - if (!err && bo && !bo->vm) - err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared); + if (num_shared) + err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared); + else + err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); + if (!err && bo && !bo->vm) { + if (num_shared) + err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared); + else + err = drm_exec_lock_obj(exec, &bo->ttm.base); + } return err; } @@ -1049,7 +1057,9 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) xe_assert(vm->xe, xe_vma_vm(vma) == vm); lockdep_assert_held(&vm->lock); + mutex_lock(&vm->snap_mutex); err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); + mutex_unlock(&vm->snap_mutex); XE_WARN_ON(err); /* Shouldn't be possible */ return err; @@ -1060,7 +1070,9 @@ static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) xe_assert(vm->xe, xe_vma_vm(vma) == vm); lockdep_assert_held(&vm->lock); + mutex_lock(&vm->snap_mutex); drm_gpuva_remove(&vma->gpuva); + mutex_unlock(&vm->snap_mutex); if (vm->usm.last_fault_vma == vma) vm->usm.last_fault_vma = NULL; } @@ -1079,7 +1091,7 @@ static struct drm_gpuva_op *xe_vm_op_alloc(void) static void xe_vm_free(struct drm_gpuvm *gpuvm); -static struct drm_gpuvm_ops gpuvm_ops = { +static const struct drm_gpuvm_ops gpuvm_ops = { .op_alloc = xe_vm_op_alloc, .vm_bo_validate = xe_gpuvm_validate, .vm_free = xe_vm_free, @@ -1287,6 +1299,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) vm->flags = flags; init_rwsem(&vm->lock); + mutex_init(&vm->snap_mutex); INIT_LIST_HEAD(&vm->rebind_list); @@ -1412,6 +1425,7 @@ err_close: return ERR_PTR(err); err_no_resv: + mutex_destroy(&vm->snap_mutex); for_each_tile(tile, xe, id) xe_range_fence_tree_fini(&vm->rftree[id]); kfree(vm); @@ -1536,6 +1550,8 @@ static void vm_destroy_work_func(struct work_struct *w) /* xe_vm_close_and_put was not called? */ xe_assert(xe, !vm->size); + mutex_destroy(&vm->snap_mutex); + if (!(vm->flags & XE_VM_FLAG_MIGRATION)) { xe_device_mem_access_put(xe); @@ -1984,6 +2000,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, xe_exec_queue_last_fence_get(wait_exec_queue, vm); xe_sync_entry_signal(&syncs[i], NULL, fence); + dma_fence_put(fence); } } @@ -2064,7 +2081,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, struct drm_gem_object *obj = bo ? 
&bo->ttm.base : NULL; struct drm_gpuva_ops *ops; struct drm_gpuva_op *__op; - struct xe_vma_op *op; struct drm_gpuvm_bo *vm_bo; int err; @@ -2111,15 +2127,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, if (IS_ERR(ops)) return ops; -#ifdef TEST_VM_ASYNC_OPS_ERROR - if (operation & FORCE_ASYNC_OP_ERROR) { - op = list_first_entry_or_null(&ops->list, struct xe_vma_op, - base.entry); - if (op) - op->inject_error = true; - } -#endif - drm_gpuva_for_each_op(__op, ops) { struct xe_vma_op *op = gpuva_op_to_vma_op(__op); @@ -2129,6 +2136,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, op->map.read_only = flags & DRM_XE_VM_BIND_FLAG_READONLY; op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; + op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE; op->map.pat_index = pat_index; } else if (__op->op == DRM_GPUVA_OP_PREFETCH) { op->prefetch.region = prefetch_region; @@ -2197,13 +2205,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma) { if (vma->gpuva.flags & XE_VMA_PTE_1G) return SZ_1G; - else if (vma->gpuva.flags & XE_VMA_PTE_2M) + else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT)) return SZ_2M; + else if (vma->gpuva.flags & XE_VMA_PTE_64K) + return SZ_64K; + else if (vma->gpuva.flags & XE_VMA_PTE_4K) + return SZ_4K; - return SZ_4K; + return SZ_1G; /* Uninitialized, used max size */ } -static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size) +static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size) { switch (size) { case SZ_1G: @@ -2212,9 +2224,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size) case SZ_2M: vma->gpuva.flags |= XE_VMA_PTE_2M; break; + case SZ_64K: + vma->gpuva.flags |= XE_VMA_PTE_64K; + break; + case SZ_4K: + vma->gpuva.flags |= XE_VMA_PTE_4K; + break; } - - return SZ_4K; } static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) @@ -2286,6 +2302,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, struct list_head *ops_list, bool last) { + struct xe_device *xe = vm->xe; struct xe_vma_op *last_op = NULL; struct drm_gpuva_op *__op; int err = 0; @@ -2316,6 +2333,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, VMA_CREATE_FLAG_READ_ONLY : 0; flags |= op->map.is_null ? VMA_CREATE_FLAG_IS_NULL : 0; + flags |= op->map.dumpable ? + VMA_CREATE_FLAG_DUMPABLE : 0; vma = new_vma(vm, &op->base.map, op->map.pat_index, flags); @@ -2340,6 +2359,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, flags |= op->base.remap.unmap->va->flags & DRM_GPUVA_SPARSE ? VMA_CREATE_FLAG_IS_NULL : 0; + flags |= op->base.remap.unmap->va->flags & + XE_VMA_DUMPABLE ? + VMA_CREATE_FLAG_DUMPABLE : 0; vma = new_vma(vm, op->base.remap.prev, old->pat_index, flags); @@ -2361,6 +2383,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, xe_vma_end(vma) - xe_vma_start(old); op->remap.start = xe_vma_end(vma); + vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx", + (ULL)op->remap.start, + (ULL)op->remap.range); } } @@ -2371,6 +2396,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, flags |= op->base.remap.unmap->va->flags & DRM_GPUVA_SPARSE ? VMA_CREATE_FLAG_IS_NULL : 0; + flags |= op->base.remap.unmap->va->flags & + XE_VMA_DUMPABLE ? 
+ VMA_CREATE_FLAG_DUMPABLE : 0; vma = new_vma(vm, op->base.remap.next, old->pat_index, flags); @@ -2391,6 +2419,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, op->remap.range -= xe_vma_end(old) - xe_vma_start(vma); + vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx", + (ULL)op->remap.start, + (ULL)op->remap.range); } } break; @@ -2530,13 +2561,25 @@ retry_userptr: } drm_exec_fini(&exec); - if (err == -EAGAIN && xe_vma_is_userptr(vma)) { + if (err == -EAGAIN) { lockdep_assert_held_write(&vm->lock); - err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); - if (!err) - goto retry_userptr; - trace_xe_vma_fail(vma); + if (op->base.op == DRM_GPUVA_OP_REMAP) { + if (!op->remap.unmap_done) + vma = gpuva_to_vma(op->base.remap.unmap->va); + else if (op->remap.prev) + vma = op->remap.prev; + else + vma = op->remap.next; + } + + if (xe_vma_is_userptr(vma)) { + err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); + if (!err) + goto retry_userptr; + + trace_xe_vma_fail(vma); + } } return err; @@ -2548,13 +2591,6 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) lockdep_assert_held_write(&vm->lock); -#ifdef TEST_VM_ASYNC_OPS_ERROR - if (op->inject_error) { - op->inject_error = false; - return -ENOMEM; - } -#endif - switch (op->base.op) { case DRM_GPUVA_OP_MAP: ret = __xe_vma_op_execute(vm, op->map.vma, op); @@ -2669,7 +2705,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, { int i; - for (i = num_ops_list - 1; i; ++i) { + for (i = num_ops_list - 1; i >= 0; --i) { struct drm_gpuva_ops *__ops = ops[i]; struct drm_gpuva_op *__op; @@ -2714,16 +2750,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, return 0; } -#ifdef TEST_VM_ASYNC_OPS_ERROR -#define SUPPORTED_FLAGS \ - (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \ - DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff) -#else #define SUPPORTED_FLAGS \ (DRM_XE_VM_BIND_FLAG_READONLY | \ DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \ - 0xffff) -#endif + DRM_XE_VM_BIND_FLAG_DUMPABLE) #define XE_64K_PAGE_MASK 0xffffull #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) @@ -3248,3 +3278,168 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id) return 0; } + +struct xe_vm_snapshot { + unsigned long num_snaps; + struct { + u64 ofs, bo_ofs; + unsigned long len; + struct xe_bo *bo; + void *data; + struct mm_struct *mm; + } snap[]; +}; + +struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm) +{ + unsigned long num_snaps = 0, i; + struct xe_vm_snapshot *snap = NULL; + struct drm_gpuva *gpuva; + + if (!vm) + return NULL; + + mutex_lock(&vm->snap_mutex); + drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { + if (gpuva->flags & XE_VMA_DUMPABLE) + num_snaps++; + } + + if (num_snaps) + snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT); + if (!snap) + goto out_unlock; + + snap->num_snaps = num_snaps; + i = 0; + drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { + struct xe_vma *vma = gpuva_to_vma(gpuva); + struct xe_bo *bo = vma->gpuva.gem.obj ? 
+ gem_to_xe_bo(vma->gpuva.gem.obj) : NULL; + + if (!(gpuva->flags & XE_VMA_DUMPABLE)) + continue; + + snap->snap[i].ofs = xe_vma_start(vma); + snap->snap[i].len = xe_vma_size(vma); + if (bo) { + snap->snap[i].bo = xe_bo_get(bo); + snap->snap[i].bo_ofs = xe_vma_bo_offset(vma); + } else if (xe_vma_is_userptr(vma)) { + struct mm_struct *mm = + to_userptr_vma(vma)->userptr.notifier.mm; + + if (mmget_not_zero(mm)) + snap->snap[i].mm = mm; + else + snap->snap[i].data = ERR_PTR(-EFAULT); + + snap->snap[i].bo_ofs = xe_vma_userptr(vma); + } else { + snap->snap[i].data = ERR_PTR(-ENOENT); + } + i++; + } + +out_unlock: + mutex_unlock(&vm->snap_mutex); + return snap; +} + +void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap) +{ + for (int i = 0; i < snap->num_snaps; i++) { + struct xe_bo *bo = snap->snap[i].bo; + struct iosys_map src; + int err; + + if (IS_ERR(snap->snap[i].data)) + continue; + + snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER); + if (!snap->snap[i].data) { + snap->snap[i].data = ERR_PTR(-ENOMEM); + goto cleanup_bo; + } + + if (bo) { + dma_resv_lock(bo->ttm.base.resv, NULL); + err = ttm_bo_vmap(&bo->ttm, &src); + if (!err) { + xe_map_memcpy_from(xe_bo_device(bo), + snap->snap[i].data, + &src, snap->snap[i].bo_ofs, + snap->snap[i].len); + ttm_bo_vunmap(&bo->ttm, &src); + } + dma_resv_unlock(bo->ttm.base.resv); + } else { + void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs; + + kthread_use_mm(snap->snap[i].mm); + if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len)) + err = 0; + else + err = -EFAULT; + kthread_unuse_mm(snap->snap[i].mm); + + mmput(snap->snap[i].mm); + snap->snap[i].mm = NULL; + } + + if (err) { + kvfree(snap->snap[i].data); + snap->snap[i].data = ERR_PTR(err); + } + +cleanup_bo: + xe_bo_put(bo); + snap->snap[i].bo = NULL; + } +} + +void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p) +{ + unsigned long i, j; + + for (i = 0; i < snap->num_snaps; i++) { + if (IS_ERR(snap->snap[i].data)) + goto uncaptured; + + drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len); + drm_printf(p, "[%llx].data: ", + snap->snap[i].ofs); + + for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) { + u32 *val = snap->snap[i].data + j; + char dumped[ASCII85_BUFSZ]; + + drm_puts(p, ascii85_encode(*val, dumped)); + } + + drm_puts(p, "\n"); + continue; + +uncaptured: + drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n", + snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1, + PTR_ERR(snap->snap[i].data)); + } +} + +void xe_vm_snapshot_free(struct xe_vm_snapshot *snap) +{ + unsigned long i; + + if (!snap) + return; + + for (i = 0; i < snap->num_snaps; i++) { + if (!IS_ERR(snap->snap[i].data)) + kvfree(snap->snap[i].data); + xe_bo_put(snap->snap[i].bo); + if (snap->snap[i].mm) + mmput(snap->snap[i].mm); + } + kvfree(snap); +} diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index 9654a0612fc2..6df1f1c7f85d 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -211,8 +211,6 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker); int xe_vm_invalidate_vma(struct xe_vma *vma); -extern struct ttm_device_funcs xe_ttm_funcs; - static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm) { xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); @@ -273,3 +271,8 @@ static inline void vm_dbg(const struct drm_device *dev, { /* noop */ } #endif #endif + +struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm); +void 
xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap); +void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p); +void xe_vm_snapshot_free(struct xe_vm_snapshot *snap); diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 1fec66ae2eb2..7d4f810f9c04 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -21,9 +21,6 @@ struct xe_bo; struct xe_sync_entry; struct xe_vm; -#define TEST_VM_ASYNC_OPS_ERROR -#define FORCE_ASYNC_OP_ERROR BIT(31) - #define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS #define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1) #define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2) @@ -32,6 +29,9 @@ struct xe_vm; #define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5) #define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6) #define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7) +#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8) +#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9) +#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10) /** struct xe_userptr - User pointer */ struct xe_userptr { @@ -160,6 +160,11 @@ struct xe_vm { * VM */ struct rw_semaphore lock; + /** + * @snap_mutex: Mutex used to guard insertions and removals from gpuva, + * so we can take a snapshot safely from devcoredump. + */ + struct mutex snap_mutex; /** * @rebind_list: list of VMAs that need rebinding. Protected by the @@ -295,6 +300,8 @@ struct xe_vma_op_map { bool read_only; /** @is_null: is NULL binding */ bool is_null; + /** @dumpable: whether BO is dumped on GPU hang */ + bool dumpable; /** @pat_index: The pat index to use for this operation. */ u16 pat_index; }; @@ -360,11 +367,6 @@ struct xe_vma_op { /** @flags: operation flags */ enum xe_vma_op_flags flags; -#ifdef TEST_VM_ASYNC_OPS_ERROR - /** @inject_error: inject error to test async op error handling */ - bool inject_error; -#endif - union { /** @map: VMA map operation specific data */ struct xe_vma_op_map map; diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c new file mode 100644 index 000000000000..079cc283a186 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_vram_freq.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ +#include <linux/sysfs.h> +#include <drm/drm_managed.h> + +#include "xe_gt_types.h" +#include "xe_pcode.h" +#include "xe_pcode_api.h" +#include "xe_tile.h" +#include "xe_tile_sysfs.h" +#include "xe_vram_freq.h" + +/** + * DOC: Xe VRAM freq + * + * Provides sysfs entries for vram frequency in tile + * + * device/tile#/memory/freq0/max_freq - This is maximum frequency. This value is read-only as it + * is the fixed fuse point P0. It is not the system + * configuration. + * device/tile#/memory/freq0/min_freq - This is minimum frequency. This value is read-only as it + * is the fixed fuse point PN. It is not the system + * configuration. 
+ */ + +static struct xe_tile *dev_to_tile(struct device *dev) +{ + return kobj_to_tile(dev->kobj.parent); +} + +static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct xe_tile *tile = dev_to_tile(dev); + struct xe_gt *gt = tile->primary_gt; + u32 val, mbox; + int err; + + mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG) + | REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_P0) + | REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM); + + err = xe_pcode_read(gt, mbox, &val, NULL); + if (err) + return err; + + /* data_out - Fused P0 for domain ID in units of 50 MHz */ + val *= 50; + + return sysfs_emit(buf, "%u\n", val); +} +static DEVICE_ATTR_RO(max_freq); + +static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct xe_tile *tile = dev_to_tile(dev); + struct xe_gt *gt = tile->primary_gt; + u32 val, mbox; + int err; + + mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG) + | REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_PN) + | REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM); + + err = xe_pcode_read(gt, mbox, &val, NULL); + if (err) + return err; + + /* data_out - Fused Pn for domain ID in units of 50 MHz */ + val *= 50; + + return sysfs_emit(buf, "%u\n", val); +} +static DEVICE_ATTR_RO(min_freq); + +static struct attribute *freq_attrs[] = { + &dev_attr_max_freq.attr, + &dev_attr_min_freq.attr, + NULL +}; + +static const struct attribute_group freq_group_attrs = { + .name = "freq0", + .attrs = freq_attrs, +}; + +static void vram_freq_sysfs_fini(struct drm_device *drm, void *arg) +{ + struct kobject *kobj = arg; + + sysfs_remove_group(kobj, &freq_group_attrs); + kobject_put(kobj); +} + +/** + * xe_vram_freq_sysfs_init - Initialize vram frequency sysfs component + * @tile: Xe Tile object + * + * It needs to be initialized after the main tile component is ready + */ +void xe_vram_freq_sysfs_init(struct xe_tile *tile) +{ + struct xe_device *xe = tile_to_xe(tile); + struct kobject *kobj; + int err; + + if (xe->info.platform != XE_PVC) + return; + + kobj = kobject_create_and_add("memory", tile->sysfs); + if (!kobj) + drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM); + + err = sysfs_create_group(kobj, &freq_group_attrs); + if (err) { + kobject_put(kobj); + drm_warn(&xe->drm, "failed to register vram freq sysfs, err: %d\n", err); + return; + } + + err = drmm_add_action_or_reset(&xe->drm, vram_freq_sysfs_fini, kobj); + if (err) + drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n", + __func__, err); +} diff --git a/drivers/gpu/drm/xe/xe_vram_freq.h b/drivers/gpu/drm/xe/xe_vram_freq.h new file mode 100644 index 000000000000..cbe8c12fbd64 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_vram_freq.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_VRAM_FREQ_H_ +#define _XE_VRAM_FREQ_H_ + +struct xe_tile; + +void xe_vram_freq_sysfs_init(struct xe_tile *tile); + +#endif /* _XE_VRAM_FREQ_H_ */ diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 5f61dd87c586..a0264eedd443 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -9,7 +9,8 @@ #include <kunit/visibility.h> #include <linux/compiler_types.h> -#include "generated/xe_wa_oob.h" +#include <generated/xe_wa_oob.h> + #include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" #include "regs/xe_regs.h" @@ -125,13 +126,6 @@ static const struct xe_rtp_entry_sr 
gt_was[] = { /* DG2 */ - { XE_RTP_NAME("16010515920"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), - GRAPHICS_STEP(A0, B0), - ENGINE_CLASS(VIDEO_DECODE)), - XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F18(0), ALNUNIT_CLKGATE_DIS)), - XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), - }, { XE_RTP_NAME("22010523718"), XE_RTP_RULES(SUBPLATFORM(DG2, G10)), XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, CG3DDISCFEG_CLKGATE_DIS)) @@ -140,61 +134,6 @@ static const struct xe_rtp_entry_sr gt_was[] = { XE_RTP_RULES(SUBPLATFORM(DG2, G10)), XE_RTP_ACTIONS(SET(SUBSLICE_UNIT_LEVEL_CLKGATE, DSS_ROUTER_CLKGATE_DIS)) }, - { XE_RTP_NAME("14012362059"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB)) - }, - { XE_RTP_NAME("14012362059"), - XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB)) - }, - { XE_RTP_NAME("14010948348"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS)) - }, - { XE_RTP_NAME("14011037102"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(UNSLCGCTL9444, LTCDD_CLKGATE_DIS)) - }, - { XE_RTP_NAME("14011371254"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(XEHP_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS)) - }, - { XE_RTP_NAME("14011431319"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(UNSLCGCTL9440, - GAMTLBOACS_CLKGATE_DIS | - GAMTLBVDBOX7_CLKGATE_DIS | GAMTLBVDBOX6_CLKGATE_DIS | - GAMTLBVDBOX5_CLKGATE_DIS | GAMTLBVDBOX4_CLKGATE_DIS | - GAMTLBVDBOX3_CLKGATE_DIS | GAMTLBVDBOX2_CLKGATE_DIS | - GAMTLBVDBOX1_CLKGATE_DIS | GAMTLBVDBOX0_CLKGATE_DIS | - GAMTLBKCR_CLKGATE_DIS | GAMTLBGUC_CLKGATE_DIS | - GAMTLBBLT_CLKGATE_DIS), - SET(UNSLCGCTL9444, - GAMTLBGFXA0_CLKGATE_DIS | GAMTLBGFXA1_CLKGATE_DIS | - GAMTLBCOMPA0_CLKGATE_DIS | GAMTLBCOMPA1_CLKGATE_DIS | - GAMTLBCOMPB0_CLKGATE_DIS | GAMTLBCOMPB1_CLKGATE_DIS | - GAMTLBCOMPC0_CLKGATE_DIS | GAMTLBCOMPC1_CLKGATE_DIS | - GAMTLBCOMPD0_CLKGATE_DIS | GAMTLBCOMPD1_CLKGATE_DIS | - GAMTLBMERT_CLKGATE_DIS | - GAMTLBVEBOX3_CLKGATE_DIS | GAMTLBVEBOX2_CLKGATE_DIS | - GAMTLBVEBOX1_CLKGATE_DIS | GAMTLBVEBOX0_CLKGATE_DIS)) - }, - { XE_RTP_NAME("14010569222"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, GAMEDIA_CLKGATE_DIS)) - }, - { XE_RTP_NAME("14011028019"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(SSMCGCTL9530, RTFUNIT_CLKGATE_DIS)) - }, - { XE_RTP_NAME("14010680813"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(XEHP_GAMSTLB_CTRL, - CONTROL_BLOCK_CLKGATE_DIS | - EGRESS_BLOCK_CLKGATE_DIS | - TAG_BLOCK_CLKGATE_DIS)) - }, { XE_RTP_NAME("14014830051"), XE_RTP_RULES(PLATFORM(DG2)), XE_RTP_ACTIONS(CLR(SARB_CHICKEN1, COMP_CKN_IN)) @@ -212,10 +151,6 @@ static const struct xe_rtp_entry_sr gt_was[] = { INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE)) }, - { XE_RTP_NAME("14010648519"), - XE_RTP_RULES(PLATFORM(DG2)), - XE_RTP_ACTIONS(SET(XEHP_L3NODEARBCFG, XEHP_LNESPARE)) - }, /* PVC */ @@ -377,13 +312,6 @@ static const struct xe_rtp_entry_sr engine_was[] = { POLYGON_TRIFAN_LINELOOP_DISABLE)) }, { XE_RTP_NAME("22012826095, 22013059131"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0), - FUNC(xe_rtp_match_first_render_or_compute)), - XE_RTP_ACTIONS(FIELD_SET(LSC_CHICKEN_BIT_0_UDW, - MAXREQS_PER_BANK, - 
REG_FIELD_PREP(MAXREQS_PER_BANK, 2))) - }, - { XE_RTP_NAME("22012826095, 22013059131"), XE_RTP_RULES(SUBPLATFORM(DG2, G11), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(FIELD_SET(LSC_CHICKEN_BIT_0_UDW, @@ -391,27 +319,10 @@ static const struct xe_rtp_entry_sr engine_was[] = { REG_FIELD_PREP(MAXREQS_PER_BANK, 2))) }, { XE_RTP_NAME("22013059131"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0), - FUNC(xe_rtp_match_first_render_or_compute)), - XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, FORCE_1_SUB_MESSAGE_PER_FRAGMENT)) - }, - { XE_RTP_NAME("22013059131"), XE_RTP_RULES(SUBPLATFORM(DG2, G11), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, FORCE_1_SUB_MESSAGE_PER_FRAGMENT)) }, - { XE_RTP_NAME("14010918519"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0), - FUNC(xe_rtp_match_first_render_or_compute)), - XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, - FORCE_SLM_FENCE_SCOPE_TO_TILE | - FORCE_UGM_FENCE_SCOPE_TO_TILE, - /* - * Ignore read back as it always returns 0 in these - * steps - */ - .read_mask = 0)) - }, { XE_RTP_NAME("14015227452"), XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)), @@ -428,22 +339,12 @@ static const struct xe_rtp_entry_sr engine_was[] = { FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3)) }, - { XE_RTP_NAME("16011620976, 22015475538"), + { XE_RTP_NAME("22015475538"), XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8)) }, { XE_RTP_NAME("22012654132"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0), - FUNC(xe_rtp_match_first_render_or_compute)), - XE_RTP_ACTIONS(SET(CACHE_MODE_SS, ENABLE_PREFETCH_INTO_IC, - /* - * Register can't be read back for verification on - * DG2 due to Wa_14012342262 - */ - .read_mask = 0)) - }, - { XE_RTP_NAME("22012654132"), XE_RTP_RULES(SUBPLATFORM(DG2, G11), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(CACHE_MODE_SS, ENABLE_PREFETCH_INTO_IC, @@ -461,68 +362,11 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(ROW_CHICKEN2, DISABLE_READ_SUPPRESSION)) }, - { XE_RTP_NAME("14013392000"), - XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0), - ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(ROW_CHICKEN2, ENABLE_LARGE_GRF_MODE)) - }, - { XE_RTP_NAME("14012419201"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0), - ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(ROW_CHICKEN4, - DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX)) - }, - { XE_RTP_NAME("14012419201"), - XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0), - ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(ROW_CHICKEN4, - DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX)) - }, - { XE_RTP_NAME("1308578152"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0), - ENGINE_CLASS(RENDER), - FUNC(xe_rtp_match_first_gslice_fused_off)), - XE_RTP_ACTIONS(CLR(CS_DEBUG_MODE1(RENDER_RING_BASE), - REPLAY_MODE_GRANULARITY)) - }, { XE_RTP_NAME("22010960976, 14013347512"), XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(CLR(XEHP_HDC_CHICKEN0, LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK)) }, - { XE_RTP_NAME("1608949956, 14010198302"), - XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(ROW_CHICKEN, - MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE)) - }, - { XE_RTP_NAME("22010430635"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, 
B0), - ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(ROW_CHICKEN4, - DISABLE_GRF_CLEAR)) - }, - { XE_RTP_NAME("14013202645"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0), - ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY)) - }, - { XE_RTP_NAME("14013202645"), - XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0), - ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY)) - }, - { XE_RTP_NAME("22012532006"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0), - ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, - DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA)) - }, - { XE_RTP_NAME("22012532006"), - XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0), - ENGINE_CLASS(RENDER)), - XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, - DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA)) - }, { XE_RTP_NAME("14015150844"), XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(XEHP_HDC_CHICKEN0, DIS_ATOMIC_CHAINING_TYPED_WRITES, @@ -612,7 +456,10 @@ static const struct xe_rtp_entry_sr engine_was[] = { PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS, XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, - + { XE_RTP_NAME("16018610683"), + XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE)) + }, {} }; @@ -652,21 +499,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = { /* DG2 */ - { XE_RTP_NAME("16011186671"), - XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(CLR(VFLSKPD, DIS_MULT_MISS_RD_SQUASH), - SET(VFLSKPD, DIS_OVER_FETCH_CACHE)) - }, - { XE_RTP_NAME("14010469329"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(XEHP_COMMON_SLICE_CHICKEN3, - XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE)) - }, - { XE_RTP_NAME("14010698770, 22010613112, 22010465075"), - XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)), - XE_RTP_ACTIONS(SET(XEHP_COMMON_SLICE_CHICKEN3, - DISABLE_CPS_AWARE_COLOR_PIPE)) - }, { XE_RTP_NAME("16013271637"), XE_RTP_RULES(PLATFORM(DG2)), XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1, @@ -708,6 +540,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)), XE_RTP_ACTIONS(SET(CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE)) }, + { XE_RTP_NAME("14019877138"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT)) + }, /* Xe2_LPG */ @@ -739,6 +575,11 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD)) }, + { XE_RTP_NAME("16020183090"), + XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0), + ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT)) + }, {} }; diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 727bdc429212..b138cbd51bdb 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -1,13 +1,8 @@ 22012773006 GRAPHICS_VERSION_RANGE(1200, 1250) -16011759253 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0) 14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0) PLATFORM(DG2) 22011391025 PLATFORM(DG2) -14012197797 PLATFORM(DG2), GRAPHICS_STEP(A0, B0) -16011777198 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0) - SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0) -22012727170 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0) 
- SUBPLATFORM(DG2, G11) +22012727170 SUBPLATFORM(DG2, G11) 22012727685 SUBPLATFORM(DG2, G11) 16015675438 PLATFORM(PVC) SUBPLATFORM(DG2, G10) @@ -22,3 +17,8 @@ 14019821291 MEDIA_VERSION_RANGE(1300, 2000) 14015076503 MEDIA_VERSION(1300) 16020292621 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0) +14018913170 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0) + MEDIA_VERSION(2000), GRAPHICS_STEP(A0, A1) + GRAPHICS_VERSION_RANGE(1270, 1274) + MEDIA_VERSION(1300) + PLATFORM(DG2) diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c index a75eeba7bfe5..f69721339201 100644 --- a/drivers/gpu/drm/xe/xe_wait_user_fence.c +++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c @@ -148,7 +148,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, if (q) { if (q->ops->reset_status(q)) { - drm_info(&xe->drm, "exec gueue reset detected\n"); + drm_info(&xe->drm, "exec queue reset detected\n"); err = -EIO; break; } diff --git a/drivers/gpu/drm/xe/xe_wopcm_types.h b/drivers/gpu/drm/xe/xe_wopcm_types.h index 486d850c4084..99d34837c408 100644 --- a/drivers/gpu/drm/xe/xe_wopcm_types.h +++ b/drivers/gpu/drm/xe/xe_wopcm_types.h @@ -16,9 +16,9 @@ struct xe_wopcm { u32 size; /** @guc: GuC WOPCM Region info */ struct { - /** @base: GuC WOPCM base which is offset from WOPCM base */ + /** @guc.base: GuC WOPCM base which is offset from WOPCM base */ u32 base; - /** @size: Size of the GuC WOPCM region */ + /** @guc.size: Size of the GuC WOPCM region */ u32 size; } guc; }; diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 9f48e5bbcdec..1846c4971fd8 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -1560,12 +1560,12 @@ disconnected: return connector_status_disconnected; } -static struct edid *zynqmp_dp_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector) +static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) { struct zynqmp_dp *dp = bridge_to_dp(bridge); - return drm_get_edid(connector, &dp->aux.ddc); + return drm_edid_read_ddc(connector, &dp->aux.ddc); } static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { @@ -1579,7 +1579,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_check = zynqmp_dp_bridge_atomic_check, .detect = zynqmp_dp_bridge_detect, - .get_edid = zynqmp_dp_bridge_get_edid, + .edid_read = zynqmp_dp_bridge_edid_read, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 84d042796d2e..783975d1384f 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -365,7 +365,7 @@ static const struct dev_pm_ops host1x_device_pm_ops = { .restore = pm_generic_restore, }; -struct bus_type host1x_bus_type = { +const struct bus_type host1x_bus_type = { .name = "host1x", .match = host1x_device_match, .uevent = host1x_device_uevent, diff --git a/drivers/gpu/host1x/bus.h b/drivers/gpu/host1x/bus.h index a4adf9abc3b4..a80ceadfeb34 100644 --- a/drivers/gpu/host1x/bus.h +++ b/drivers/gpu/host1x/bus.h @@ -10,7 +10,7 @@ struct bus_type; struct host1x; -extern struct bus_type host1x_bus_type; +extern const struct bus_type host1x_bus_type; int host1x_register(struct host1x *host1x); int host1x_unregister(struct host1x *host1x); diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c index d1336e438f4f..407ed9b9cf64 100644 --- 
a/drivers/gpu/host1x/cdma.c +++ b/drivers/gpu/host1x/cdma.c @@ -625,8 +625,7 @@ void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2, struct host1x_channel *channel = cdma_to_channel(cdma); struct host1x *host1x = cdma_to_host1x(cdma); struct push_buffer *pb = &cdma->push_buffer; - unsigned int space = cdma->slots_free; - unsigned int needed = 2, extra = 0; + unsigned int space, needed = 2, extra = 0; if (host1x_debug_trace_cmdbuf) trace_host1x_cdma_push_wide(dev_name(channel->dev), op1, op2,