Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
44 files changed, 10724 insertions, 124 deletions
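Before the hunks themselves, a minimal sketch of the serialization pattern this change applies across amd_powerplay.c: every dispatch entry point now takes pp_handle->pp_lock before calling into the hwmgr backend, stores the result in a local, and unlocks before returning. The *_sketch names and abbreviated struct layouts below are illustrative stand-ins for the real pp_instance/pp_hwmgr definitions, not code from the patch.

/*
 * Sketch of the locking pattern used throughout the amd_powerplay.c hunks
 * below (assumes kernel headers; types abbreviated to the members the
 * hunks actually touch).
 */
#include <linux/mutex.h>

struct pp_hwmgr_sketch;

struct pp_hwmgr_func_sketch {
	int (*get_sclk)(struct pp_hwmgr_sketch *hwmgr, bool low);
};

struct pp_hwmgr_sketch {
	const struct pp_hwmgr_func_sketch *hwmgr_func;
};

struct pp_instance_sketch {
	struct pp_hwmgr_sketch *hwmgr;
	struct mutex pp_lock;	/* mutex_init()'d in amd_powerplay_create() */
};

static int pp_dpm_get_sclk_sketch(void *handle, bool low)
{
	struct pp_instance_sketch *pp_handle = handle;
	struct pp_hwmgr_sketch *hwmgr = pp_handle->hwmgr;
	int ret;

	mutex_lock(&pp_handle->pp_lock);
	ret = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&pp_handle->pp_lock);

	return ret;	/* the lock is never held across a return */
}

The same concern explains the pp_dpm_dispatch_tasks hunk that turns "return -EINVAL" into "ret = -EINVAL; break;": once the switch runs under pp_lock, early returns would skip the unlock.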
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 429f18b99323..9da5b0bb66d8 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -239,7 +239,7 @@ static int pp_set_powergating_state(void *handle, /* Enable/disable GFX per cu powergating through SMU */ return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr, - state == AMD_PG_STATE_GATE ? true : false); + state == AMD_PG_STATE_GATE); } static int pp_suspend(void *handle) @@ -286,7 +286,7 @@ static int pp_resume(void *handle) } if (ret1 == PP_DPM_DISABLED) - return ret1; + return 0; eventmgr = pp_handle->eventmgr; @@ -341,8 +341,9 @@ static int pp_dpm_force_performance_level(void *handle, return 0; } + mutex_lock(&pp_handle->pp_lock); hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); - + mutex_unlock(&pp_handle->pp_lock); return 0; } @@ -352,6 +353,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + enum amd_dpm_forced_level level; ret = pp_check(pp_handle); @@ -359,8 +361,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( return ret; hwmgr = pp_handle->hwmgr; - - return hwmgr->dpm_level; + mutex_lock(&pp_handle->pp_lock); + level = hwmgr->dpm_level; + mutex_unlock(&pp_handle->pp_lock); + return level; } static int pp_dpm_get_sclk(void *handle, bool low) @@ -380,8 +384,10 @@ static int pp_dpm_get_sclk(void *handle, bool low) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->get_sclk(hwmgr, low); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->get_sclk(hwmgr, low); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_get_mclk(void *handle, bool low) @@ -401,8 +407,10 @@ static int pp_dpm_get_mclk(void *handle, bool low) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->get_mclk(hwmgr, low); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->get_mclk(hwmgr, low); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_powergate_vce(void *handle, bool gate) @@ -422,8 +430,10 @@ static int pp_dpm_powergate_vce(void *handle, bool gate) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_powergate_uvd(void *handle, bool gate) @@ -443,8 +453,10 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state) @@ -472,7 +484,7 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, if (ret != 0) return ret; - + mutex_lock(&pp_handle->pp_lock); switch (event_id) { case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE: ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); @@ -481,8 +493,10 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, { enum amd_pm_state_type ps; - if (input == NULL) - return -EINVAL; + if (input == NULL) { + ret = -EINVAL; + break; + } ps = 
*(unsigned long *)input; data.requested_ui_label = power_state_convert(ps); @@ -498,6 +512,7 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, default: break; } + mutex_unlock(&pp_handle->pp_lock); return ret; } @@ -507,6 +522,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) struct pp_power_state *state; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + enum amd_pm_state_type pm_type; ret = pp_check(pp_handle); @@ -518,21 +534,30 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) if (hwmgr->current_ps == NULL) return -EINVAL; + mutex_lock(&pp_handle->pp_lock); + state = hwmgr->current_ps; switch (state->classification.ui_label) { case PP_StateUILabel_Battery: - return POWER_STATE_TYPE_BATTERY; + pm_type = POWER_STATE_TYPE_BATTERY; + break; case PP_StateUILabel_Balanced: - return POWER_STATE_TYPE_BALANCED; + pm_type = POWER_STATE_TYPE_BALANCED; + break; case PP_StateUILabel_Performance: - return POWER_STATE_TYPE_PERFORMANCE; + pm_type = POWER_STATE_TYPE_PERFORMANCE; + break; default: if (state->classification.flags & PP_StateClassificationFlag_Boot) - return POWER_STATE_TYPE_INTERNAL_BOOT; + pm_type = POWER_STATE_TYPE_INTERNAL_BOOT; else - return POWER_STATE_TYPE_DEFAULT; + pm_type = POWER_STATE_TYPE_DEFAULT; + break; } + mutex_unlock(&pp_handle->pp_lock); + + return pm_type; } static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) @@ -552,8 +577,10 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_get_fan_control_mode(void *handle) @@ -573,8 +600,10 @@ static int pp_dpm_get_fan_control_mode(void *handle) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) @@ -594,8 +623,10 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) @@ -616,7 +647,10 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) return 0; } - return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) @@ -635,7 +669,10 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) return -EINVAL; - return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_get_temperature(void *handle) @@ -655,8 
+692,10 @@ static int pp_dpm_get_temperature(void *handle) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->get_temperature(hwmgr); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->get_temperature(hwmgr); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_get_pp_num_states(void *handle, @@ -677,6 +716,8 @@ static int pp_dpm_get_pp_num_states(void *handle, if (hwmgr->ps == NULL) return -EINVAL; + mutex_lock(&pp_handle->pp_lock); + data->nums = hwmgr->num_ps; for (i = 0; i < hwmgr->num_ps; i++) { @@ -699,7 +740,7 @@ static int pp_dpm_get_pp_num_states(void *handle, data->states[i] = POWER_STATE_TYPE_DEFAULT; } } - + mutex_unlock(&pp_handle->pp_lock); return 0; } @@ -708,6 +749,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table) struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; + int size = 0; ret = pp_check(pp_handle); @@ -719,9 +761,11 @@ static int pp_dpm_get_pp_table(void *handle, char **table) if (!hwmgr->soft_pp_table) return -EINVAL; + mutex_lock(&pp_handle->pp_lock); *table = (char *)hwmgr->soft_pp_table; - - return hwmgr->soft_pp_table_size; + size = hwmgr->soft_pp_table_size; + mutex_unlock(&pp_handle->pp_lock); + return size; } static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) @@ -736,21 +780,33 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) return ret; hwmgr = pp_handle->hwmgr; - + mutex_lock(&pp_handle->pp_lock); if (!hwmgr->hardcode_pp_table) { hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, hwmgr->soft_pp_table_size, GFP_KERNEL); - - if (!hwmgr->hardcode_pp_table) + if (!hwmgr->hardcode_pp_table) { + mutex_unlock(&pp_handle->pp_lock); return -ENOMEM; + } } memcpy(hwmgr->hardcode_pp_table, buf, size); hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; + mutex_unlock(&pp_handle->pp_lock); - return amd_powerplay_reset(handle); + ret = amd_powerplay_reset(handle); + if (ret) + return ret; + + if (hwmgr->hwmgr_func->avfs_control) { + ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false); + if (ret) + return ret; + } + + return 0; } static int pp_dpm_force_clock_level(void *handle, @@ -771,8 +827,10 @@ static int pp_dpm_force_clock_level(void *handle, pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); + mutex_lock(&pp_handle->pp_lock); + hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_print_clock_levels(void *handle, @@ -793,7 +851,10 @@ static int pp_dpm_print_clock_levels(void *handle, pr_info("%s was not implemented.\n", __func__); return 0; } - return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_get_sclk_od(void *handle) @@ -813,8 +874,10 @@ static int pp_dpm_get_sclk_od(void *handle) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->get_sclk_od(hwmgr); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_set_sclk_od(void *handle, uint32_t value) @@ -835,7 +898,10 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value) return 0; } - return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); + 
mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_get_mclk_od(void *handle) @@ -855,8 +921,10 @@ static int pp_dpm_get_mclk_od(void *handle) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->get_mclk_od(hwmgr); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr); + mutex_unlock(&pp_handle->pp_lock); + return ret; } static int pp_dpm_set_mclk_od(void *handle, uint32_t value) @@ -876,11 +944,14 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value) pr_info("%s was not implemented.\n", __func__); return 0; } - - return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); + mutex_unlock(&pp_handle->pp_lock); + return ret; } -static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value) +static int pp_dpm_read_sensor(void *handle, int idx, + void *value, int *size) { struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; @@ -898,7 +969,11 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value) return 0; } - return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value); + mutex_lock(&pp_handle->pp_lock); + ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size); + mutex_unlock(&pp_handle->pp_lock); + + return ret; } static struct amd_vce_state* @@ -917,10 +992,140 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx) if (hwmgr && idx < hwmgr->num_vce_state_tables) return &hwmgr->vce_states[idx]; - return NULL; } +static int pp_dpm_reset_power_profile_state(void *handle, + struct amd_pp_profile *request) +{ + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + + if (!request || pp_check(pp_handle)) + return -EINVAL; + + hwmgr = pp_handle->hwmgr; + + if (hwmgr->hwmgr_func->set_power_profile_state == NULL) { + pr_info("%s was not implemented.\n", __func__); + return 0; + } + + if (request->type == AMD_PP_GFX_PROFILE) { + hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile; + return hwmgr->hwmgr_func->set_power_profile_state(hwmgr, + &hwmgr->gfx_power_profile); + } else if (request->type == AMD_PP_COMPUTE_PROFILE) { + hwmgr->compute_power_profile = + hwmgr->default_compute_power_profile; + return hwmgr->hwmgr_func->set_power_profile_state(hwmgr, + &hwmgr->compute_power_profile); + } else + return -EINVAL; +} + +static int pp_dpm_get_power_profile_state(void *handle, + struct amd_pp_profile *query) +{ + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + + if (!query || pp_check(pp_handle)) + return -EINVAL; + + hwmgr = pp_handle->hwmgr; + + if (query->type == AMD_PP_GFX_PROFILE) + memcpy(query, &hwmgr->gfx_power_profile, + sizeof(struct amd_pp_profile)); + else if (query->type == AMD_PP_COMPUTE_PROFILE) + memcpy(query, &hwmgr->compute_power_profile, + sizeof(struct amd_pp_profile)); + else + return -EINVAL; + + return 0; +} + +static int pp_dpm_set_power_profile_state(void *handle, + struct amd_pp_profile *request) +{ + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = -1; + + if (!request || pp_check(pp_handle)) + return -EINVAL; + + hwmgr = pp_handle->hwmgr; + + if (hwmgr->hwmgr_func->set_power_profile_state == NULL) { + pr_info("%s was not implemented.\n", __func__); + return 0; + } + + if (request->min_sclk || + request->min_mclk || + 
request->activity_threshold || + request->up_hyst || + request->down_hyst) { + if (request->type == AMD_PP_GFX_PROFILE) + memcpy(&hwmgr->gfx_power_profile, request, + sizeof(struct amd_pp_profile)); + else if (request->type == AMD_PP_COMPUTE_PROFILE) + memcpy(&hwmgr->compute_power_profile, request, + sizeof(struct amd_pp_profile)); + else + return -EINVAL; + + if (request->type == hwmgr->current_power_profile) + ret = hwmgr->hwmgr_func->set_power_profile_state( + hwmgr, + request); + } else { + /* set power profile if it exists */ + switch (request->type) { + case AMD_PP_GFX_PROFILE: + ret = hwmgr->hwmgr_func->set_power_profile_state( + hwmgr, + &hwmgr->gfx_power_profile); + break; + case AMD_PP_COMPUTE_PROFILE: + ret = hwmgr->hwmgr_func->set_power_profile_state( + hwmgr, + &hwmgr->compute_power_profile); + break; + default: + return -EINVAL; + } + } + + if (!ret) + hwmgr->current_power_profile = request->type; + + return 0; +} + +static int pp_dpm_switch_power_profile(void *handle, + enum amd_pp_profile_type type) +{ + struct pp_hwmgr *hwmgr; + struct amd_pp_profile request = {0}; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + + if (pp_check(pp_handle)) + return -EINVAL; + + hwmgr = pp_handle->hwmgr; + + if (hwmgr->current_power_profile != type) { + request.type = type; + pp_dpm_set_power_profile_state(handle, &request); + } + + return 0; +} + const struct amd_powerplay_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, @@ -949,6 +1154,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .set_mclk_od = pp_dpm_set_mclk_od, .read_sensor = pp_dpm_read_sensor, .get_vce_clock_state = pp_dpm_get_vce_clock_state, + .reset_power_profile_state = pp_dpm_reset_power_profile_state, + .get_power_profile_state = pp_dpm_get_power_profile_state, + .set_power_profile_state = pp_dpm_set_power_profile_state, + .switch_power_profile = pp_dpm_switch_power_profile, }; int amd_powerplay_create(struct amd_pp_init *pp_init, @@ -969,8 +1178,8 @@ int amd_powerplay_create(struct amd_pp_init *pp_init, instance->pm_en = pp_init->pm_en; instance->feature_mask = pp_init->feature_mask; instance->device = pp_init->device; + mutex_init(&instance->pp_lock); *handle = instance; - return 0; } @@ -1041,9 +1250,9 @@ int amd_powerplay_display_configuration_change(void *handle, return ret; hwmgr = pp_handle->hwmgr; - + mutex_lock(&pp_handle->pp_lock); phm_store_dal_configuration_data(hwmgr, display_config); - + mutex_unlock(&pp_handle->pp_lock); return 0; } @@ -1064,7 +1273,10 @@ int amd_powerplay_get_display_power_level(void *handle, if (output == NULL) return -EINVAL; - return phm_get_dal_power_level(hwmgr, output); + mutex_lock(&pp_handle->pp_lock); + ret = phm_get_dal_power_level(hwmgr, output); + mutex_unlock(&pp_handle->pp_lock); + return ret; } int amd_powerplay_get_current_clocks(void *handle, @@ -1083,14 +1295,22 @@ int amd_powerplay_get_current_clocks(void *handle, hwmgr = pp_handle->hwmgr; + mutex_lock(&pp_handle->pp_lock); + phm_get_dal_power_level(hwmgr, &simple_clocks); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment)) - PP_ASSERT_WITH_CODE(0, "Error in PHM_GetPowerContainmentClockInfo", return -1); - } else { - if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_Activity)) - PP_ASSERT_WITH_CODE(0, "Error in 
PHM_GetClockInfo", return -1); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) + ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, + &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment); + else + ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, + &hw_clocks, PHM_PerformanceLevelDesignation_Activity); + + if (ret != 0) { + pr_info("Error in phm_get_clock_info \n"); + mutex_unlock(&pp_handle->pp_lock); + return -EINVAL; } clocks->min_engine_clock = hw_clocks.min_eng_clk; @@ -1109,14 +1329,12 @@ int amd_powerplay_get_current_clocks(void *handle, clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; } - + mutex_unlock(&pp_handle->pp_lock); return 0; - } int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) { - int result = -1; struct pp_hwmgr *hwmgr; struct pp_instance *pp_handle = (struct pp_instance *)handle; int ret = 0; @@ -1131,9 +1349,104 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s if (clocks == NULL) return -EINVAL; - result = phm_get_clock_by_type(hwmgr, type, clocks); + mutex_lock(&pp_handle->pp_lock); + ret = phm_get_clock_by_type(hwmgr, type, clocks); + mutex_unlock(&pp_handle->pp_lock); + return ret; +} - return result; +int amd_powerplay_get_clock_by_type_with_latency(void *handle, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks) +{ + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; + + ret = pp_check(pp_handle); + if (ret != 0) + return ret; + + if (!clocks) + return -EINVAL; + + mutex_lock(&pp_handle->pp_lock); + hwmgr = ((struct pp_instance *)handle)->hwmgr; + ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks); + mutex_unlock(&pp_handle->pp_lock); + return ret; +} + +int amd_powerplay_get_clock_by_type_with_voltage(void *handle, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; + + ret = pp_check(pp_handle); + if (ret != 0) + return ret; + + if (!clocks) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + mutex_lock(&pp_handle->pp_lock); + + ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks); + + mutex_unlock(&pp_handle->pp_lock); + return ret; +} + +int amd_powerplay_set_watermarks_for_clocks_ranges(void *handle, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) +{ + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; + + ret = pp_check(pp_handle); + if (ret != 0) + return ret; + + if (!wm_with_clock_ranges) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + mutex_lock(&pp_handle->pp_lock); + ret = phm_set_watermarks_for_clocks_ranges(hwmgr, + wm_with_clock_ranges); + mutex_unlock(&pp_handle->pp_lock); + + return ret; +} + +int amd_powerplay_display_clock_voltage_request(void *handle, + struct pp_display_clock_request *clock) +{ + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; + + ret = pp_check(pp_handle); + if (ret != 0) + return ret; + + if (!clock) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + mutex_lock(&pp_handle->pp_lock); + ret = phm_display_clock_voltage_request(hwmgr, clock); + 
mutex_unlock(&pp_handle->pp_lock); + + return ret; } int amd_powerplay_get_display_mode_validation_clocks(void *handle, @@ -1150,13 +1463,15 @@ int amd_powerplay_get_display_mode_validation_clocks(void *handle, hwmgr = pp_handle->hwmgr; - if (clocks == NULL) return -EINVAL; + mutex_lock(&pp_handle->pp_lock); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) ret = phm_get_max_high_clocks(hwmgr, clocks); + mutex_unlock(&pp_handle->pp_lock); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index 5fff1d636ab7..27db2b77824f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -5,9 +5,11 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ hardwaremanager.o pp_acpi.o cz_hwmgr.o \ cz_clockpowergating.o pppcielanes.o\ - process_pptables_v1_0.o ppatomctrl.o \ + process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ - smu7_clockpowergating.o + smu7_clockpowergating.o \ + vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \ + vega10_thermal.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index a4cde3d778b8..7aa5ca815a3a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1813,7 +1813,8 @@ static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr) return actual_temp; } -static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) +static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, + void *value, int *size) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); @@ -1837,11 +1838,16 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) uint16_t vddnb, vddgfx; int result; + /* size must be at least 4 bytes for all sensors */ + if (*size < 4) + return -EINVAL; + *size = 4; + switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: if (sclk_index < NUM_SCLK_LEVELS) { sclk = table->entries[sclk_index].clk; - *value = sclk; + *((uint32_t *)value) = sclk; return 0; } return -EINVAL; @@ -1849,13 +1855,13 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); - *value = vddnb; + *((uint32_t *)value) = vddnb; return 0; case AMDGPU_PP_SENSOR_VDDGFX: tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); - *value = vddgfx; + *((uint32_t *)value) = vddgfx; return 0; case AMDGPU_PP_SENSOR_UVD_VCLK: if (!cz_hwmgr->uvd_power_gated) { @@ -1863,11 +1869,11 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) return -EINVAL; } else { vclk = uvd_table->entries[uvd_index].vclk; - *value = vclk; + *((uint32_t *)value) = vclk; return 0; } } - *value = 0; + *((uint32_t *)value) = 0; return 0; case AMDGPU_PP_SENSOR_UVD_DCLK: if (!cz_hwmgr->uvd_power_gated) { @@ -1875,11 +1881,11 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) return -EINVAL; } else { dclk = uvd_table->entries[uvd_index].dclk; - *value = dclk; + *((uint32_t *)value) = dclk; 
return 0; } } - *value = 0; + *((uint32_t *)value) = 0; return 0; case AMDGPU_PP_SENSOR_VCE_ECCLK: if (!cz_hwmgr->vce_power_gated) { @@ -1887,11 +1893,11 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) return -EINVAL; } else { ecclk = vce_table->entries[vce_index].ecclk; - *value = ecclk; + *((uint32_t *)value) = ecclk; return 0; } } - *value = 0; + *((uint32_t *)value) = 0; return 0; case AMDGPU_PP_SENSOR_GPU_LOAD: result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity); @@ -1901,16 +1907,16 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) } else { activity_percent = 50; } - *value = activity_percent; + *((uint32_t *)value) = activity_percent; return 0; case AMDGPU_PP_SENSOR_UVD_POWER: - *value = cz_hwmgr->uvd_power_gated ? 0 : 1; + *((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1; return 0; case AMDGPU_PP_SENSOR_VCE_POWER: - *value = cz_hwmgr->vce_power_gated ? 0 : 1; + *((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1; return 0; case AMDGPU_PP_SENSOR_GPU_TEMP: - *value = cz_thermal_get_temperature(hwmgr); + *((uint32_t *)value) = cz_thermal_get_temperature(hwmgr); return 0; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 0eb8e886bf35..23bba2c8b18e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -113,7 +113,7 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) NULL, NULL); } - enabled = ret == 0 ? true : false; + enabled = ret == 0; cgs_notify_dpm_enabled(hwmgr->device, enabled); @@ -146,12 +146,28 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { + int ret = 0; + PHM_FUNC_CHECK(hwmgr); - if (hwmgr->hwmgr_func->force_dpm_level != NULL) - return hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); + if (hwmgr->hwmgr_func->force_dpm_level != NULL) { + ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level); + if (ret) + return ret; + + if (hwmgr->hwmgr_func->set_power_profile_state) { + if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE) + ret = hwmgr->hwmgr_func->set_power_profile_state( + hwmgr, + &hwmgr->gfx_power_profile); + else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE) + ret = hwmgr->hwmgr_func->set_power_profile_state( + hwmgr, + &hwmgr->compute_power_profile); + } + } - return 0; + return ret; } int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, @@ -427,6 +443,55 @@ int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, s } +int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_clock_by_type_with_latency(hwmgr, type, clocks); + +} + +int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL) + return -EINVAL; + + return hwmgr->hwmgr_func->get_clock_by_type_with_voltage(hwmgr, type, clocks); + +} + +int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) +{ + 
PHM_FUNC_CHECK(hwmgr); + + if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges) + return -EINVAL; + + return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, + wm_with_clock_ranges); +} + +int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr, + struct pp_display_clock_request *clock) +{ + PHM_FUNC_CHECK(hwmgr); + + if (!hwmgr->hwmgr_func->display_clock_voltage_request) + return -EINVAL; + + return hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, clock); +} + int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) { PHM_FUNC_CHECK(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 2ea9c0e78689..ff4ae3de6bb6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -106,6 +106,15 @@ int hwmgr_early_init(struct pp_instance *handle) } smu7_init_function_pointers(hwmgr); break; + case AMDGPU_FAMILY_AI: + switch (hwmgr->chip_id) { + case CHIP_VEGA10: + vega10_hwmgr_init(hwmgr); + break; + default: + return -EINVAL; + } + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h index 2930a3355948..c0193e09d58a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h @@ -30,15 +30,17 @@ struct phm_ppt_v1_clock_voltage_dependency_record { uint32_t clk; - uint8_t vddInd; + uint8_t vddInd; + uint8_t vddciInd; + uint8_t mvddInd; uint16_t vdd_offset; uint16_t vddc; uint16_t vddgfx; uint16_t vddci; uint16_t mvdd; - uint8_t phases; - uint8_t cks_enable; - uint8_t cks_voffset; + uint8_t phases; + uint8_t cks_enable; + uint8_t cks_voffset; uint32_t sclk_offset; }; @@ -94,6 +96,7 @@ struct phm_ppt_v1_pcie_record { uint8_t gen_speed; uint8_t lane_width; uint16_t usreserved; + uint16_t reserved; uint32_t pcie_sclk; }; typedef struct phm_ppt_v1_pcie_record phm_ppt_v1_pcie_record; @@ -104,5 +107,10 @@ struct phm_ppt_v1_pcie_table { }; typedef struct phm_ppt_v1_pcie_table phm_ppt_v1_pcie_table; +struct phm_ppt_v1_gpio_table { + uint8_t vrhot_triggered_sclk_dpm_index; /* SCLK DPM level index to switch to when VRHot is triggered */ +}; +typedef struct phm_ppt_v1_gpio_table phm_ppt_v1_gpio_table; + #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c new file mode 100644 index 000000000000..b71525f838e6 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -0,0 +1,396 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ppatomfwctrl.h" +#include "atomfirmware.h" +#include "pp_debug.h" + + +static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4( + const struct atom_voltage_objects_info_v4_1 *voltage_object_info_table, + uint8_t voltage_type, uint8_t voltage_mode) +{ + unsigned int size = le16_to_cpu( + voltage_object_info_table->table_header.structuresize); + unsigned int offset = + offsetof(struct atom_voltage_objects_info_v4_1, voltage_object[0]); + unsigned long start = (unsigned long)voltage_object_info_table; + + while (offset < size) { + const union atom_voltage_object_v4 *voltage_object = + (const union atom_voltage_object_v4 *)(start + offset); + + if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type && + voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode) + return voltage_object; + + offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size); + + } + + return NULL; +} + +static struct atom_voltage_objects_info_v4_1 *pp_atomfwctrl_get_voltage_info_table( + struct pp_hwmgr *hwmgr) +{ + const void *table_address; + uint16_t idx; + + idx = GetIndexIntoMasterDataTable(voltageobject_info); + table_address = cgs_atom_get_data_table(hwmgr->device, + idx, NULL, NULL, NULL); + + PP_ASSERT_WITH_CODE( + table_address, + "Error retrieving BIOS Table Address!", + return NULL); + + return (struct atom_voltage_objects_info_v4_1 *)table_address; +} + +/** +* Returns TRUE if the given voltage type is controlled by GPIO pins. +* voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC, SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ. +* voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE +*/ +bool pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(struct pp_hwmgr *hwmgr, + uint8_t voltage_type, uint8_t voltage_mode) +{ + struct atom_voltage_objects_info_v4_1 *voltage_info = + (struct atom_voltage_objects_info_v4_1 *) + pp_atomfwctrl_get_voltage_info_table(hwmgr); + bool ret; + + /* If we cannot find the table do NOT try to control this voltage. */ + PP_ASSERT_WITH_CODE(voltage_info, + "Could not find Voltage Table in BIOS.", + return false); + + ret = (pp_atomfwctrl_lookup_voltage_type_v4(voltage_info, + voltage_type, voltage_mode)) ? true : false; + + return ret; +} + +int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, + uint8_t voltage_type, uint8_t voltage_mode, + struct pp_atomfwctrl_voltage_table *voltage_table) +{ + struct atom_voltage_objects_info_v4_1 *voltage_info = + (struct atom_voltage_objects_info_v4_1 *) + pp_atomfwctrl_get_voltage_info_table(hwmgr); + const union atom_voltage_object_v4 *voltage_object; + unsigned int i; + int result = 0; + + PP_ASSERT_WITH_CODE(voltage_info, + "Could not find Voltage Table in BIOS.", + return -1); + + voltage_object = pp_atomfwctrl_lookup_voltage_type_v4(voltage_info, + voltage_type, voltage_mode); + + if (!voltage_object) + return -1; + + voltage_table->count = 0; + if (voltage_mode == VOLTAGE_OBJ_GPIO_LUT) { + PP_ASSERT_WITH_CODE( + (voltage_object->gpio_voltage_obj.gpio_entry_num <= + PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES), + "Too many voltage entries!", + result = -1); + + if (!result) { + for (i = 0; i < voltage_object->gpio_voltage_obj. 
+ gpio_entry_num; i++) { + voltage_table->entries[i].value = + le16_to_cpu(voltage_object->gpio_voltage_obj. + voltage_gpio_lut[i].voltage_level_mv); + voltage_table->entries[i].smio_low = + le32_to_cpu(voltage_object->gpio_voltage_obj. + voltage_gpio_lut[i].voltage_gpio_reg_val); + } + voltage_table->count = + voltage_object->gpio_voltage_obj.gpio_entry_num; + voltage_table->mask_low = + le32_to_cpu( + voltage_object->gpio_voltage_obj.gpio_mask_val); + voltage_table->phase_delay = + voltage_object->gpio_voltage_obj.phase_delay_us; + } + } else if (voltage_mode == VOLTAGE_OBJ_SVID2) { + voltage_table->psi1_enable = + voltage_object->svid2_voltage_obj.loadline_psi1 & 0x1; + voltage_table->psi0_enable = + voltage_object->svid2_voltage_obj.psi0_enable & 0x1; + voltage_table->max_vid_step = + voltage_object->svid2_voltage_obj.maxvstep; + voltage_table->telemetry_offset = + voltage_object->svid2_voltage_obj.telemetry_offset; + voltage_table->telemetry_slope = + voltage_object->svid2_voltage_obj.telemetry_gain; + } else + PP_ASSERT_WITH_CODE(false, + "Unsupported Voltage Object Mode!", + result = -1); + + return result; +} + + +static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table( + struct pp_hwmgr *hwmgr) +{ + const void *table_address; + uint16_t idx; + + idx = GetIndexIntoMasterDataTable(gpio_pin_lut); + table_address = cgs_atom_get_data_table(hwmgr->device, + idx, NULL, NULL, NULL); + PP_ASSERT_WITH_CODE(table_address, + "Error retrieving BIOS Table Address!", + return NULL); + + return (struct atom_gpio_pin_lut_v2_1 *)table_address; +} + +static bool pp_atomfwctrl_lookup_gpio_pin( + struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table, + const uint32_t pin_id, + struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment) +{ + unsigned int size = le16_to_cpu( + gpio_lookup_table->table_header.structuresize); + unsigned int offset = + offsetof(struct atom_gpio_pin_lut_v2_1, gpio_pin[0]); + unsigned long start = (unsigned long)gpio_lookup_table; + + while (offset < size) { + const struct atom_gpio_pin_assignment *pin_assignment = + (const struct atom_gpio_pin_assignment *)(start + offset); + + if (pin_id == pin_assignment->gpio_id) { + gpio_pin_assignment->uc_gpio_pin_bit_shift = + pin_assignment->gpio_bitshift; + gpio_pin_assignment->us_gpio_pin_aindex = + le16_to_cpu(pin_assignment->data_a_reg_index); + return true; + } + offset += offsetof(struct atom_gpio_pin_assignment, gpio_id) + 1; + } + return false; +} + +/** +* Returns TRUE if the given pin id find in lookup table. +*/ +bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, + const uint32_t pin_id, + struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment) +{ + bool ret = false; + struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table = + pp_atomfwctrl_get_gpio_lookup_table(hwmgr); + + /* If we cannot find the table do NOT try to control this voltage. */ + PP_ASSERT_WITH_CODE(gpio_lookup_table, + "Could not find GPIO lookup Table in BIOS.", + return false); + + ret = pp_atomfwctrl_lookup_gpio_pin(gpio_lookup_table, + pin_id, gpio_pin_assignment); + + return ret; +} + +/** +* Enter to SelfRefresh mode. +* @param hwmgr +*/ +int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr) +{ + /* 0 - no action + * 1 - leave power to video memory always on + */ + return 0; +} + +/** pp_atomfwctrl_get_gpu_pll_dividers_vega10(). 
+ * + * @param hwmgr input parameter: pointer to HwMgr + * @param clock_type input parameter: Clock type: 1 - GFXCLK, 2 - UCLK, 0 - All other clocks + * @param clock_value input parameter: Clock + * @param dividers output parameter:Clock dividers + */ +int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, + uint32_t clock_type, uint32_t clock_value, + struct pp_atomfwctrl_clock_dividers_soc15 *dividers) +{ + struct compute_gpu_clock_input_parameter_v1_8 pll_parameters; + struct compute_gpu_clock_output_parameter_v1_8 *pll_output; + int result; + uint32_t idx; + + pll_parameters.gpuclock_10khz = (uint32_t)clock_value; + pll_parameters.gpu_clock_type = clock_type; + + idx = GetIndexIntoMasterCmdTable(computegpuclockparam); + result = cgs_atom_exec_cmd_table(hwmgr->device, idx, &pll_parameters); + + if (!result) { + pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *) + &pll_parameters; + dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz); + dividers->ulDid = le32_to_cpu(pll_output->dfs_did); + dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult); + dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult); + dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac); + dividers->ucPll_ss_enable = pll_output->pll_ss_enable; + } + return result; +} + +int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_avfs_parameters *param) +{ + uint16_t idx; + struct atom_asic_profiling_info_v4_1 *profile; + + idx = GetIndexIntoMasterDataTable(asic_profiling_info); + profile = (struct atom_asic_profiling_info_v4_1 *) + cgs_atom_get_data_table(hwmgr->device, + idx, NULL, NULL, NULL); + + if (!profile) + return -1; + + param->ulMaxVddc = le32_to_cpu(profile->maxvddc); + param->ulMinVddc = le32_to_cpu(profile->minvddc); + param->ulMeanNsigmaAcontant0 = + le32_to_cpu(profile->avfs_meannsigma_acontant0); + param->ulMeanNsigmaAcontant1 = + le32_to_cpu(profile->avfs_meannsigma_acontant1); + param->ulMeanNsigmaAcontant2 = + le32_to_cpu(profile->avfs_meannsigma_acontant2); + param->usMeanNsigmaDcTolSigma = + le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma); + param->usMeanNsigmaPlatformMean = + le16_to_cpu(profile->avfs_meannsigma_platform_mean); + param->usMeanNsigmaPlatformSigma = + le16_to_cpu(profile->avfs_meannsigma_platform_sigma); + param->ulGbVdroopTableCksoffA0 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a0); + param->ulGbVdroopTableCksoffA1 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a1); + param->ulGbVdroopTableCksoffA2 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a2); + param->ulGbVdroopTableCksonA0 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a0); + param->ulGbVdroopTableCksonA1 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a1); + param->ulGbVdroopTableCksonA2 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a2); + param->ulGbFuseTableCksoffM1 = + le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1); + param->usGbFuseTableCksoffM2 = + le16_to_cpu(profile->avfsgb_fuse_table_cksoff_m2); + param->ulGbFuseTableCksoffB = + le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b); + param->ulGbFuseTableCksonM1 = + le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1); + param->usGbFuseTableCksonM2 = + le16_to_cpu(profile->avfsgb_fuse_table_ckson_m2); + param->ulGbFuseTableCksonB = + le32_to_cpu(profile->avfsgb_fuse_table_ckson_b); + param->usMaxVoltage025mv = + le16_to_cpu(profile->max_voltage_0_25mv); + param->ucEnableGbVdroopTableCksoff = + profile->enable_gb_vdroop_table_cksoff; + 
param->ucEnableGbVdroopTableCkson = + profile->enable_gb_vdroop_table_ckson; + param->ucEnableGbFuseTableCksoff = + profile->enable_gb_fuse_table_cksoff; + param->ucEnableGbFuseTableCkson = + profile->enable_gb_fuse_table_ckson; + param->usPsmAgeComfactor = + le16_to_cpu(profile->psm_age_comfactor); + param->ucEnableApplyAvfsCksoffVoltage = + profile->enable_apply_avfs_cksoff_voltage; + + param->ulDispclk2GfxclkM1 = + le32_to_cpu(profile->dispclk2gfxclk_a); + param->usDispclk2GfxclkM2 = + le16_to_cpu(profile->dispclk2gfxclk_b); + param->ulDispclk2GfxclkB = + le32_to_cpu(profile->dispclk2gfxclk_c); + param->ulDcefclk2GfxclkM1 = + le32_to_cpu(profile->dcefclk2gfxclk_a); + param->usDcefclk2GfxclkM2 = + le16_to_cpu(profile->dcefclk2gfxclk_b); + param->ulDcefclk2GfxclkB = + le32_to_cpu(profile->dcefclk2gfxclk_c); + param->ulPixelclk2GfxclkM1 = + le32_to_cpu(profile->pixclk2gfxclk_a); + param->usPixelclk2GfxclkM2 = + le16_to_cpu(profile->pixclk2gfxclk_b); + param->ulPixelclk2GfxclkB = + le32_to_cpu(profile->pixclk2gfxclk_c); + param->ulPhyclk2GfxclkM1 = + le32_to_cpu(profile->phyclk2gfxclk_a); + param->usPhyclk2GfxclkM2 = + le16_to_cpu(profile->phyclk2gfxclk_b); + param->ulPhyclk2GfxclkB = + le32_to_cpu(profile->phyclk2gfxclk_c); + + return 0; +} + +int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_gpio_parameters *param) +{ + struct atom_smu_info_v3_1 *info; + uint16_t idx; + + idx = GetIndexIntoMasterDataTable(smu_info); + info = (struct atom_smu_info_v3_1 *) + cgs_atom_get_data_table(hwmgr->device, + idx, NULL, NULL, NULL); + + if (!info) { + pr_info("Error retrieving BIOS smu_info Table Address!"); + return -1; + } + + param->ucAcDcGpio = info->ac_dc_gpio_bit; + param->ucAcDcPolarity = info->ac_dc_polarity; + param->ucVR0HotGpio = info->vr0hot_gpio_bit; + param->ucVR0HotPolarity = info->vr0hot_polarity; + param->ucVR1HotGpio = info->vr1hot_gpio_bit; + param->ucVR1HotPolarity = info->vr1hot_polarity; + param->ucFwCtfGpio = info->fw_ctf_gpio_bit; + param->ucFwCtfPolarity = info->fw_ctf_polarity; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h new file mode 100644 index 000000000000..7efe9b96cb33 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -0,0 +1,140 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef PP_ATOMFWCTRL_H +#define PP_ATOMFWCTRL_H + +#include "hwmgr.h" + +#define GetIndexIntoMasterCmdTable(FieldName) \ + (((char*)(&((struct atom_master_list_of_command_functions_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t)) +#define GetIndexIntoMasterDataTable(FieldName) \ + (((char*)(&((struct atom_master_list_of_data_tables_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t)) + +#define PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES 32 + +struct pp_atomfwctrl_voltage_table_entry { + uint16_t value; + uint32_t smio_low; +}; + +struct pp_atomfwctrl_voltage_table { + uint32_t count; + uint32_t mask_low; + uint32_t phase_delay; + uint8_t psi0_enable; + uint8_t psi1_enable; + uint8_t max_vid_step; + uint8_t telemetry_offset; + uint8_t telemetry_slope; + struct pp_atomfwctrl_voltage_table_entry entries[PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES]; +}; + +struct pp_atomfwctrl_gpio_pin_assignment { + uint16_t us_gpio_pin_aindex; + uint8_t uc_gpio_pin_bit_shift; +}; + +struct pp_atomfwctrl_clock_dividers_soc15 { + uint32_t ulClock; /* the actual clock */ + uint32_t ulDid; /* DFS divider */ + uint32_t ulPll_fb_mult; /* Feedback Multiplier: bit 8:0 int, bit 15:12 post_div, bit 31:16 frac */ + uint32_t ulPll_ss_fbsmult; /* Spread FB Multiplier: bit 8:0 int, bit 31:16 frac */ + uint16_t usPll_ss_slew_frac; + uint8_t ucPll_ss_enable; + uint8_t ucReserve; + uint32_t ulReserve[2]; +}; + +struct pp_atomfwctrl_avfs_parameters { + uint32_t ulMaxVddc; + uint32_t ulMinVddc; + uint8_t ucMaxVidStep; + uint32_t ulMeanNsigmaAcontant0; + uint32_t ulMeanNsigmaAcontant1; + uint32_t ulMeanNsigmaAcontant2; + uint16_t usMeanNsigmaDcTolSigma; + uint16_t usMeanNsigmaPlatformMean; + uint16_t usMeanNsigmaPlatformSigma; + uint32_t ulGbVdroopTableCksoffA0; + uint32_t ulGbVdroopTableCksoffA1; + uint32_t ulGbVdroopTableCksoffA2; + uint32_t ulGbVdroopTableCksonA0; + uint32_t ulGbVdroopTableCksonA1; + uint32_t ulGbVdroopTableCksonA2; + uint32_t ulGbFuseTableCksoffM1; + uint16_t usGbFuseTableCksoffM2; + uint32_t ulGbFuseTableCksoffB;\ + uint32_t ulGbFuseTableCksonM1; + uint16_t usGbFuseTableCksonM2; + uint32_t ulGbFuseTableCksonB; + uint16_t usMaxVoltage025mv; + uint8_t ucEnableGbVdroopTableCksoff; + uint8_t ucEnableGbVdroopTableCkson; + uint8_t ucEnableGbFuseTableCksoff; + uint8_t ucEnableGbFuseTableCkson; + uint16_t usPsmAgeComfactor; + uint8_t ucEnableApplyAvfsCksoffVoltage; + uint32_t ulDispclk2GfxclkM1; + uint16_t usDispclk2GfxclkM2; + uint32_t ulDispclk2GfxclkB; + uint32_t ulDcefclk2GfxclkM1; + uint16_t usDcefclk2GfxclkM2; + uint32_t ulDcefclk2GfxclkB; + uint32_t ulPixelclk2GfxclkM1; + uint16_t usPixelclk2GfxclkM2; + uint32_t ulPixelclk2GfxclkB; + uint32_t ulPhyclk2GfxclkM1; + uint16_t usPhyclk2GfxclkM2; + uint32_t ulPhyclk2GfxclkB; +}; + +struct pp_atomfwctrl_gpio_parameters { + uint8_t ucAcDcGpio; + uint8_t ucAcDcPolarity; + uint8_t ucVR0HotGpio; + uint8_t ucVR0HotPolarity; + uint8_t ucVR1HotGpio; + uint8_t ucVR1HotPolarity; + uint8_t ucFwCtfGpio; + uint8_t ucFwCtfPolarity; +}; +int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, + uint32_t clock_type, uint32_t clock_value, + struct pp_atomfwctrl_clock_dividers_soc15 *dividers); +int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr); +bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pin_id, + struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment); + +int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, uint8_t voltage_type, + uint8_t voltage_mode, struct pp_atomfwctrl_voltage_table *voltage_table); +bool 
pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(struct pp_hwmgr *hwmgr, + uint8_t voltage_type, uint8_t voltage_mode); + +int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_avfs_parameters *param); +int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_gpio_parameters *param); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index f75ee33ec5bb..8f663ab56a80 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -89,6 +89,7 @@ enum DPM_EVENT_SRC { DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 }; +static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable); static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); @@ -1309,11 +1310,9 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable thermal auto throttle!", result = tmp_result); - if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)), - "Failed to disable AVFS!", - return -EINVAL); - } + tmp_result = smu7_avfs_control(hwmgr, false); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable AVFS!", result = tmp_result); tmp_result = smu7_stop_dpm(hwmgr); PP_ASSERT_WITH_CODE((tmp_result == 0), @@ -1544,7 +1543,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) if (vddc >= 2000 || vddc == 0) return -EINVAL; } else { - pr_warning("failed to retrieving EVV voltage!\n"); + pr_warn("failed to retrieving EVV voltage!\n"); continue; } @@ -3289,22 +3288,60 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, return 0; } -static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) +static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, + struct pp_gpu_power *query) +{ + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PmStatusLogStart), + "Failed to start pm status log!", + return -1); + + msleep_interruptible(20); + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PmStatusLogSample), + "Failed to sample pm status log!", + return -1); + + query->vddc_power = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixSMU_PM_STATUS_40); + query->vddci_power = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixSMU_PM_STATUS_49); + query->max_gpu_power = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixSMU_PM_STATUS_94); + query->average_gpu_power = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixSMU_PM_STATUS_95); + + return 0; +} + +static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, + void *value, int *size) { uint32_t sclk, mclk, activity_percent; uint32_t offset; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + /* size must be at least 4 bytes for all sensors */ + if (*size < 4) + return -EINVAL; + switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - *value = sclk; + *((uint32_t *)value) = sclk; + *size = 4; return 0; case AMDGPU_PP_SENSOR_GFX_MCLK: smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); 
- *value = mclk; + *((uint32_t *)value) = mclk; + *size = 4; return 0; case AMDGPU_PP_SENSOR_GPU_LOAD: offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, @@ -3314,17 +3351,26 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); activity_percent += 0x80; activity_percent >>= 8; - *value = activity_percent > 100 ? 100 : activity_percent; + *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; + *size = 4; return 0; case AMDGPU_PP_SENSOR_GPU_TEMP: - *value = smu7_thermal_get_temperature(hwmgr); + *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr); + *size = 4; return 0; case AMDGPU_PP_SENSOR_UVD_POWER: - *value = data->uvd_power_gated ? 0 : 1; + *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; + *size = 4; return 0; case AMDGPU_PP_SENSOR_VCE_POWER: - *value = data->vce_power_gated ? 0 : 1; + *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; + *size = 4; return 0; + case AMDGPU_PP_SENSOR_GPU_POWER: + if (*size < sizeof(struct pp_gpu_power)) + return -EINVAL; + *size = sizeof(struct pp_gpu_power); + return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); default: return -EINVAL; } @@ -4502,6 +4548,102 @@ static int smu7_release_firmware(struct pp_hwmgr *hwmgr) return 0; } +static void smu7_find_min_clock_masks(struct pp_hwmgr *hwmgr, + uint32_t *sclk_mask, uint32_t *mclk_mask, + uint32_t min_sclk, uint32_t min_mclk) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &(data->dpm_table); + uint32_t i; + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + if (dpm_table->sclk_table.dpm_levels[i].enabled && + dpm_table->sclk_table.dpm_levels[i].value >= min_sclk) + *sclk_mask |= 1 << i; + } + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + if (dpm_table->mclk_table.dpm_levels[i].enabled && + dpm_table->mclk_table.dpm_levels[i].value >= min_mclk) + *mclk_mask |= 1 << i; + } +} + +static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int tmp_result, result = 0; + uint32_t sclk_mask = 0, mclk_mask = 0; + + if (hwmgr->chip_id == CHIP_FIJI) { + if (request->type == AMD_PP_GFX_PROFILE) + smu7_enable_power_containment(hwmgr); + else if (request->type == AMD_PP_COMPUTE_PROFILE) + smu7_disable_power_containment(hwmgr); + } + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO) + return -EINVAL; + + tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to freeze SCLK MCLK DPM!", + result = tmp_result); + + tmp_result = smum_populate_requested_graphic_levels(hwmgr, request); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to populate requested graphic levels!", + result = tmp_result); + + tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to unfreeze SCLK MCLK DPM!", + result = tmp_result); + + smu7_find_min_clock_masks(hwmgr, &sclk_mask, &mclk_mask, + request->min_sclk, request->min_mclk); + + if (sclk_mask) { + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask. + sclk_dpm_enable_mask & + sclk_mask); + } + + if (mclk_mask) { + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask. 
+ mclk_dpm_enable_mask & + mclk_mask); + } + + return result; +} + +static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) +{ + if (enable) { + if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( + hwmgr->smumgr, PPSMC_MSG_EnableAvfs), + "Failed to enable AVFS!", + return -EINVAL); + } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( + hwmgr->smumgr, PPSMC_MSG_DisableAvfs), + "Failed to disable AVFS!", + return -EINVAL); + + return 0; +} + static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .backend_init = &smu7_hwmgr_backend_init, .backend_fini = &smu7_hwmgr_backend_fini, @@ -4551,6 +4693,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .dynamic_state_management_disable = smu7_disable_dpm_tasks, .request_firmware = smu7_request_firmware, .release_firmware = smu7_release_firmware, + .set_power_profile_state = smu7_set_power_profile_state, + .avfs_control = smu7_avfs_control, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c new file mode 100644 index 000000000000..83949550edac --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -0,0 +1,4450 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include "linux/delay.h" + +#include "hwmgr.h" +#include "amd_powerplay.h" +#include "vega10_smumgr.h" +#include "hardwaremanager.h" +#include "ppatomfwctrl.h" +#include "atomfirmware.h" +#include "cgs_common.h" +#include "vega10_powertune.h" +#include "smu9.h" +#include "smu9_driver_if.h" +#include "vega10_inc.h" +#include "pp_soc15.h" +#include "pppcielanes.h" +#include "vega10_hwmgr.h" +#include "vega10_processpptables.h" +#include "vega10_pptable.h" +#include "vega10_thermal.h" +#include "pp_debug.h" +#include "pp_acpi.h" +#include "amd_pcie_helpers.h" +#include "cgs_linux.h" +#include "ppinterrupt.h" + + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +#define HBM_MEMORY_CHANNEL_WIDTH 128 + +uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; + +#define MEM_FREQ_LOW_LATENCY 25000 +#define MEM_FREQ_HIGH_LATENCY 80000 +#define MEM_LATENCY_HIGH 245 +#define MEM_LATENCY_LOW 35 +#define MEM_LATENCY_ERR 0xFFFF + +#define mmDF_CS_AON0_DramBaseAddress0 0x0044 +#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0 + +//DF_CS_AON0_DramBaseAddress0 +#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 +#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 +#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4 +#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8 +#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc +#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L +#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L +#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L +#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L +#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L + +const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); + +struct vega10_power_state *cast_phw_vega10_power_state( + struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL;); + + return (struct vega10_power_state *)hw_ps; +} + +const struct vega10_power_state *cast_const_phw_vega10_power_state( + const struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL;); + + return (const struct vega10_power_state *)hw_ps; +} + +static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + data->registry_data.sclk_dpm_key_disabled = + hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; + data->registry_data.socclk_dpm_key_disabled = + hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true; + data->registry_data.mclk_dpm_key_disabled = + hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; + + data->registry_data.dcefclk_dpm_key_disabled = + hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? 
false : true; + + if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) { + data->registry_data.power_containment_support = 1; + data->registry_data.enable_pkg_pwr_tracking_feature = 1; + data->registry_data.enable_tdc_limit_feature = 1; + } + + data->registry_data.pcie_dpm_key_disabled = 1; + data->registry_data.disable_water_mark = 0; + + data->registry_data.fan_control_support = 1; + data->registry_data.thermal_support = 1; + data->registry_data.fw_ctf_enabled = 1; + + data->registry_data.avfs_support = 1; + data->registry_data.led_dpm_enabled = 1; + + data->registry_data.vr0hot_enabled = 1; + data->registry_data.vr1hot_enabled = 1; + data->registry_data.regulator_hot_gpio_support = 1; + + data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT; + data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + + data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT; + data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT; + data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT; + data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT; +} + +static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct cgs_system_info sys_info = {0}; + int result; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + + if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + + if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UnTabledHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODFuzzyFanControlSupport); + + 
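/*
 * Illustrative aside, not part of this patch: platformCaps behaves as a
 * plain bitmap with one bit per PHM_PlatformCaps_* value, so the calls
 * above and below just set, clear or test a single capability bit.  A
 * minimal sketch of the idea, assuming a uint32_t word array (the real
 * phm_cap_set()/phm_cap_unset()/phm_cap_enabled() helpers live in the
 * powerplay headers and may differ in detail):
 *
 *   static inline void cap_set(uint32_t *caps, unsigned int c)
 *   {
 *           caps[c / 32] |= 1u << (c % 32);          /* mark cap present */
 *   }
 *
 *   static inline bool cap_enabled(const uint32_t *caps, unsigned int c)
 *   {
 *           return caps[c / 32] & (1u << (c % 32));  /* test cap bit */
 *   }
 */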
phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMC); + + /* power tune caps */ + /* assume disabled */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + if (data->registry_data.power_containment_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + if (table_info->tdp_table->usClockStretchAmount && + data->registry_data.clock_stretcher_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEDPM); + + return 0; +} + +static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + int i; + + vega10_initialize_power_tune_defaults(hwmgr); + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + data->smu_features[i].smu_feature_id = 0xffff; + data->smu_features[i].smu_feature_bitmap = 1 << i; + data->smu_features[i].enabled = false; + data->smu_features[i].supported = false; + } + + data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = + FEATURE_DPM_PREFETCHER_BIT; + data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id = + FEATURE_DPM_GFXCLK_BIT; + data->smu_features[GNLD_DPM_UCLK].smu_feature_id = + FEATURE_DPM_UCLK_BIT; + data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id = + FEATURE_DPM_SOCCLK_BIT; + data->smu_features[GNLD_DPM_UVD].smu_feature_id = + FEATURE_DPM_UVD_BIT; + data->smu_features[GNLD_DPM_VCE].smu_feature_id = + FEATURE_DPM_VCE_BIT; + data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id = + FEATURE_DPM_MP0CLK_BIT; + data->smu_features[GNLD_DPM_LINK].smu_feature_id = + FEATURE_DPM_LINK_BIT; + data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id = + FEATURE_DPM_DCEFCLK_BIT; + data->smu_features[GNLD_ULV].smu_feature_id = + FEATURE_ULV_BIT; + data->smu_features[GNLD_AVFS].smu_feature_id = + FEATURE_AVFS_BIT; + data->smu_features[GNLD_DS_GFXCLK].smu_feature_id = + FEATURE_DS_GFXCLK_BIT; + data->smu_features[GNLD_DS_SOCCLK].smu_feature_id = + FEATURE_DS_SOCCLK_BIT; + data->smu_features[GNLD_DS_LCLK].smu_feature_id = + FEATURE_DS_LCLK_BIT; + data->smu_features[GNLD_PPT].smu_feature_id = + FEATURE_PPT_BIT; + data->smu_features[GNLD_TDC].smu_feature_id = + FEATURE_TDC_BIT; + data->smu_features[GNLD_THERMAL].smu_feature_id = + FEATURE_THERMAL_BIT; + data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id = + FEATURE_GFX_PER_CU_CG_BIT; + data->smu_features[GNLD_RM].smu_feature_id = + FEATURE_RM_BIT; + data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id = + FEATURE_DS_DCEFCLK_BIT; + data->smu_features[GNLD_ACDC].smu_feature_id = + FEATURE_ACDC_BIT; + data->smu_features[GNLD_VR0HOT].smu_feature_id 
= + FEATURE_VR0HOT_BIT; + data->smu_features[GNLD_VR1HOT].smu_feature_id = + FEATURE_VR1HOT_BIT; + data->smu_features[GNLD_FW_CTF].smu_feature_id = + FEATURE_FW_CTF_BIT; + data->smu_features[GNLD_LED_DISPLAY].smu_feature_id = + FEATURE_LED_DISPLAY_BIT; + data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = + FEATURE_FAN_CONTROL_BIT; + data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id = + FEATURE_VOLTAGE_CONTROLLER_BIT; + + if (!data->registry_data.prefetcher_dpm_key_disabled) + data->smu_features[GNLD_DPM_PREFETCHER].supported = true; + + if (!data->registry_data.sclk_dpm_key_disabled) + data->smu_features[GNLD_DPM_GFXCLK].supported = true; + + if (!data->registry_data.mclk_dpm_key_disabled) + data->smu_features[GNLD_DPM_UCLK].supported = true; + + if (!data->registry_data.socclk_dpm_key_disabled) + data->smu_features[GNLD_DPM_SOCCLK].supported = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM)) + data->smu_features[GNLD_DPM_UVD].supported = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEDPM)) + data->smu_features[GNLD_DPM_VCE].supported = true; + + if (!data->registry_data.pcie_dpm_key_disabled) + data->smu_features[GNLD_DPM_LINK].supported = true; + + if (!data->registry_data.dcefclk_dpm_key_disabled) + data->smu_features[GNLD_DPM_DCEFCLK].supported = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep) && + data->registry_data.sclk_deep_sleep_support) { + data->smu_features[GNLD_DS_GFXCLK].supported = true; + data->smu_features[GNLD_DS_SOCCLK].supported = true; + data->smu_features[GNLD_DS_LCLK].supported = true; + } + + if (data->registry_data.enable_pkg_pwr_tracking_feature) + data->smu_features[GNLD_PPT].supported = true; + + if (data->registry_data.enable_tdc_limit_feature) + data->smu_features[GNLD_TDC].supported = true; + + if (data->registry_data.thermal_support) + data->smu_features[GNLD_THERMAL].supported = true; + + if (data->registry_data.fan_control_support) + data->smu_features[GNLD_FAN_CONTROL].supported = true; + + if (data->registry_data.fw_ctf_enabled) + data->smu_features[GNLD_FW_CTF].supported = true; + + if (data->registry_data.avfs_support) + data->smu_features[GNLD_AVFS].supported = true; + + if (data->registry_data.led_dpm_enabled) + data->smu_features[GNLD_LED_DISPLAY].supported = true; + + if (data->registry_data.vr1hot_enabled) + data->smu_features[GNLD_VR1HOT].supported = true; + + if (data->registry_data.vr0hot_enabled) + data->smu_features[GNLD_VR0HOT].supported = true; + +} + +#ifdef PPLIB_VEGA10_EVV_SUPPORT +static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + uint16_t virtual_voltage_id, int32_t *socclk) +{ + uint8_t entry_id; + uint8_t voltage_id; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + PP_ASSERT_WITH_CODE(lookup_table->count != 0, + "Lookup table is empty", + return -EINVAL); + + /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ + for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) { + voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd; + if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id) + break; + } + + PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count, + "Can't find requested voltage id in vdd_dep_on_socclk table!", + return -EINVAL); + + *socclk = 
table_info->vdd_dep_on_socclk->entries[entry_id].clk; + + return 0; +} + +#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01 +/** +* Get Leakage VDDC based on leakage ID. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0. +*/ +static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint16_t vv_id; + uint32_t vddc = 0; + uint16_t i, j; + uint32_t sclk = 0; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table = + table_info->vdd_dep_on_socclk; + int result; + + for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) { + vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + + if (!vega10_get_socclk_for_voltage_evv(hwmgr, + table_info->vddc_lookup_table, vv_id, &sclk)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + for (j = 1; j < socclk_table->count; j++) { + if (socclk_table->entries[j].clk == sclk && + socclk_table->entries[j].cks_enable == 0) { + sclk += 5000; + break; + } + } + } + + PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, + VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc), + "Error retrieving EVV voltage value!", + continue); + + + /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */ + PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0), + "Invalid VDDC value", result = -EINVAL;); + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddc != 0 && vddc != vv_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100); + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; + data->vddc_leakage.count++; + } + } + } + + return 0; +} + +/** + * Change virtual leakage voltage to actual value. + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pointer to changing voltage + * @param pointer to leakage table + */ +static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, + uint16_t *voltage, struct vega10_leakage_voltage *leakage_table) +{ + uint32_t index; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (index = 0; index < leakage_table->count; index++) { + /* if this voltage matches a leakage voltage ID */ + /* patch with actual leakage voltage */ + if (leakage_table->leakage_id[index] == *voltage) { + *voltage = leakage_table->actual_voltage[index]; + break; + } + } + + if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) + pr_info("Voltage value looks like a Leakage ID \ + but it's not patched\n"); +} + +/** +* Patch voltage lookup table by EVV leakages. +* +* @param hwmgr the address of the powerplay hardware manager. 
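* (Context, inferred from the surrounding code: the leakage table maps the
* virtual voltage IDs 0xff01..0xff08 to per-part voltages measured via EVV,
* and patching simply substitutes the measured millivolt value wherever one
* of those placeholder IDs appears in the lookup table.)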
+* @param pointer to voltage lookup table +* @param pointer to leakage table +* @return always 0 +*/ +static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + struct vega10_leakage_voltage *leakage_table) +{ + uint32_t i; + + for (i = 0; i < lookup_table->count; i++) + vega10_patch_with_vdd_leakage(hwmgr, + &lookup_table->entries[i].us_vdd, leakage_table); + + return 0; +} + +static int vega10_patch_clock_voltage_limits_with_vddc_leakage( + struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table, + uint16_t *vddc) +{ + vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); + + return 0; +} +#endif + +static int vega10_patch_voltage_dependency_tables_with_lookup_table( + struct pp_hwmgr *hwmgr) +{ + uint8_t entry_id; + uint8_t voltage_id; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table = + table_info->vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table = + table_info->vdd_dep_on_dcefclk; + struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table = + table_info->vdd_dep_on_pixclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table = + table_info->vdd_dep_on_dispclk; + struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table = + table_info->vdd_dep_on_phyclk; + struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + for (entry_id = 0; entry_id < socclk_table->count; entry_id++) { + voltage_id = socclk_table->entries[entry_id].vddInd; + socclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) { + voltage_id = gfxclk_table->entries[entry_id].vddInd; + gfxclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) { + voltage_id = dcefclk_table->entries[entry_id].vddInd; + dcefclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) { + voltage_id = pixclk_table->entries[entry_id].vddInd; + pixclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) { + voltage_id = dspclk_table->entries[entry_id].vddInd; + dspclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) { + voltage_id = phyclk_table->entries[entry_id].vddInd; + phyclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { + voltage_id = mclk_table->entries[entry_id].vddInd; + mclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + voltage_id = mclk_table->entries[entry_id].vddciInd; + mclk_table->entries[entry_id].vddci = + table_info->vddci_lookup_table->entries[voltage_id].us_vdd; + voltage_id = mclk_table->entries[entry_id].mvddInd; + 
mclk_table->entries[entry_id].mvdd = + table_info->vddmem_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { + voltage_id = mm_table->entries[entry_id].vddcInd; + mm_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + return 0; + +} + +static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_voltage_lookup_table *lookup_table) +{ + uint32_t table_size, i, j; + struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; + + PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count, + "Lookup table is empty", return -EINVAL); + + table_size = lookup_table->count; + + /* Sorting voltages */ + for (i = 0; i < table_size - 1; i++) { + for (j = i + 1; j > 0; j--) { + if (lookup_table->entries[j].us_vdd < + lookup_table->entries[j - 1].us_vdd) { + tmp_voltage_lookup_record = lookup_table->entries[j - 1]; + lookup_table->entries[j - 1] = lookup_table->entries[j]; + lookup_table->entries[j] = tmp_voltage_lookup_record; + } + } + } + + return 0; +} + +static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr) +{ + int result = 0; + int tmp_result; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); +#ifdef PPLIB_VEGA10_EVV_SUPPORT + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr, + table_info->vddc_lookup_table, &(data->vddc_leakage)); + if (tmp_result) + result = tmp_result; + + tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, + &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); + if (tmp_result) + result = tmp_result; +#endif + + tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr); + if (tmp_result) + result = tmp_result; + + tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); + if (tmp_result) + result = tmp_result; + + return result; +} + +static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = + table_info->vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = + table_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table, + "VDD dependency on SCLK table is missing. \ + This table is mandatory", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, + "VDD dependency on SCLK table is empty. \ + This table is mandatory", return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table, + "VDD dependency on MCLK table is missing. \ + This table is mandatory", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, + "VDD dependency on MCLK table is empty. 
\ + This table is mandatory", return -EINVAL); + + table_info->max_clock_voltage_on_ac.sclk = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; + table_info->max_clock_voltage_on_ac.mclk = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; + table_info->max_clock_voltage_on_ac.vddc = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; + table_info->max_clock_voltage_on_ac.vddci = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = + table_info->max_clock_voltage_on_ac.sclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = + table_info->max_clock_voltage_on_ac.mclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = + table_info->max_clock_voltage_on_ac.vddc; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = + table_info->max_clock_voltage_on_ac.vddci; + + return 0; +} + +static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; + + kfree(hwmgr->backend); + hwmgr->backend = NULL; + + return 0; +} + +static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct vega10_hwmgr *data; + uint32_t config_telemetry = 0; + struct pp_atomfwctrl_voltage_table vol_table; + struct cgs_system_info sys_info = {0}; + + data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + + vega10_set_default_registry_data(hwmgr); + + data->disable_dpm_mask = 0xff; + data->workload_mask = 0xff; + + /* need to set voltage control types before EVV patching */ + data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE; + data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE; + data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE; + + /* VDDCR_SOC */ + if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { + if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2, + &vol_table)) { + config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) | + (vol_table.telemetry_offset & 0xff); + data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2; + } + } else { + kfree(hwmgr->backend); + hwmgr->backend = NULL; + PP_ASSERT_WITH_CODE(false, + "VDDCR_SOC is not SVID2!", + return -1); + } + + /* MVDDC */ + if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) { + if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2, + &vol_table)) { + config_telemetry |= + ((vol_table.telemetry_slope << 24) & 0xff000000) | + ((vol_table.telemetry_offset << 16) & 0xff0000); + data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2; + } + } + + /* VDDCI_MEM */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) + data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO; + } + + data->config_telemetry = config_telemetry; + + vega10_set_features_platform_caps(hwmgr); + + vega10_init_dpm_defaults(hwmgr); + +#ifdef PPLIB_VEGA10_EVV_SUPPORT + /* Get leakage voltage based on leakage ID. */ + PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr), + "Get EVV Voltage Failed. 
Abort Driver loading!", + return -1); +#endif + + /* Patch our voltage dependency table with actual leakage voltage + * We need to perform leakage translation before it's used by other functions + */ + vega10_complete_dependency_tables(hwmgr); + + /* Parse pptable data read from VBIOS */ + vega10_set_private_data_based_on_pptable(hwmgr); + + data->is_tlu_enabled = false; + + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + VEGA10_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ + /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ + hwmgr->platform_descriptor.clockStep.engineClock = 500; + hwmgr->platform_descriptor.clockStep.memoryClock = 500; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + data->total_active_cus = sys_info.value; + /* Setup default Overdrive Fan control settings */ + data->odn_fan_table.target_fan_speed = + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM; + data->odn_fan_table.target_temperature = + hwmgr->thermal_controller. + advanceFanControlParameters.ucTargetTemperature; + data->odn_fan_table.min_performance_clock = + hwmgr->thermal_controller.advanceFanControlParameters. + ulMinFanSCLKAcousticLimit; + data->odn_fan_table.min_fan_limit = + hwmgr->thermal_controller. + advanceFanControlParameters.usFanPWMMinLimit * + hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; + + return result; +} + +static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + data->low_sclk_interrupt_threshold = 0; + + return 0; +} + +static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + + struct pp_atomfwctrl_voltage_table table; + uint8_t i, j; + uint32_t mask = 0; + uint32_t tmp; + int32_t ret = 0; + + ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM, + VOLTAGE_OBJ_GPIO_LUT, &table); + + if (!ret) { + tmp = table.mask_low; + for (i = 0, j = 0; i < 32; i++) { + if (tmp & 1) { + mask |= (uint32_t)(i << (8 * j)); + if (++j >= 3) + break; + } + tmp >>= 1; + } + } + + pp_table->LedPin0 = (uint8_t)(mask & 0xff); + pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff); + pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff); + return 0; +} + +static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr), + "Failed to init sclk threshold!", + return -EINVAL); + + PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr), + "Failed to set up led dpm config!", + return -EINVAL); + + return 0; +} + +static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + uint32_t features_enabled; + + if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) { + if (features_enabled & SMC_DPM_FEATURES) + return true; + } + return false; +} + +/** +* Remove repeated voltage values and create table with unique values. +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @param vol_table the pointer to changing voltage table +* @return 0 in success +*/ + +static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + uint32_t i, j; + uint16_t vvalue; + bool found = false; + struct pp_atomfwctrl_voltage_table *table; + + PP_ASSERT_WITH_CODE(vol_table, + "Voltage Table empty.", return -EINVAL); + table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table), + GFP_KERNEL); + + if (!table) + return -ENOMEM; + + table->mask_low = vol_table->mask_low; + table->phase_delay = vol_table->phase_delay; + + for (i = 0; i < vol_table->count; i++) { + vvalue = vol_table->entries[i].value; + found = false; + + for (j = 0; j < table->count; j++) { + if (vvalue == table->entries[j].value) { + found = true; + break; + } + } + + if (!found) { + table->entries[table->count].value = vvalue; + table->entries[table->count].smio_low = + vol_table->entries[i].smio_low; + table->count++; + } + } + + memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table)); + kfree(table); + + return 0; +} + +static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *dep_table, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + int i; + + PP_ASSERT_WITH_CODE(dep_table->count, + "Voltage Dependency Table empty.", + return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < vol_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].mvdd; + vol_table->entries[i].smio_low = 0; + } + + PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, + vol_table), + "Failed to trim MVDD Table!", + return -1); + + return 0; +} + +static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *dep_table, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE(dep_table->count, + "Voltage Dependency Table empty.", + return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < dep_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].vddci; + vol_table->entries[i].smio_low = 0; + } + + PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table), + "Failed to trim VDDCI table.", + return -1); + + return 0; +} + +static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *dep_table, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + int i; + + PP_ASSERT_WITH_CODE(dep_table->count, + "Voltage Dependency Table empty.", + return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < vol_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].vddc; + vol_table->entries[i].smio_low = 0; + } + + return 0; +} + +/* ---- Voltage Tables ---- + * If the voltage table would be bigger than + * what will fit into the state table on + * the SMC keep only the higher entries. 
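 * As a small worked example of the trimming below: with 18 unique voltage
 * levels and a 16-entry limit, diff = 2, entries[2..17] are copied down to
 * entries[0..15], so the two lowest voltages are dropped and the higher
 * ones are kept.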
+ */ +static void vega10_trim_voltage_table_to_fit_state_table( + struct pp_hwmgr *hwmgr, + uint32_t max_vol_steps, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + unsigned int i, diff; + + if (vol_table->count <= max_vol_steps) + return; + + diff = vol_table->count - max_vol_steps; + + for (i = 0; i < max_vol_steps; i++) + vol_table->entries[i] = vol_table->entries[i + diff]; + + vol_table->count = max_vol_steps; +} + +/** +* Create Voltage Tables. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + int result; + + if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 || + data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) { + result = vega10_get_mvdd_voltage_table(hwmgr, + table_info->vdd_dep_on_mclk, + &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE(!result, + "Failed to retrieve MVDDC table!", + return result); + } + + if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) { + result = vega10_get_vddci_voltage_table(hwmgr, + table_info->vdd_dep_on_mclk, + &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE(!result, + "Failed to retrieve VDDCI_MEM table!", + return result); + } + + if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 || + data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) { + result = vega10_get_vdd_voltage_table(hwmgr, + table_info->vdd_dep_on_sclk, + &(data->vddc_voltage_table)); + PP_ASSERT_WITH_CODE(!result, + "Failed to retrieve VDDCR_SOC table!", + return result); + } + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16, + "Too many voltage values for VDDC. Trimming to fit state table.", + vega10_trim_voltage_table_to_fit_state_table(hwmgr, + 16, &(data->vddc_voltage_table))); + + PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16, + "Too many voltage values for VDDCI. Trimming to fit state table.", + vega10_trim_voltage_table_to_fit_state_table(hwmgr, + 16, &(data->vddci_voltage_table))); + + PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16, + "Too many voltage values for MVDD. Trimming to fit state table.", + vega10_trim_voltage_table_to_fit_state_table(hwmgr, + 16, &(data->mvdd_voltage_table))); + + + return 0; +} + +/* + * @fn vega10_init_dpm_state + * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff. + * + * @param dpm_state - the address of the DPM Table to initiailize. + * @return None. 
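 * (0xff appears to act as a "no limit requested yet" sentinel here; the
 * soft/hard levels are narrowed later when a power state or client request
 * is applied.  This is an inference from how the fields are used, not from
 * SMU documentation.)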
+ */ +static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state) +{ + dpm_state->soft_min_level = 0xff; + dpm_state->soft_max_level = 0xff; + dpm_state->hard_min_level = 0xff; + dpm_state->hard_max_level = 0xff; +} + +static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr, + struct vega10_single_dpm_table *dpm_table, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) +{ + int i; + + for (i = 0; i < dep_table->count; i++) { + if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value != + dep_table->entries[i].clk) { + dpm_table->dpm_levels[dpm_table->count].value = + dep_table->entries[i].clk; + dpm_table->dpm_levels[dpm_table->count].enabled = true; + dpm_table->count++; + } + } +} +static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *bios_pcie_table = + table_info->pcie_table; + uint32_t i; + + PP_ASSERT_WITH_CODE(bios_pcie_table->count, + "Incorrect number of PCIE States from VBIOS!", + return -1); + + for (i = 0; i < NUM_LINK_LEVELS - 1; i++) { + if (data->registry_data.pcieSpeedOverride) + pcie_table->pcie_gen[i] = + data->registry_data.pcieSpeedOverride; + else + pcie_table->pcie_gen[i] = + bios_pcie_table->entries[i].gen_speed; + + if (data->registry_data.pcieLaneOverride) + pcie_table->pcie_lane[i] = + data->registry_data.pcieLaneOverride; + else + pcie_table->pcie_lane[i] = + bios_pcie_table->entries[i].lane_width; + + if (data->registry_data.pcieClockOverride) + pcie_table->lclk[i] = + data->registry_data.pcieClockOverride; + else + pcie_table->lclk[i] = + bios_pcie_table->entries[i].pcie_sclk; + + pcie_table->count++; + } + + if (data->registry_data.pcieSpeedOverride) + pcie_table->pcie_gen[i] = data->registry_data.pcieSpeedOverride; + else + pcie_table->pcie_gen[i] = + bios_pcie_table->entries[bios_pcie_table->count - 1].gen_speed; + + if (data->registry_data.pcieLaneOverride) + pcie_table->pcie_lane[i] = data->registry_data.pcieLaneOverride; + else + pcie_table->pcie_lane[i] = + bios_pcie_table->entries[bios_pcie_table->count - 1].lane_width; + + if (data->registry_data.pcieClockOverride) + pcie_table->lclk[i] = data->registry_data.pcieClockOverride; + else + pcie_table->lclk[i] = + bios_pcie_table->entries[bios_pcie_table->count - 1].pcie_sclk; + + pcie_table->count++; + + return 0; +} + +/* + * This function is to initialize all DPM state tables + * for SMU based on the dependency table. + * Dynamic state patching function will then trim these + * state tables to the allowed range based + * on the power policy or external client requests, + * such as UVD request, etc. 
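 * As a brief illustration of vega10_setup_default_single_dpm_table() above:
 * a dependency table whose clock column reads {852, 991, 991, 1138} yields
 * three DPM levels {852, 991, 1138}, because a new level is appended only
 * when its clock differs from the last level already recorded.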
+ */ +static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct vega10_single_dpm_table *dpm_table; + uint32_t i; + + struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table = + table_info->vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table = + table_info->mm_dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table = + table_info->vdd_dep_on_dcefclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table = + table_info->vdd_dep_on_pixclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table = + table_info->vdd_dep_on_dispclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table = + table_info->vdd_dep_on_phyclk; + + PP_ASSERT_WITH_CODE(dep_soc_table, + "SOCCLK dependency table is missing. This table is mandatory", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1, + "SOCCLK dependency table is empty. This table is mandatory", + return -EINVAL); + + PP_ASSERT_WITH_CODE(dep_gfx_table, + "GFXCLK dependency table is missing. This table is mandatory", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1, + "GFXCLK dependency table is empty. This table is mandatory", + return -EINVAL); + + PP_ASSERT_WITH_CODE(dep_mclk_table, + "MCLK dependency table is missing. This table is mandatory", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, + "MCLK dependency table has to have is missing. This table is mandatory", + return -EINVAL); + + /* Initialize Sclk DPM table based on allow Sclk values */ + data->dpm_table.soc_table.count = 0; + data->dpm_table.gfx_table.count = 0; + data->dpm_table.dcef_table.count = 0; + + dpm_table = &(data->dpm_table.soc_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_soc_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.gfx_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_gfx_table); + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + /* Initialize Mclk DPM table based on allow Mclk values */ + data->dpm_table.mem_table.count = 0; + dpm_table = &(data->dpm_table.mem_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_mclk_table); + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + data->dpm_table.eclk_table.count = 0; + dpm_table = &(data->dpm_table.eclk_table); + for (i = 0; i < dep_mm_table->count; i++) { + if (i == 0 || dpm_table->dpm_levels + [dpm_table->count - 1].value != + dep_mm_table->entries[i].eclk) { + dpm_table->dpm_levels[dpm_table->count].value = + dep_mm_table->entries[i].eclk; + dpm_table->dpm_levels[dpm_table->count].enabled = + (i == 0) ? 
true : false; + dpm_table->count++; + } + } + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + data->dpm_table.vclk_table.count = 0; + data->dpm_table.dclk_table.count = 0; + dpm_table = &(data->dpm_table.vclk_table); + for (i = 0; i < dep_mm_table->count; i++) { + if (i == 0 || dpm_table->dpm_levels + [dpm_table->count - 1].value != + dep_mm_table->entries[i].vclk) { + dpm_table->dpm_levels[dpm_table->count].value = + dep_mm_table->entries[i].vclk; + dpm_table->dpm_levels[dpm_table->count].enabled = + (i == 0) ? true : false; + dpm_table->count++; + } + } + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.dclk_table); + for (i = 0; i < dep_mm_table->count; i++) { + if (i == 0 || dpm_table->dpm_levels + [dpm_table->count - 1].value != + dep_mm_table->entries[i].dclk) { + dpm_table->dpm_levels[dpm_table->count].value = + dep_mm_table->entries[i].dclk; + dpm_table->dpm_levels[dpm_table->count].enabled = + (i == 0) ? true : false; + dpm_table->count++; + } + } + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + /* Assume there is no headless Vega10 for now */ + dpm_table = &(data->dpm_table.dcef_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_dcef_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.pixel_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_pix_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.display_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_disp_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.phy_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_phy_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + vega10_setup_default_pcie_table(hwmgr); + + /* save a copy of the default DPM table */ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), + sizeof(struct vega10_dpm_table)); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinDCSupport)) { + data->odn_dpm_table.odn_core_clock_dpm_levels. + number_of_performance_levels = data->dpm_table.gfx_table.count; + for (i = 0; i < data->dpm_table.gfx_table.count; i++) { + data->odn_dpm_table.odn_core_clock_dpm_levels. + performance_level_entries[i].clock = + data->dpm_table.gfx_table.dpm_levels[i].value; + data->odn_dpm_table.odn_core_clock_dpm_levels. + performance_level_entries[i].enabled = true; + } + + data->odn_dpm_table.vdd_dependency_on_sclk.count = + dep_gfx_table->count; + for (i = 0; i < dep_gfx_table->count; i++) { + data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk = + dep_gfx_table->entries[i].clk; + data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd = + dep_gfx_table->entries[i].vddInd; + data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable = + dep_gfx_table->entries[i].cks_enable; + data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset = + dep_gfx_table->entries[i].cks_voffset; + } + + data->odn_dpm_table.odn_memory_clock_dpm_levels. + number_of_performance_levels = data->dpm_table.mem_table.count; + for (i = 0; i < data->dpm_table.mem_table.count; i++) { + data->odn_dpm_table.odn_memory_clock_dpm_levels. + performance_level_entries[i].clock = + data->dpm_table.mem_table.dpm_levels[i].value; + data->odn_dpm_table.odn_memory_clock_dpm_levels. 
+ performance_level_entries[i].enabled = true; + } + + data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count; + for (i = 0; i < dep_mclk_table->count; i++) { + data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk = + dep_mclk_table->entries[i].clk; + data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd = + dep_mclk_table->entries[i].vddInd; + data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci = + dep_mclk_table->entries[i].vddci; + } + } + + return 0; +} + +/* + * @fn vega10_populate_ulv_state + * @brief Function to provide parameters for Utral Low Voltage state to SMC. + * + * @param hwmgr - the address of the hardware manager. + * @return Always 0. + */ +static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + data->smc_state_table.pp_table.UlvOffsetVid = + (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / + VOLTAGE_VID_OFFSET_SCALE1); + + data->smc_state_table.pp_table.UlvSmnclkDid = + (uint8_t)(table_info->us_ulv_smnclk_did); + data->smc_state_table.pp_table.UlvMp1clkDid = + (uint8_t)(table_info->us_ulv_mp1clk_did); + data->smc_state_table.pp_table.UlvGfxclkBypass = + (uint8_t)(table_info->us_ulv_gfxclk_bypass); + data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 = + (uint8_t)(data->vddc_voltage_table.psi0_enable); + data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 = + (uint8_t)(data->vddc_voltage_table.psi1_enable); + + return 0; +} + +static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr, + uint32_t lclock, uint8_t *curr_lclk_did) +{ + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10( + hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + lclock, ÷rs), + "Failed to get LCLK clock settings from VBIOS!", + return -1); + + *curr_lclk_did = dividers.ulDid; + + return 0; +} + +static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr) +{ + int result = -1; + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct vega10_pcie_table *pcie_table = + &(data->dpm_table.pcie_table); + uint32_t i, j; + + for (i = 0; i < pcie_table->count; i++) { + pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i]; + pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i]; + + result = vega10_populate_single_lclk_level(hwmgr, + pcie_table->lclk[i], &(pp_table->LclkDid[i])); + if (result) { + pr_info("Populate LClock Level %d Failed!\n", i); + return result; + } + } + + j = i - 1; + while (i < NUM_LINK_LEVELS) { + pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j]; + pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j]; + + result = vega10_populate_single_lclk_level(hwmgr, + pcie_table->lclk[j], &(pp_table->LclkDid[i])); + if (result) { + pr_info("Populate LClock Level %d Failed!\n", i); + return result; + } + i++; + } + + return result; +} + +/** +* Populates single SMC GFXSCLK structure using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param gfx_clock the GFX clock to use to populate the structure. +* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure. 
+*/ + +static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, + uint32_t gfx_clock, PllSetting_t *current_gfxclk_level) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk = + table_info->vdd_dep_on_sclk; + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + uint32_t i; + + if (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_VDDC) + dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) + &(data->odn_dpm_table.vdd_dependency_on_sclk); + + PP_ASSERT_WITH_CODE(dep_on_sclk, + "Invalid SOC_VDD-GFX_CLK Dependency Table!", + return -EINVAL); + + for (i = 0; i < dep_on_sclk->count; i++) { + if (dep_on_sclk->entries[i].clk == gfx_clock) + break; + } + + PP_ASSERT_WITH_CODE(dep_on_sclk->count > i, + "Cannot find gfx_clk in SOC_VDD-GFX_CLK!", + return -EINVAL); + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK, + gfx_clock, ÷rs), + "Failed to get GFX Clock settings from VBIOS!", + return -EINVAL); + + /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */ + current_gfxclk_level->FbMult = + cpu_to_le32(dividers.ulPll_fb_mult); + /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */ + current_gfxclk_level->SsOn = dividers.ucPll_ss_enable; + current_gfxclk_level->SsFbMult = + cpu_to_le32(dividers.ulPll_ss_fbsmult); + current_gfxclk_level->SsSlewFrac = + cpu_to_le16(dividers.usPll_ss_slew_frac); + current_gfxclk_level->Did = (uint8_t)(dividers.ulDid); + + return 0; +} + +/** + * @brief Populates single SMC SOCCLK structure using the provided clock. + * + * @param hwmgr - the address of the hardware manager. + * @param soc_clock - the SOC clock to use to populate the structure. + * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure. + * @return 0 on success.. 
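 * (Context note: *current_soc_did is simply the divider ID (ulDid) returned
 * by the VBIOS PLL divider query for the requested SOC clock, and
 * *current_vol_index is copied from the vddInd column of the SOC dependency
 * table, i.e. an index into the platform's voltage lookup table.)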
+ */ +static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr, + uint32_t soc_clock, uint8_t *current_soc_did, + uint8_t *current_vol_index) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc = + table_info->vdd_dep_on_socclk; + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + uint32_t i; + + PP_ASSERT_WITH_CODE(dep_on_soc, + "Invalid SOC_VDD-SOC_CLK Dependency Table!", + return -EINVAL); + for (i = 0; i < dep_on_soc->count; i++) { + if (dep_on_soc->entries[i].clk == soc_clock) + break; + } + PP_ASSERT_WITH_CODE(dep_on_soc->count > i, + "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table", + return -EINVAL); + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + soc_clock, ÷rs), + "Failed to get SOC Clock settings from VBIOS!", + return -EINVAL); + + *current_soc_did = (uint8_t)dividers.ulDid; + *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd); + + return 0; +} + +uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr, + uint32_t clk, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) +{ + uint16_t i; + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].clk == clk) + return dep_table->entries[i].vddc; + } + + pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!"); + return 0; +} + +/** +* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states +* +* @param hwmgr the address of the hardware manager +*/ +static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_socclk; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); + int result = 0; + uint32_t i, j; + + for (i = 0; i < dpm_table->count; i++) { + result = vega10_populate_single_gfx_level(hwmgr, + dpm_table->dpm_levels[i].value, + &(pp_table->GfxclkLevel[i])); + if (result) + return result; + } + + j = i - 1; + while (i < NUM_GFXCLK_DPM_LEVELS) { + result = vega10_populate_single_gfx_level(hwmgr, + dpm_table->dpm_levels[j].value, + &(pp_table->GfxclkLevel[i])); + if (result) + return result; + i++; + } + + pp_table->GfxclkSlewRate = + cpu_to_le16(table_info->us_gfxclk_slew_rate); + + dpm_table = &(data->dpm_table.soc_table); + for (i = 0; i < dpm_table->count; i++) { + pp_table->SocVid[i] = + (uint8_t)convert_to_vid( + vega10_locate_vddc_given_clock(hwmgr, + dpm_table->dpm_levels[i].value, + dep_table)); + result = vega10_populate_single_soc_level(hwmgr, + dpm_table->dpm_levels[i].value, + &(pp_table->SocclkDid[i]), + &(pp_table->SocDpmVoltageIndex[i])); + if (result) + return result; + } + + j = i - 1; + while (i < NUM_SOCCLK_DPM_LEVELS) { + pp_table->SocVid[i] = pp_table->SocVid[j]; + result = vega10_populate_single_soc_level(hwmgr, + dpm_table->dpm_levels[j].value, + &(pp_table->SocclkDid[i]), + &(pp_table->SocDpmVoltageIndex[i])); + if (result) + return result; + i++; + } + + return result; +} + +/** + * @brief Populates single SMC GFXCLK structure using the provided clock. + * + * @param hwmgr - the address of the hardware manager. 
+ * @param mem_clock - the memory clock to use to populate the structure. + * @return 0 on success.. + */ +static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr, + uint32_t mem_clock, uint8_t *current_mem_vid, + PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk = + table_info->vdd_dep_on_mclk; + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + uint32_t i; + + if (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_VDDC) + dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) + &data->odn_dpm_table.vdd_dependency_on_mclk; + + PP_ASSERT_WITH_CODE(dep_on_mclk, + "Invalid SOC_VDD-UCLK Dependency Table!", + return -EINVAL); + + for (i = 0; i < dep_on_mclk->count; i++) { + if (dep_on_mclk->entries[i].clk == mem_clock) + break; + } + + PP_ASSERT_WITH_CODE(dep_on_mclk->count > i, + "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!", + return -EINVAL); + + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10( + hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, ÷rs), + "Failed to get UCLK settings from VBIOS!", + return -1); + + *current_mem_vid = + (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd)); + *current_mem_soc_vind = + (uint8_t)(dep_on_mclk->entries[i].vddInd); + current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult); + current_memclk_level->Did = (uint8_t)(dividers.ulDid); + + PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1, + "Invalid Divider ID!", + return -EINVAL); + + return 0; +} + +/** + * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states. + * + * @param pHwMgr - the address of the hardware manager. + * @return PP_Result_OK on success. 
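 * (The "j = i - 1; while (i < NUM_UCLK_DPM_LEVELS)" tail below, like the
 * equivalent loops for GFXCLK, SOCCLK, LCLK and the multimedia clocks,
 * pads the fixed-size SMC array by repeating the highest populated level,
 * so that every entry of the table ends up initialized.)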
+ */ +static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct vega10_single_dpm_table *dpm_table = + &(data->dpm_table.mem_table); + int result = 0; + uint32_t i, j, reg, mem_channels; + + for (i = 0; i < dpm_table->count; i++) { + result = vega10_populate_single_memory_level(hwmgr, + dpm_table->dpm_levels[i].value, + &(pp_table->MemVid[i]), + &(pp_table->UclkLevel[i]), + &(pp_table->MemSocVoltageIndex[i])); + if (result) + return result; + } + + j = i - 1; + while (i < NUM_UCLK_DPM_LEVELS) { + result = vega10_populate_single_memory_level(hwmgr, + dpm_table->dpm_levels[j].value, + &(pp_table->MemVid[i]), + &(pp_table->UclkLevel[i]), + &(pp_table->MemSocVoltageIndex[i])); + if (result) + return result; + i++; + } + + reg = soc15_get_register_offset(DF_HWID, 0, + mmDF_CS_AON0_DramBaseAddress0_BASE_IDX, + mmDF_CS_AON0_DramBaseAddress0); + mem_channels = (cgs_read_register(hwmgr->device, reg) & + DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >> + DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + pp_table->NumMemoryChannels = cpu_to_le16(mem_channels); + pp_table->MemoryChannelWidth = + cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH * + channel_number[mem_channels]); + + pp_table->LowestUclkReservedForUlv = + (uint8_t)(data->lowest_uclk_reserved_for_ulv); + + return result; +} + +static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr, + DSPCLK_e disp_clock) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *) + (hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + uint32_t i; + uint16_t clk = 0, vddc = 0; + uint8_t vid = 0; + + switch (disp_clock) { + case DSPCLK_DCEFCLK: + dep_table = table_info->vdd_dep_on_dcefclk; + break; + case DSPCLK_DISPCLK: + dep_table = table_info->vdd_dep_on_dispclk; + break; + case DSPCLK_PIXCLK: + dep_table = table_info->vdd_dep_on_pixclk; + break; + case DSPCLK_PHYCLK: + dep_table = table_info->vdd_dep_on_phyclk; + break; + default: + return -1; + } + + PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS, + "Number Of Entries Exceeded maximum!", + return -1); + + for (i = 0; i < dep_table->count; i++) { + clk = (uint16_t)(dep_table->entries[i].clk / 100); + vddc = table_info->vddc_lookup_table-> + entries[dep_table->entries[i].vddInd].us_vdd; + vid = (uint8_t)convert_to_vid(vddc); + pp_table->DisplayClockTable[disp_clock][i].Freq = + cpu_to_le16(clk); + pp_table->DisplayClockTable[disp_clock][i].Vid = + cpu_to_le16(vid); + } + + while (i < NUM_DSPCLK_LEVELS) { + pp_table->DisplayClockTable[disp_clock][i].Freq = + cpu_to_le16(clk); + pp_table->DisplayClockTable[disp_clock][i].Vid = + cpu_to_le16(vid); + i++; + } + + return 0; +} + +static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr) +{ + uint32_t i; + + for (i = 0; i < DSPCLK_COUNT; i++) { + PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i), + "Failed to populate Clock in DisplayClockTable!", + return -1); + } + + return 0; +} + +static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr, + uint32_t eclock, uint8_t *current_eclk_did, + uint8_t *current_soc_vol) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct 
phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table = + table_info->mm_dep_table; + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + uint32_t i; + + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + eclock, ÷rs), + "Failed to get ECLK clock settings from VBIOS!", + return -1); + + *current_eclk_did = (uint8_t)dividers.ulDid; + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].eclk == eclock) + *current_soc_vol = dep_table->entries[i].vddcInd; + } + + return 0; +} + +static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table); + int result = -EINVAL; + uint32_t i, j; + + for (i = 0; i < dpm_table->count; i++) { + result = vega10_populate_single_eclock_level(hwmgr, + dpm_table->dpm_levels[i].value, + &(pp_table->EclkDid[i]), + &(pp_table->VceDpmVoltageIndex[i])); + if (result) + return result; + } + + j = i - 1; + while (i < NUM_VCE_DPM_LEVELS) { + result = vega10_populate_single_eclock_level(hwmgr, + dpm_table->dpm_levels[j].value, + &(pp_table->EclkDid[i]), + &(pp_table->VceDpmVoltageIndex[i])); + if (result) + return result; + i++; + } + + return result; +} + +static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr, + uint32_t vclock, uint8_t *current_vclk_did) +{ + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + vclock, ÷rs), + "Failed to get VCLK clock settings from VBIOS!", + return -EINVAL); + + *current_vclk_did = (uint8_t)dividers.ulDid; + + return 0; +} + +static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr, + uint32_t dclock, uint8_t *current_dclk_did) +{ + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + dclock, ÷rs), + "Failed to get DCLK clock settings from VBIOS!", + return -EINVAL); + + *current_dclk_did = (uint8_t)dividers.ulDid; + + return 0; +} + +static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct vega10_single_dpm_table *vclk_dpm_table = + &(data->dpm_table.vclk_table); + struct vega10_single_dpm_table *dclk_dpm_table = + &(data->dpm_table.dclk_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table = + table_info->mm_dep_table; + int result = -EINVAL; + uint32_t i, j; + + for (i = 0; i < vclk_dpm_table->count; i++) { + result = vega10_populate_single_vclock_level(hwmgr, + vclk_dpm_table->dpm_levels[i].value, + &(pp_table->VclkDid[i])); + if (result) + return result; + } + + j = i - 1; + while (i < NUM_UVD_DPM_LEVELS) { + result = vega10_populate_single_vclock_level(hwmgr, + vclk_dpm_table->dpm_levels[j].value, + &(pp_table->VclkDid[i])); + if (result) + return result; + i++; + } + + for (i = 0; i < dclk_dpm_table->count; i++) { + result = vega10_populate_single_dclock_level(hwmgr, + dclk_dpm_table->dpm_levels[i].value, + &(pp_table->DclkDid[i])); + if (result) + return result; + } + + j = i - 1; + while (i < 
NUM_UVD_DPM_LEVELS) { + result = vega10_populate_single_dclock_level(hwmgr, + dclk_dpm_table->dpm_levels[j].value, + &(pp_table->DclkDid[i])); + if (result) + return result; + i++; + } + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vclk == + vclk_dpm_table->dpm_levels[i].value && + dep_table->entries[i].dclk == + dclk_dpm_table->dpm_levels[i].value) + pp_table->UvdDpmVoltageIndex[i] = + dep_table->entries[i].vddcInd; + else + return -1; + } + + j = i - 1; + while (i < NUM_UVD_DPM_LEVELS) { + pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd; + i++; + } + + return 0; +} + +static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_sclk; + uint32_t i; + + for (i = 0; dep_table->count; i++) { + pp_table->CksEnable[i] = dep_table->entries[i].cks_enable; + pp_table->CksVidOffset[i] = convert_to_vid( + dep_table->entries[i].cks_voffset); + } + + return 0; +} + +static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_sclk; + struct pp_atomfwctrl_avfs_parameters avfs_params = {0}; + int result = 0; + uint32_t i; + + pp_table->MinVoltageVid = (uint8_t)0xff; + pp_table->MaxVoltageVid = (uint8_t)0; + + if (data->smu_features[GNLD_AVFS].supported) { + result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); + if (!result) { + pp_table->MinVoltageVid = (uint8_t) + convert_to_vid((uint16_t)(avfs_params.ulMaxVddc)); + pp_table->MaxVoltageVid = (uint8_t) + convert_to_vid((uint16_t)(avfs_params.ulMinVddc)); + pp_table->BtcGbVdroopTableCksOn.a0 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0); + pp_table->BtcGbVdroopTableCksOn.a1 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1); + pp_table->BtcGbVdroopTableCksOn.a2 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2); + + pp_table->BtcGbVdroopTableCksOff.a0 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0); + pp_table->BtcGbVdroopTableCksOff.a1 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1); + pp_table->BtcGbVdroopTableCksOff.a2 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2); + + pp_table->AvfsGbCksOn.m1 = + cpu_to_le32(avfs_params.ulGbFuseTableCksonM1); + pp_table->AvfsGbCksOn.m2 = + cpu_to_le16(avfs_params.usGbFuseTableCksonM2); + pp_table->AvfsGbCksOn.b = + cpu_to_le32(avfs_params.ulGbFuseTableCksonB); + pp_table->AvfsGbCksOn.m1_shift = 24; + pp_table->AvfsGbCksOn.m2_shift = 12; + + pp_table->AvfsGbCksOff.m1 = + cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1); + pp_table->AvfsGbCksOff.m2 = + cpu_to_le16(avfs_params.usGbFuseTableCksoffM2); + pp_table->AvfsGbCksOff.b = + cpu_to_le32(avfs_params.ulGbFuseTableCksoffB); + pp_table->AvfsGbCksOff.m1_shift = 24; + pp_table->AvfsGbCksOff.m2_shift = 12; + + pp_table->AConstant[0] = + cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0); + pp_table->AConstant[1] = + cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1); + pp_table->AConstant[2] = + cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2); + 
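/*
 * PP_ASSERT_WITH_CODE() is the error-handling idiom used throughout this
 * file: the first argument is the condition expected to hold, the second is
 * the message logged when it does not, and the third is a statement executed
 * on failure, which is why so many calls end in "..., return -EINVAL);".
 * A minimal sketch of such a macro, assuming roughly the shape powerplay's
 * pp_debug.h gives it:
 *
 *   #define PP_ASSERT_WITH_CODE(cond, msg, code)           \
 *           do {                                           \
 *                   if (!(cond)) {                         \
 *                           pr_info("%s\n", msg);          \
 *                           code;                          \
 *                   }                                      \
 *           } while (0)
 *
 * So PP_ASSERT_WITH_CODE(!result, "Failed to ...", return result) logs the
 * message and returns only when result is non-zero; otherwise it is a no-op.
 */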
pp_table->DC_tol_sigma = + cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma); + pp_table->Platform_mean = + cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean); + pp_table->PSM_Age_CompFactor = + cpu_to_le16(avfs_params.usPsmAgeComfactor); + pp_table->Platform_sigma = + cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma); + + for (i = 0; i < dep_table->count; i++) + pp_table->StaticVoltageOffsetVid[i] = (uint8_t) + (dep_table->entries[i].sclk_offset * + VOLTAGE_VID_OFFSET_SCALE2 / + VOLTAGE_VID_OFFSET_SCALE1); + + pp_table->OverrideBtcGbCksOn = + avfs_params.ucEnableGbVdroopTableCkson; + pp_table->OverrideAvfsGbCksOn = + avfs_params.ucEnableGbFuseTableCkson; + + if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->disp_clk_quad_eqn_a) && + (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->disp_clk_quad_eqn_b)) { + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 = + (int32_t)data->disp_clk_quad_eqn_a; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 = + (int16_t)data->disp_clk_quad_eqn_b; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b = + (int32_t)data->disp_clk_quad_eqn_c; + } else { + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 = + (int32_t)avfs_params.ulDispclk2GfxclkM1; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 = + (int16_t)avfs_params.usDispclk2GfxclkM2; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b = + (int32_t)avfs_params.ulDispclk2GfxclkB; + } + + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12; + + if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->dcef_clk_quad_eqn_a) && + (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->dcef_clk_quad_eqn_b)) { + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 = + (int32_t)data->dcef_clk_quad_eqn_a; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 = + (int16_t)data->dcef_clk_quad_eqn_b; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b = + (int32_t)data->dcef_clk_quad_eqn_c; + } else { + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 = + (int32_t)avfs_params.ulDcefclk2GfxclkM1; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 = + (int16_t)avfs_params.usDcefclk2GfxclkM2; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b = + (int32_t)avfs_params.ulDcefclk2GfxclkB; + } + + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12; + + if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->pixel_clk_quad_eqn_a) && + (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->pixel_clk_quad_eqn_b)) { + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 = + (int32_t)data->pixel_clk_quad_eqn_a; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 = + (int16_t)data->pixel_clk_quad_eqn_b; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b = + (int32_t)data->pixel_clk_quad_eqn_c; + } else { + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 = + (int32_t)avfs_params.ulPixelclk2GfxclkM1; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 = + (int16_t)avfs_params.usPixelclk2GfxclkM2; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b = + (int32_t)avfs_params.ulPixelclk2GfxclkB; + } + + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12; + + if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->phy_clk_quad_eqn_a) && + (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->phy_clk_quad_eqn_b)) { + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 = + (int32_t)data->phy_clk_quad_eqn_a; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 = + 
(int16_t)data->phy_clk_quad_eqn_b; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b = + (int32_t)data->phy_clk_quad_eqn_c; + } else { + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 = + (int32_t)avfs_params.ulPhyclk2GfxclkM1; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 = + (int16_t)avfs_params.usPhyclk2GfxclkM2; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b = + (int32_t)avfs_params.ulPhyclk2GfxclkB; + } + + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12; + } else { + data->smu_features[GNLD_AVFS].supported = false; + } + } + + return 0; +} + +static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct pp_atomfwctrl_gpio_parameters gpio_params = {0}; + int result; + + result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params); + if (!result) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + (data->registry_data.regulator_hot_gpio_support)) { + pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio; + pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity; + pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio; + pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity; + } else { + pp_table->VR0HotGpio = 0; + pp_table->VR0HotPolarity = 0; + pp_table->VR1HotGpio = 0; + pp_table->VR1HotPolarity = 0; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition) && + (data->registry_data.ac_dc_switch_gpio_support)) { + pp_table->AcDcGpio = gpio_params.ucAcDcGpio; + pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity; + } else { + pp_table->AcDcGpio = 0; + pp_table->AcDcPolarity = 0; + } + } + + return result; +} + +static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_AVFS].supported) { + if (enable) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_AVFS].smu_feature_bitmap), + "[avfs_control] Attempt to Enable AVFS feature Failed!", + return -1); + data->smu_features[GNLD_AVFS].enabled = true; + } else { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + false, + data->smu_features[GNLD_AVFS].smu_feature_id), + "[avfs_control] Attempt to Disable AVFS feature Failed!", + return -1); + data->smu_features[GNLD_AVFS].enabled = false; + } + } + + return 0; +} + +/** +* Initializes the SMC table and uploads it +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data (PowerState) +* @return always 0 +*/ +static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct pp_atomfwctrl_voltage_table voltage_table; + + result = vega10_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to setup default DPM tables!", + return result); + + pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_SVID2, &voltage_table); + pp_table->MaxVidStep = voltage_table.max_vid_step; + + pp_table->GfxDpmVoltageMode = + (uint8_t)(table_info->uc_gfx_dpm_voltage_mode); + pp_table->SocDpmVoltageMode = + (uint8_t)(table_info->uc_soc_dpm_voltage_mode); + pp_table->UclkDpmVoltageMode = + (uint8_t)(table_info->uc_uclk_dpm_voltage_mode); + pp_table->UvdDpmVoltageMode = + (uint8_t)(table_info->uc_uvd_dpm_voltage_mode); + pp_table->VceDpmVoltageMode = + (uint8_t)(table_info->uc_vce_dpm_voltage_mode); + pp_table->Mp0DpmVoltageMode = + (uint8_t)(table_info->uc_mp0_dpm_voltage_mode); + pp_table->DisplayDpmVoltageMode = + (uint8_t)(table_info->uc_dcef_dpm_voltage_mode); + + if (data->registry_data.ulv_support && + table_info->us_ulv_voltage_offset) { + result = vega10_populate_ulv_state(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ULV state!", + return result); + } + + result = vega10_populate_smc_link_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Link Level!", + return result); + + result = vega10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Graphics Level!", + return result); + + result = vega10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Memory Level!", + return result); + + result = vega10_populate_all_display_clock_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Display Level!", + return result); + + result = vega10_populate_smc_vce_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize VCE Level!", + return result); + + result = vega10_populate_smc_uvd_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize UVD Level!", + return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = vega10_populate_clock_stretcher_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate Clock Stretcher Table!", + return result); + } + + result = vega10_populate_avfs_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize AVFS Parameters!", + return result); + + result = vega10_populate_gpio_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize GPIO Parameters!", + return result); + + pp_table->GfxclkAverageAlpha = (uint8_t) + (data->gfxclk_average_alpha); + pp_table->SocclkAverageAlpha = (uint8_t) + (data->socclk_average_alpha); + pp_table->UclkAverageAlpha = (uint8_t) + (data->uclk_average_alpha); + pp_table->GfxActivityAverageAlpha = (uint8_t) + (data->gfx_activity_average_alpha); + + result = vega10_copy_table_to_smc(hwmgr->smumgr, + (uint8_t *)pp_table, PPTABLE); + PP_ASSERT_WITH_CODE(!result, + "Failed to upload PPtable!", return result); + + if (data->smu_features[GNLD_AVFS].supported) { + uint32_t features_enabled; + result = vega10_get_smc_features(hwmgr->smumgr, 
&features_enabled); + PP_ASSERT_WITH_CODE(!result, + "Failed to Retrieve Enabled Features!", + return result); + if (!(features_enabled & (1 << FEATURE_AVFS_BIT))) { + result = vega10_perform_btc(hwmgr->smumgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to Perform BTC!", + return result); + result = vega10_avfs_enable(hwmgr, true); + PP_ASSERT_WITH_CODE(!result, + "Attempt to enable AVFS feature Failed!", + return result); + result = vega10_save_vft_table(hwmgr->smumgr, + (uint8_t *)&(data->smc_state_table.avfs_table)); + PP_ASSERT_WITH_CODE(!result, + "Attempt to save VFT table Failed!", + return result); + } else { + data->smu_features[GNLD_AVFS].enabled = true; + result = vega10_restore_vft_table(hwmgr->smumgr, + (uint8_t *)&(data->smc_state_table.avfs_table)); + PP_ASSERT_WITH_CODE(!result, + "Attempt to restore VFT table Failed!", + return result;); + } + } + + return 0; +} + +static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_THERMAL].supported) { + if (data->smu_features[GNLD_THERMAL].enabled) + pr_info("THERMAL Feature Already enabled!"); + + PP_ASSERT_WITH_CODE( + !vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_THERMAL].smu_feature_bitmap), + "Enable THERMAL Feature Failed!", + return -1); + data->smu_features[GNLD_THERMAL].enabled = true; + } + + return 0; +} + +static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot)) { + if (data->smu_features[GNLD_VR0HOT].supported) { + PP_ASSERT_WITH_CODE( + !vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_VR0HOT].smu_feature_bitmap), + "Attempt to Enable VR0 Hot feature Failed!", + return -1); + data->smu_features[GNLD_VR0HOT].enabled = true; + } else { + if (data->smu_features[GNLD_VR1HOT].supported) { + PP_ASSERT_WITH_CODE( + !vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_VR1HOT].smu_feature_bitmap), + "Attempt to Enable VR0 Hot feature Failed!", + return -1); + data->smu_features[GNLD_VR1HOT].enabled = true; + } + } + } + return 0; +} + +static int vega10_enable_ulv(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->registry_data.ulv_support) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_ULV].smu_feature_bitmap), + "Enable ULV Feature Failed!", + return -1); + data->smu_features[GNLD_ULV].enabled = true; + } + + return 0; +} + +static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DS_GFXCLK].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap), + "Attempt to Enable DS_GFXCLK Feature Failed!", + return -1); + data->smu_features[GNLD_DS_GFXCLK].enabled = true; + } + + if (data->smu_features[GNLD_DS_SOCCLK].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap), + "Attempt to Enable DS_GFXCLK Feature Failed!", + return -1); + data->smu_features[GNLD_DS_SOCCLK].enabled = true; + } + + if (data->smu_features[GNLD_DS_LCLK].supported) { + 
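/*
 * The thermal, VR-hot, ULV and deep-sleep paths above all repeat the same
 * three-step idiom: check the feature's "supported" flag, ask the SMU to turn
 * the feature bitmap on through vega10_enable_smc_features(), then record
 * "enabled" in the driver state. A sketch of how a shared helper could
 * express that idiom (the helper name is illustrative only, nothing this
 * patch defines):
 *
 *   static int vega10_enable_feature_if_supported(struct pp_hwmgr *hwmgr,
 *                   uint32_t idx, const char *err_msg)
 *   {
 *           struct vega10_hwmgr *data =
 *                           (struct vega10_hwmgr *)(hwmgr->backend);
 *
 *           if (!data->smu_features[idx].supported)
 *                   return 0;
 *
 *           PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
 *                           true,
 *                           data->smu_features[idx].smu_feature_bitmap),
 *                           err_msg, return -EINVAL);
 *           data->smu_features[idx].enabled = true;
 *           return 0;
 *   }
 *
 * e.g. vega10_enable_feature_if_supported(hwmgr, GNLD_DS_LCLK,
 * "Attempt to Enable DS_LCLK Feature Failed!") would cover the block below.
 */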
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap), + "Attempt to Enable DS_GFXCLK Feature Failed!", + return -1); + data->smu_features[GNLD_DS_LCLK].enabled = true; + } + + return 0; +} + +/** + * @brief Tell SMC to enabled the supported DPMs. + * + * @param hwmgr - the address of the powerplay hardware manager. + * @Param bitmap - bitmap for the features to enabled. + * @return 0 on at least one DPM is successfully enabled. + */ +static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t i, feature_mask = 0; + + for (i = 0; i < GNLD_DPM_MAX; i++) { + if (data->smu_features[i].smu_feature_bitmap & bitmap) { + if (data->smu_features[i].supported) { + if (!data->smu_features[i].enabled) { + feature_mask |= data->smu_features[i]. + smu_feature_bitmap; + data->smu_features[i].enabled = true; + } + } + } + } + + if (vega10_enable_smc_features(hwmgr->smumgr, + true, feature_mask)) { + for (i = 0; i < GNLD_DPM_MAX; i++) { + if (data->smu_features[i].smu_feature_bitmap & + feature_mask) + data->smu_features[i].enabled = false; + } + } + + if(data->smu_features[GNLD_LED_DISPLAY].supported == true){ + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap), + "Attempt to Enable LED DPM feature Failed!", return -EINVAL); + data->smu_features[GNLD_LED_DISPLAY].enabled = true; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition)) { + if (data->smu_features[GNLD_ACDC].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_ACDC].smu_feature_bitmap), + "Attempt to Enable DS_GFXCLK Feature Failed!", + return -1); + data->smu_features[GNLD_ACDC].enabled = true; + } + } + + return 0; +} + +static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + int tmp_result, result = 0; + + tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to configure telemetry!", + return tmp_result); + + vega10_set_tools_address(hwmgr->smumgr); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_NumOfDisplays, 0); + + tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 
0 : -1; + PP_ASSERT_WITH_CODE(!tmp_result, + "DPM is already running right , skipping re-enablement!", + return 0); + + tmp_result = vega10_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to contruct voltage tables!", + result = tmp_result); + + tmp_result = vega10_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to initialize SMC table!", + result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) { + tmp_result = vega10_enable_thermal_protection(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable thermal protection!", + result = tmp_result); + } + + tmp_result = vega10_enable_vrhot_feature(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable VR hot feature!", + result = tmp_result); + + tmp_result = vega10_enable_ulv(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable ULV!", + result = tmp_result); + + tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable deep sleep master switch!", + result = tmp_result); + + tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to start DPM!", result = tmp_result); + + tmp_result = vega10_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable power containment!", + result = tmp_result); + + tmp_result = vega10_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to power control set level!", + result = tmp_result); + + return result; +} + +static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct vega10_power_state); +} + +static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, + void *state, struct pp_power_state *power_state, + void *pp_table, uint32_t classification_flag) +{ + struct vega10_power_state *vega10_power_state = + cast_phw_vega10_power_state(&(power_state->hardware)); + struct vega10_performance_level *performance_level; + ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state; + ATOM_Vega10_POWERPLAYTABLE *powerplay_table = + (ATOM_Vega10_POWERPLAYTABLE *)pp_table; + ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table = + (ATOM_Vega10_SOCCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset)); + ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = + (ATOM_Vega10_GFXCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); + ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table = + (ATOM_Vega10_MCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + + + /* The following fields are not initialized here: + * id orderedList allStatesList + */ + power_state->classification.ui_label = + (le16_to_cpu(state_entry->usClassification) & + ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> + ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; + power_state->classification.flags = classification_flag; + /* NOTE: There is a classification2 flag in BIOS + * that is not being used right now + */ + power_state->classification.temporary_state = false; + power_state->classification.to_be_deleted = false; + + power_state->validation.disallowOnDC = + ((le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Vega10_DISALLOW_ON_DC) != 0); + + power_state->display.disableFrameModulation = false; + 
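/*
 * Note on the structure built below: each ATOM_Vega10_State entry carries a
 * "low" and a "high" set of clock indices, and this callback turns them into
 * exactly two vega10 performance levels by indexing the SOCCLK, GFXCLK and
 * MCLK dependency tables:
 *
 *   performance_levels[0] <- ucSocClockIndexLow / ucGfxClockIndexLow /
 *                            ucMemClockIndexLow   (lowest state)
 *   performance_levels[1] <- ucSocClockIndexHigh / ucGfxClockIndexHigh /
 *                            ucMemClockIndexHigh  (highest state)
 *
 * This is why vega10_apply_state_adjust_rules() further down treats a power
 * state with anything other than two performance levels as unexpected.
 */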
power_state->display.limitRefreshrate = false; + power_state->display.enableVariBright = + ((le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Vega10_ENABLE_VARIBRIGHT) != 0); + + power_state->validation.supportedPowerLevels = 0; + power_state->uvd_clocks.VCLK = 0; + power_state->uvd_clocks.DCLK = 0; + power_state->temperatures.min = 0; + power_state->temperatures.max = 0; + + performance_level = &(vega10_power_state->performance_levels + [vega10_power_state->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (vega10_power_state->performance_level_count < + NUM_GFXCLK_DPM_LEVELS), + "Performance levels exceeds SMC limit!", + return -1); + + PP_ASSERT_WITH_CODE( + (vega10_power_state->performance_level_count <= + hwmgr->platform_descriptor. + hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -1); + + /* Performance levels are arranged from low to high. */ + performance_level->soc_clock = socclk_dep_table->entries + [state_entry->ucSocClockIndexLow].ulClk; + performance_level->gfx_clock = gfxclk_dep_table->entries + [state_entry->ucGfxClockIndexLow].ulClk; + performance_level->mem_clock = mclk_dep_table->entries + [state_entry->ucMemClockIndexLow].ulMemClk; + + performance_level = &(vega10_power_state->performance_levels + [vega10_power_state->performance_level_count++]); + + performance_level->soc_clock = socclk_dep_table->entries + [state_entry->ucSocClockIndexHigh].ulClk; + performance_level->gfx_clock = gfxclk_dep_table->entries + [state_entry->ucGfxClockIndexHigh].ulClk; + performance_level->mem_clock = mclk_dep_table->entries + [state_entry->ucMemClockIndexHigh].ulMemClk; + return 0; +} + +static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct vega10_power_state *ps; + + state->hardware.magic = PhwVega10_Magic; + + ps = cast_phw_vega10_power_state(&state->hardware); + + result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state, + vega10_get_pp_table_entry_callback_func); + + /* + * This is the earliest time we have all the dependency table + * and the VBIOS boot state + */ + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + ps->uvd_clks.vclk = state->uvd_clocks.VCLK; + ps->uvd_clks.dclk = state->uvd_clocks.DCLK; + + return 0; +} + +static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + return 0; +} + +static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *request_ps, + const struct pp_power_state *current_ps) +{ + struct vega10_power_state *vega10_ps = + cast_phw_vega10_power_state(&request_ps->hardware); + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + bool disable_mclk_switching_for_vr; + bool force_mclk_high; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + int32_t count; + uint32_t stable_pstate_sclk_dpm_percentage; + uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + uint32_t latency; + + data->battery_state = (PP_StateUILabel_Battery == + request_ps->classification.ui_label); + + if (vega10_ps->performance_level_count != 2) + pr_info("VI 
should always have 2 performance levels"); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + /* Cap clock DPM tables at DC MAX if it is in DC. */ + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < vega10_ps->performance_level_count; i++) { + if (vega10_ps->performance_levels[i].mem_clock > + max_limits->mclk) + vega10_ps->performance_levels[i].mem_clock = + max_limits->mclk; + if (vega10_ps->performance_levels[i].gfx_clock > + max_limits->sclk) + vega10_ps->performance_levels[i].gfx_clock = + max_limits->sclk; + } + } + + vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; + vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ + minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; + /* minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; */ + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + PP_ASSERT_WITH_CODE( + data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 && + data->registry_data.stable_pstate_sclk_dpm_percentage <= 100, + "percent sclk value must range from 1% to 100%, setting default value", + stable_pstate_sclk_dpm_percentage = 75); + + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * + stable_pstate_sclk_dpm_percentage) / 100; + + for (count = table_info->vdd_dep_on_sclk->count - 1; + count >= 0; count--) { + if (stable_pstate_sclk >= + table_info->vdd_dep_on_sclk->entries[count].clk) { + stable_pstate_sclk = + table_info->vdd_dep_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (hwmgr->gfx_arbiter.sclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.engineClock), + "Overdrive sclk exceeds limit", + hwmgr->gfx_arbiter.sclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.engineClock); + + if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) + vega10_ps->performance_levels[1].gfx_clock = + hwmgr->gfx_arbiter.sclk_over_drive; + } + + if (hwmgr->gfx_arbiter.mclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.memoryClock), + "Overdrive mclk exceeds limit", + hwmgr->gfx_arbiter.mclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.memoryClock); + + if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) + vega10_ps->performance_levels[1].mem_clock = + hwmgr->gfx_arbiter.mclk_over_drive; + } + + disable_mclk_switching_for_frame_lock = phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchForVR); 
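/*
 * The PHM_PlatformCaps_StablePState branch above pins the clocks instead of
 * letting them float: the target engine clock is a percentage of the AC
 * limit, snapped down to the nearest vdd_dep_on_sclk entry, and memory is
 * pinned to the AC mclk limit. Symbolically (the numbers are assumed, for
 * illustration only):
 *
 *   stable_pstate_sclk = (max_ac_sclk * percent) / 100;
 *   e.g. percent = 75, max_ac_sclk = 1600 -> 1200, then lowered to the
 *   largest vdd_dep_on_sclk entry that does not exceed it.
 *
 * Both clamped values are then fed back through minimum_clocks, so the rest
 * of this function treats them like any other display-driven minimum.
 */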
+ force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ForceMclkHigh); + + disable_mclk_switching = (info.display_count > 1) || + disable_mclk_switching_for_frame_lock || + disable_mclk_switching_for_vr || + force_mclk_high; + + sclk = vega10_ps->performance_levels[0].gfx_clock; + mclk = vega10_ps->performance_levels[0].mem_clock; + + if (sclk < minimum_clocks.engineClock) + sclk = (minimum_clocks.engineClock > max_limits->sclk) ? + max_limits->sclk : minimum_clocks.engineClock; + + if (mclk < minimum_clocks.memoryClock) + mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? + max_limits->mclk : minimum_clocks.memoryClock; + + vega10_ps->performance_levels[0].gfx_clock = sclk; + vega10_ps->performance_levels[0].mem_clock = mclk; + + vega10_ps->performance_levels[1].gfx_clock = + (vega10_ps->performance_levels[1].gfx_clock >= + vega10_ps->performance_levels[0].gfx_clock) ? + vega10_ps->performance_levels[1].gfx_clock : + vega10_ps->performance_levels[0].gfx_clock; + + if (disable_mclk_switching) { + /* Set Mclk the max of level 0 and level 1 */ + if (mclk < vega10_ps->performance_levels[1].mem_clock) + mclk = vega10_ps->performance_levels[1].mem_clock; + + /* Find the lowest MCLK frequency that is within + * the tolerable latency defined in DAL + */ + latency = 0; + for (i = 0; i < data->mclk_latency_table.count; i++) { + if ((data->mclk_latency_table.entries[i].latency <= latency) && + (data->mclk_latency_table.entries[i].frequency >= + vega10_ps->performance_levels[0].mem_clock) && + (data->mclk_latency_table.entries[i].frequency <= + vega10_ps->performance_levels[1].mem_clock)) + mclk = data->mclk_latency_table.entries[i].frequency; + } + vega10_ps->performance_levels[0].mem_clock = mclk; + } else { + if (vega10_ps->performance_levels[1].mem_clock < + vega10_ps->performance_levels[0].mem_clock) + vega10_ps->performance_levels[1].mem_clock = + vega10_ps->performance_levels[0].mem_clock; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + for (i = 0; i < vega10_ps->performance_level_count; i++) { + vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk; + vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk; + } + } + + return 0; +} + +static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct vega10_single_dpm_table *sclk_table = + &(data->dpm_table.gfx_table); + uint32_t sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + struct vega10_single_dpm_table *mclk_table = + &(data->dpm_table.mem_table); + uint32_t mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct cgs_display_info info = {0}; + + data->need_update_dpm_table = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinDCSupport)) { + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (!(data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_SCLK) && i >= 
sclk_table->count) { + /* Check SCLK in DAL's minimum clocks + * in case DeepSleep divider update is required. + */ + if (data->display_timing.min_clock_in_sr != + min_clocks.engineClockInSR && + (min_clocks.engineClockInSR >= + VEGA10_MINIMUM_ENGINE_CLOCK || + data->display_timing.min_clock_in_sr >= + VEGA10_MINIMUM_ENGINE_CLOCK)) + data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != + info.display_count) + data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; + } else { + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + else { + /* Check SCLK in DAL's minimum clocks + * in case DeepSleep divider update is required. + */ + if (data->display_timing.min_clock_in_sr != + min_clocks.engineClockInSR && + (min_clocks.engineClockInSR >= + VEGA10_MINIMUM_ENGINE_CLOCK || + data->display_timing.min_clock_in_sr >= + VEGA10_MINIMUM_ENGINE_CLOCK)) + data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (i >= mclk_table->count) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + if (data->display_timing.num_existing_displays != + info.display_count || + i >= mclk_table->count) + data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; + } + return 0; +} + +static int vega10_populate_and_upload_sclk_mclk_dpm_levels( + struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + uint32_t mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + struct vega10_dpm_table *dpm_table = &data->dpm_table; + struct vega10_dpm_table *golden_dpm_table = + &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinDCSupport)) { + + if (!data->need_update_dpm_table && + !data->apply_optimized_settings && + !data->apply_overdrive_next_settings_mask) + return 0; + + if (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_SCLK) { + for (dpm_count = 0; + dpm_count < dpm_table->gfx_table.count; + dpm_count++) { + dpm_table->gfx_table.dpm_levels[dpm_count].enabled = + data->odn_dpm_table.odn_core_clock_dpm_levels. + performance_level_entries[dpm_count].enabled; + dpm_table->gfx_table.dpm_levels[dpm_count].value = + data->odn_dpm_table.odn_core_clock_dpm_levels. + performance_level_entries[dpm_count].clock; + } + } + + if (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_MCLK) { + for (dpm_count = 0; + dpm_count < dpm_table->mem_table.count; + dpm_count++) { + dpm_table->mem_table.dpm_levels[dpm_count].enabled = + data->odn_dpm_table.odn_memory_clock_dpm_levels. 
+ performance_level_entries[dpm_count].enabled; + dpm_table->mem_table.dpm_levels[dpm_count].value = + data->odn_dpm_table.odn_memory_clock_dpm_levels. + performance_level_entries[dpm_count].clock; + } + } + + if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) || + data->apply_optimized_settings || + (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_SCLK)) { + result = vega10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate SCLK during \ + PopulateNewDPMClocksStates Function!", + return result); + } + + if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) || + (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_MCLK)){ + result = vega10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate MCLK during \ + PopulateNewDPMClocksStates Function!", + return result); + } + } else { + if (!data->need_update_dpm_table && + !data->apply_optimized_settings) + return 0; + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK && + data->smu_features[GNLD_DPM_GFXCLK].supported) { + dpm_table-> + gfx_table.dpm_levels[dpm_table->gfx_table.count - 1]. + value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on + * the default values + */ + PP_ASSERT_WITH_CODE( + golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value, + "Divide by 0!", + return -1); + + dpm_count = dpm_table->gfx_table.count < 2 ? + 0 : dpm_table->gfx_table.count - 2; + for (i = dpm_count; i > 1; i--) { + if (sclk > golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value) { + clock_percent = + ((sclk - golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value) * + 100) / + golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value; + + dpm_table->gfx_table.dpm_levels[i].value = + golden_dpm_table->gfx_table.dpm_levels[i].value + + (golden_dpm_table->gfx_table.dpm_levels[i].value * + clock_percent) / 100; + } else if (golden_dpm_table-> + gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value > + sclk) { + clock_percent = + ((golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value - + sclk) * 100) / + golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count-1].value; + + dpm_table->gfx_table.dpm_levels[i].value = + golden_dpm_table->gfx_table.dpm_levels[i].value - + (golden_dpm_table->gfx_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->gfx_table.dpm_levels[i].value = + golden_dpm_table->gfx_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK && + data->smu_features[GNLD_DPM_UCLK].supported) { + dpm_table-> + mem_table.dpm_levels[dpm_table->mem_table.count - 1]. + value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count - 1].value, + "Divide by 0!", + return -1); + + dpm_count = dpm_table->mem_table.count < 2 ? 
+ 0 : dpm_table->mem_table.count - 2; + for (i = dpm_count; i > 1; i--) { + if (mclk > golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value) { + clock_percent = ((mclk - + golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value) * + 100) / + golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value; + + dpm_table->mem_table.dpm_levels[i].value = + golden_dpm_table->mem_table.dpm_levels[i].value + + (golden_dpm_table->mem_table.dpm_levels[i].value * + clock_percent) / 100; + } else if (golden_dpm_table->mem_table.dpm_levels + [dpm_table->mem_table.count-1].value > mclk) { + clock_percent = ((golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value - mclk) * + 100) / + golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value; + + dpm_table->mem_table.dpm_levels[i].value = + golden_dpm_table->mem_table.dpm_levels[i].value - + (golden_dpm_table->mem_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->mem_table.dpm_levels[i].value = + golden_dpm_table->mem_table.dpm_levels[i].value; + } + } + } + + if ((data->need_update_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) || + data->apply_optimized_settings) { + result = vega10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate SCLK during \ + PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_dpm_table & + (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + result = vega10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate MCLK during \ + PopulateNewDPMClocksStates Function!", + return result); + } + } + + return result; +} + +static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct vega10_single_dpm_table *dpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) || + (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr, + struct vega10_single_dpm_table *dpm_table, + uint32_t low_limit, uint32_t high_limit, + uint32_t disable_dpm_mask) +{ + uint32_t i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) || + (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else if (!((1 << i) & disable_dpm_mask)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr, + const struct vega10_power_state *vega10_ps) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1), + "power state did not have any performance level", + return -1); + + high_limit_count = (vega10_ps->performance_level_count == 1) ? 
0 : 1; + + vega10_trim_single_dpm_states(hwmgr, + &(data->dpm_table.soc_table), + vega10_ps->performance_levels[0].soc_clock, + vega10_ps->performance_levels[high_limit_count].soc_clock); + + vega10_trim_single_dpm_states_with_mask(hwmgr, + &(data->dpm_table.gfx_table), + vega10_ps->performance_levels[0].gfx_clock, + vega10_ps->performance_levels[high_limit_count].gfx_clock, + data->disable_dpm_mask); + + vega10_trim_single_dpm_states(hwmgr, + &(data->dpm_table.mem_table), + vega10_ps->performance_levels[0].mem_clock, + vega10_ps->performance_levels[high_limit_count].mem_clock); + + return 0; +} + +static uint32_t vega10_find_lowest_dpm_level( + struct vega10_single_dpm_table *table) +{ + uint32_t i; + + for (i = 0; i < table->count; i++) { + if (table->dpm_levels[i].enabled) + break; + } + + return i; +} + +static uint32_t vega10_find_highest_dpm_level( + struct vega10_single_dpm_table *table) +{ + uint32_t i = 0; + + if (table->count <= MAX_REGULAR_DPM_NUMBER) { + for (i = table->count; i > 0; i--) { + if (table->dpm_levels[i - 1].enabled) + return i - 1; + } + } else { + pr_info("DPM Table Has Too Many Entries!"); + return MAX_REGULAR_DPM_NUMBER - 1; + } + + return i; +} + +static void vega10_apply_dal_minimum_voltage_request( + struct pp_hwmgr *hwmgr) +{ + return; +} + +static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + vega10_apply_dal_minimum_voltage_request(hwmgr); + + if (!data->registry_data.sclk_dpm_key_disabled) { + if (data->smc_state_table.gfx_boot_level != + data->dpm_table.gfx_table.dpm_state.soft_min_level) { + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMinGfxclkByIndex, + data->smc_state_table.gfx_boot_level), + "Failed to set soft min sclk index!", + return -EINVAL); + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->smc_state_table.gfx_boot_level; + } + } + + if (!data->registry_data.mclk_dpm_key_disabled) { + if (data->smc_state_table.mem_boot_level != + data->dpm_table.mem_table.dpm_state.soft_min_level) { + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMinUclkByIndex, + data->smc_state_table.mem_boot_level), + "Failed to set soft min mclk index!", + return -EINVAL); + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->smc_state_table.mem_boot_level; + } + } + + return 0; +} + +static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + vega10_apply_dal_minimum_voltage_request(hwmgr); + + if (!data->registry_data.sclk_dpm_key_disabled) { + if (data->smc_state_table.gfx_max_level != + data->dpm_table.gfx_table.dpm_state.soft_max_level) { + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMaxGfxclkByIndex, + data->smc_state_table.gfx_max_level), + "Failed to set soft max sclk index!", + return -EINVAL); + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->smc_state_table.gfx_max_level; + } + } + + if (!data->registry_data.mclk_dpm_key_disabled) { + if (data->smc_state_table.mem_max_level != + data->dpm_table.mem_table.dpm_state.soft_max_level) { + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMaxUclkByIndex, + data->smc_state_table.mem_max_level), + "Failed to set soft max mclk index!", + return -EINVAL); + data->dpm_table.mem_table.dpm_state.soft_max_level = + 
data->smc_state_table.mem_max_level; + } + } + + return 0; +} + +static int vega10_generate_dpm_level_enable_mask( + struct pp_hwmgr *hwmgr, const void *input) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + int i; + + PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps), + "Attempt to Trim DPM States Failed!", + return -1); + + data->smc_state_table.gfx_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.gfx_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + data->smc_state_table.mem_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Attempt to upload DPM Bootup Levels Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Attempt to upload DPM Max Levels Failed!", + return -1); + for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++) + data->dpm_table.gfx_table.dpm_levels[i].enabled = true; + + + for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++) + data->dpm_table.mem_table.dpm_levels[i].enabled = true; + + return 0; +} + +int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DPM_VCE].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + enable, + data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap), + "Attempt to Enable/Disable DPM VCE Failed!", + return -1); + data->smu_features[GNLD_DPM_VCE].enabled = enable; + } + + return 0; +} + +static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + data->smc_state_table.pp_table.LowGfxclkInterruptThreshold = + cpu_to_le32(low_sclk_interrupt_threshold); + + /* This message will also enable SmcToHost Interrupt */ + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetLowGfxclkInterruptThreshold, + (uint32_t)low_sclk_interrupt_threshold); + } + + return result; +} + +static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, + const void *input) +{ + int tmp_result, result = 0; + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + + tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to find DPM states clocks in DPM table!", + result = tmp_result); + + tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to populate and upload SCLK MCLK DPM levels!", + result = 
tmp_result); + + tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to generate DPM level enabled mask!", + result = tmp_result); + + tmp_result = vega10_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to update SCLK threshold!", + result = tmp_result); + + result = vega10_copy_table_to_smc(hwmgr->smumgr, + (uint8_t *)pp_table, PPTABLE); + PP_ASSERT_WITH_CODE(!result, + "Failed to upload PPtable!", return result); + + data->apply_optimized_settings = false; + data->apply_overdrive_next_settings_mask = 0; + + return 0; +} + +static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct vega10_power_state *vega10_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + + if (low) + return vega10_ps->performance_levels[0].gfx_clock; + else + return vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; +} + +static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct vega10_power_state *vega10_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + + if (low) + return vega10_ps->performance_levels[0].mem_clock; + else + return vega10_ps->performance_levels + [vega10_ps->performance_level_count-1].mem_clock; +} + +static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, + void *value, int *size) +{ + uint32_t sclk_idx, mclk_idx, activity_percent = 0; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct vega10_dpm_table *dpm_table = &data->dpm_table; + int ret = 0; + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex); + if (!ret) { + vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx); + *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value; + *size = 4; + } + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex); + if (!ret) { + vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx); + *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value; + *size = 4; + } + break; + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0); + if (!ret) { + vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent); + *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; + *size = 4; + } + break; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr); + *size = 4; + break; + case AMDGPU_PP_SENSOR_UVD_POWER: + *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VCE_POWER: + *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; + *size = 4; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr, + bool has_disp) +{ + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetUclkFastSwitch, + has_disp ? 
0 : 1); +} + +int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, + struct pp_display_clock_request *clock_req) +{ + int result = 0; + enum amd_pp_clock_type clk_type = clock_req->clock_type; + uint32_t clk_freq = clock_req->clock_freq_in_khz / 100; + DSPCLK_e clk_select = 0; + uint32_t clk_request = 0; + + switch (clk_type) { + case amd_pp_dcef_clock: + clk_select = DSPCLK_DCEFCLK; + break; + case amd_pp_disp_clock: + clk_select = DSPCLK_DISPCLK; + break; + case amd_pp_pixel_clock: + clk_select = DSPCLK_PIXCLK; + break; + case amd_pp_phy_clock: + clk_select = DSPCLK_PHYCLK; + break; + default: + pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); + result = -1; + break; + } + + if (!result) { + clk_request = (clk_freq << 16) | clk_select; + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_RequestDisplayClockByFreq, + clk_request); + } + + return result; +} + +static int vega10_notify_smc_display_config_after_ps_adjustment( + struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct vega10_single_dpm_table *dpm_table = + &data->dpm_table.dcef_table; + uint32_t num_active_disps = 0; + struct cgs_display_info info = {0}; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct pp_display_clock_request clock_req; + + info.mode_info = NULL; + + cgs_get_active_displays_info(hwmgr->device, &info); + + num_active_disps = info.display_count; + + if (num_active_disps > 1) + vega10_notify_smc_display_change(hwmgr, false); + else + vega10_notify_smc_display_change(hwmgr, true); + + min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; + min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; + + for (i = 0; i < dpm_table->count; i++) { + if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock) + break; + } + + if (i < dpm_table->count) { + clock_req.clock_type = amd_pp_dcef_clock; + clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value; + if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk, + min_clocks.dcefClockInSR), + "Attempt to set divider for DCEFCLK Failed!",); + } else + pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); + } else + pr_info("Cannot find requested DCEFCLK!"); + + return 0; +} + +static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + data->smc_state_table.gfx_boot_level = + data->smc_state_table.gfx_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + data->smc_state_table.mem_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to highest!", + return -1); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to highest!", + return -1); + + return 0; +} + +static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + data->smc_state_table.gfx_boot_level = + data->smc_state_table.gfx_max_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + data->smc_state_table.mem_max_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + + 
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to highest!", + return -1); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to highest!", + return -1); + + return 0; + +} + +static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + data->smc_state_table.gfx_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.gfx_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + data->smc_state_table.mem_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload DPM Bootup Levels!", + return -1); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload DPM Max Levels!", + return -1); + return 0; +} + +static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = vega10_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = vega10_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = vega10_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + + return ret; +} + +static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + vega10_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + vega10_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + + if (hwmgr->fan_ctrl_is_in_default_mode) { + return hwmgr->fan_ctrl_default_mode; + } else { + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + return (cgs_read_register(hwmgr->device, reg) & + CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> + CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + } +} + +static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_simple_clock_info *info) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_clock_and_voltage_limits *max_limits = + &table_info->max_clock_voltage_on_ac; + + info->engine_max_clock = max_limits->sclk; + info->memory_max_clock = max_limits->mclk; + + return 0; +} + +static void vega10_get_sclks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_sclk; + uint32_t i; + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = + dep_table->entries[i].clk; + clocks->num_levels++; + } + } + +} + +static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr, + uint32_t clock) +{ + if (clock >= MEM_FREQ_LOW_LATENCY && + clock < MEM_FREQ_HIGH_LATENCY) + return MEM_LATENCY_HIGH; + else if (clock >= 
MEM_FREQ_HIGH_LATENCY) + return MEM_LATENCY_LOW; + else + return MEM_LATENCY_ERR; +} + +static void vega10_get_memclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_mclk; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t i; + + clocks->num_levels = 0; + data->mclk_latency_table.count = 0; + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = + data->mclk_latency_table.entries + [data->mclk_latency_table.count].frequency = + dep_table->entries[i].clk; + clocks->data[clocks->num_levels].latency_in_us = + data->mclk_latency_table.entries + [data->mclk_latency_table.count].latency = + vega10_get_mem_latency(hwmgr, + dep_table->entries[i].clk); + clocks->num_levels++; + data->mclk_latency_table.count++; + } + } +} + +static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_dcefclk; + uint32_t i; + + for (i = 0; i < dep_table->count; i++) { + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].latency_in_us = 0; + clocks->num_levels++; + } +} + +static void vega10_get_socclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_socclk; + uint32_t i; + + for (i = 0; i < dep_table->count; i++) { + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].latency_in_us = 0; + clocks->num_levels++; + } +} + +static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks) +{ + switch (type) { + case amd_pp_sys_clock: + vega10_get_sclks(hwmgr, clocks); + break; + case amd_pp_mem_clock: + vega10_get_memclocks(hwmgr, clocks); + break; + case amd_pp_dcef_clock: + vega10_get_dcefclocks(hwmgr, clocks); + break; + case amd_pp_soc_clock: + vega10_get_socclocks(hwmgr, clocks); + break; + default: + return -1; + } + + return 0; +} + +static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + uint32_t i; + + switch (type) { + case amd_pp_mem_clock: + dep_table = table_info->vdd_dep_on_mclk; + break; + case amd_pp_dcef_clock: + dep_table = table_info->vdd_dep_on_dcefclk; + break; + case amd_pp_disp_clock: + dep_table = table_info->vdd_dep_on_dispclk; + break; + case amd_pp_pixel_clock: + dep_table = table_info->vdd_dep_on_pixclk; + break; + case amd_pp_phy_clock: + dep_table = table_info->vdd_dep_on_phyclk; + break; + default: + return -1; + } + + for (i = 0; i < dep_table->count; i++) { + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table-> + entries[dep_table->entries[i].vddInd].us_vdd); + 
clocks->num_levels++; + } + + if (i < dep_table->count) + return -1; + + return 0; +} + +static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + Watermarks_t *table = &(data->smc_state_table.water_marks_table); + int result = 0; + uint32_t i; + + if (!data->registry_data.disable_water_mark) { + for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { + table->WatermarkRow[WM_DCEFCLK][i].MinClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) / + 100); + table->WatermarkRow[WM_DCEFCLK][i].MaxClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) / + 100); + table->WatermarkRow[WM_DCEFCLK][i].MinUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) / + 100); + table->WatermarkRow[WM_DCEFCLK][i].MaxUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) / + 100); + table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t) + wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id; + } + + for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) { + table->WatermarkRow[WM_SOCCLK][i].MinClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) / + 100); + table->WatermarkRow[WM_SOCCLK][i].MaxClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) / + 100); + table->WatermarkRow[WM_SOCCLK][i].MinUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) / + 100); + table->WatermarkRow[WM_SOCCLK][i].MaxUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) / + 100); + table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) + wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id; + } + data->water_marks_bitmap = WaterMarksExist; + } + + return result; +} + +static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t i; + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (data->registry_data.sclk_dpm_key_disabled) + break; + + for (i = 0; i < 32; i++) { + if (mask & (1 << i)) + break; + } + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMinGfxclkByIndex, + i), + "Failed to set soft min sclk index!", + return -1); + break; + + case PP_MCLK: + if (data->registry_data.mclk_dpm_key_disabled) + break; + + for (i = 0; i < 32; i++) { + if (mask & (1 << i)) + break; + } + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMinUclkByIndex, + i), + "Failed to set soft min mclk index!", + return -1); + break; + + case PP_PCIE: + if (data->registry_data.pcie_dpm_key_disabled) + break; + + for (i = 0; i < 32; i++) { + if (mask & (1 << i)) + break; + } + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetMinLinkDpmByIndex, + i), + "Failed to set min pcie index!", + return -1); + break; + default: + break; + } + + return 0; +} + +static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct 
vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); + struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); + struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); + int i, now, size = 0; + + switch (type) { + case PP_SCLK: + if (data->registry_data.sclk_dpm_key_disabled) + break; + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GetCurrentGfxclkIndex), + "Attempt to get current sclk index Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + &now), + "Attempt to read sclk index Failed!", + return -1); + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + if (data->registry_data.mclk_dpm_key_disabled) + break; + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GetCurrentUclkIndex), + "Attempt to get current mclk index Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + &now), + "Attempt to read mclk index Failed!", + return -1); + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GetCurrentLinkIndex), + "Attempt to get current mclk index Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + &now), + "Attempt to read mclk index Failed!", + return -1); + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" : + (pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" : + (pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "", + (i == now) ? 
"*" : ""); + break; + default: + break; + } + return size; +} + +static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + int result = 0; + uint32_t num_turned_on_displays = 1; + Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); + struct cgs_display_info info = {0}; + + if ((data->water_marks_bitmap & WaterMarksExist) && + !(data->water_marks_bitmap & WaterMarksLoaded)) { + result = vega10_copy_table_to_smc(hwmgr->smumgr, + (uint8_t *)wm_table, WMTABLE); + PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL); + data->water_marks_bitmap |= WaterMarksLoaded; + } + + if (data->water_marks_bitmap & WaterMarksLoaded) { + cgs_get_active_displays_info(hwmgr->device, &info); + num_turned_on_displays = info.display_count; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_NumOfDisplays, num_turned_on_displays); + } + + return result; +} + +int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DPM_UVD].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + enable, + data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap), + "Attempt to Enable/Disable DPM UVD Failed!", + return -1); + data->smu_features[GNLD_DPM_UVD].enabled = enable; + } + return 0; +} + +static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + data->vce_power_gated = bgate; + return vega10_enable_disable_vce_dpm(hwmgr, !bgate); +} + +static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = bgate; + return vega10_enable_disable_uvd_dpm(hwmgr, !bgate); +} + +static inline bool vega10_are_power_levels_equal( + const struct vega10_performance_level *pl1, + const struct vega10_performance_level *pl2) +{ + return ((pl1->soc_clock == pl2->soc_clock) && + (pl1->gfx_clock == pl2->gfx_clock) && + (pl1->mem_clock == pl2->mem_clock)); +} + +static int vega10_check_states_equal(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, bool *equal) +{ + const struct vega10_power_state *psa; + const struct vega10_power_state *psb; + int i; + + if (pstate1 == NULL || pstate2 == NULL || equal == NULL) + return -EINVAL; + + psa = cast_const_phw_vega10_power_state(pstate1); + psb = cast_const_phw_vega10_power_state(pstate2); + /* If the two states don't even have the same number of performance levels they cannot be the same state. */ + if (psa->performance_level_count != psb->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < psa->performance_level_count; i++) { + if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { + /* If we have found even one performance level pair that is different the states are different. 
*/ + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); + *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); + *equal &= (psa->sclk_threshold == psb->sclk_threshold); + + return 0; +} + +static bool +vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + struct cgs_display_info info = {0, 0, NULL}; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + is_update_required = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr) + is_update_required = true; + } + + return is_update_required; +} + +static const struct pp_hwmgr_func vega10_hwmgr_funcs = { + .backend_init = vega10_hwmgr_backend_init, + .backend_fini = vega10_hwmgr_backend_fini, + .asic_setup = vega10_setup_asic_task, + .dynamic_state_management_enable = vega10_enable_dpm_tasks, + .get_num_of_pp_table_entries = + vega10_get_number_of_powerplay_table_entries, + .get_power_state_size = vega10_get_power_state_size, + .get_pp_table_entry = vega10_get_pp_table_entry, + .patch_boot_state = vega10_patch_boot_state, + .apply_state_adjust_rules = vega10_apply_state_adjust_rules, + .power_state_set = vega10_set_power_state_tasks, + .get_sclk = vega10_dpm_get_sclk, + .get_mclk = vega10_dpm_get_mclk, + .notify_smc_display_config_after_ps_adjustment = + vega10_notify_smc_display_config_after_ps_adjustment, + .force_dpm_level = vega10_dpm_force_dpm_level, + .get_temperature = vega10_thermal_get_temperature, + .stop_thermal_controller = vega10_thermal_stop_thermal_controller, + .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = + vega10_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = + vega10_thermal_ctrl_uninitialize_thermal_controller, + .set_fan_control_mode = vega10_set_fan_control_mode, + .get_fan_control_mode = vega10_get_fan_control_mode, + .read_sensor = vega10_read_sensor, + .get_dal_power_level = vega10_get_dal_power_level, + .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency, + .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage, + .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges, + .display_clock_voltage_request = vega10_display_clock_voltage_request, + .force_clock_level = vega10_force_clock_level, + .print_clock_levels = vega10_print_clock_levels, + .display_config_changed = vega10_display_configuration_changed_task, + .powergate_uvd = vega10_power_gate_uvd, + .powergate_vce = vega10_power_gate_vce, + .check_states_equal = vega10_check_states_equal, + .check_smc_update_required_for_display_configuration = + vega10_check_smc_update_required_for_display_configuration, +}; + +int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + hwmgr->hwmgr_func = &vega10_hwmgr_funcs; + 
hwmgr->pptable_func = &vega10_pptable_funcs; + pp_vega10_thermal_initialize(hwmgr); + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h new file mode 100644 index 000000000000..83c67b9262ff --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -0,0 +1,434 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _VEGA10_HWMGR_H_ +#define _VEGA10_HWMGR_H_ + +#include "hwmgr.h" +#include "smu9_driver_if.h" +#include "ppatomctrl.h" +#include "ppatomfwctrl.h" +#include "vega10_ppsmc.h" +#include "vega10_powertune.h" + +extern const uint32_t PhwVega10_Magic; +#define VEGA10_MAX_HARDWARE_POWERLEVELS 2 + +#define WaterMarksExist 1 +#define WaterMarksLoaded 2 + +enum { + GNLD_DPM_PREFETCHER = 0, + GNLD_DPM_GFXCLK, + GNLD_DPM_UCLK, + GNLD_DPM_SOCCLK, + GNLD_DPM_UVD, + GNLD_DPM_VCE, + GNLD_ULV, + GNLD_DPM_MP0CLK, + GNLD_DPM_LINK, + GNLD_DPM_DCEFCLK, + GNLD_AVFS, + GNLD_DS_GFXCLK, + GNLD_DS_SOCCLK, + GNLD_DS_LCLK, + GNLD_PPT, + GNLD_TDC, + GNLD_THERMAL, + GNLD_GFX_PER_CU_CG, + GNLD_RM, + GNLD_DS_DCEFCLK, + GNLD_ACDC, + GNLD_VR0HOT, + GNLD_VR1HOT, + GNLD_FW_CTF, + GNLD_LED_DISPLAY, + GNLD_FAN_CONTROL, + GNLD_VOLTAGE_CONTROLLER, + GNLD_FEATURES_MAX +}; + +#define GNLD_DPM_MAX (GNLD_DPM_DCEFCLK + 1) + +#define SMC_DPM_FEATURES 0x30F + +struct smu_features { + bool supported; + bool enabled; + uint32_t smu_feature_id; + uint32_t smu_feature_bitmap; +}; + +struct vega10_performance_level { + uint32_t soc_clock; + uint32_t gfx_clock; + uint32_t mem_clock; +}; + +struct vega10_bacos { + uint32_t baco_flags; + /* struct vega10_performance_level performance_level; */ +}; + +struct vega10_uvd_clocks { + uint32_t vclk; + uint32_t dclk; +}; + +struct vega10_vce_clocks { + uint32_t evclk; + uint32_t ecclk; +}; + +struct vega10_power_state { + uint32_t magic; + struct vega10_uvd_clocks uvd_clks; + struct vega10_vce_clocks vce_clks; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct vega10_performance_level performance_levels[VEGA10_MAX_HARDWARE_POWERLEVELS]; +}; + +struct vega10_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +#define VEGA10_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define MAX_PCIE_CONF 2 +#define VEGA10_MINIMUM_ENGINE_CLOCK 2500 + +struct vega10_dpm_state { + uint32_t soft_min_level; + uint32_t soft_max_level; + uint32_t 
hard_min_level; + uint32_t hard_max_level; +}; + +struct vega10_single_dpm_table { + uint32_t count; + struct vega10_dpm_state dpm_state; + struct vega10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega10_pcie_table { + uint16_t count; + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; + uint32_t lclk[MAX_PCIE_CONF]; +}; + +struct vega10_dpm_table { + struct vega10_single_dpm_table soc_table; + struct vega10_single_dpm_table gfx_table; + struct vega10_single_dpm_table mem_table; + struct vega10_single_dpm_table eclk_table; + struct vega10_single_dpm_table vclk_table; + struct vega10_single_dpm_table dclk_table; + struct vega10_single_dpm_table dcef_table; + struct vega10_single_dpm_table pixel_table; + struct vega10_single_dpm_table display_table; + struct vega10_single_dpm_table phy_table; + struct vega10_pcie_table pcie_table; +}; + +#define VEGA10_MAX_LEAKAGE_COUNT 8 +struct vega10_leakage_voltage { + uint16_t count; + uint16_t leakage_id[VEGA10_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[VEGA10_MAX_LEAKAGE_COUNT]; +}; + +struct vega10_display_timing { + uint32_t min_clock_in_sr; + uint32_t num_existing_displays; +}; + +struct vega10_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t acp_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; +}; + +struct vega10_vbios_boot_state { + uint16_t vddc; + uint16_t vddci; + uint32_t gfx_clock; + uint32_t mem_clock; + uint32_t soc_clock; +}; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 +#define DPMTABLE_OD_UPDATE_VDDC 0x00000010 + +struct vega10_smc_state_table { + uint32_t soc_boot_level; + uint32_t gfx_boot_level; + uint32_t dcef_boot_level; + uint32_t mem_boot_level; + uint32_t uvd_boot_level; + uint32_t vce_boot_level; + uint32_t gfx_max_level; + uint32_t mem_max_level; + uint8_t vr_hot_gpio; + uint8_t ac_dc_gpio; + uint8_t therm_out_gpio; + uint8_t therm_out_polarity; + uint8_t therm_out_mode; + PPTable_t pp_table; + Watermarks_t water_marks_table; + AvfsTable_t avfs_table; +}; + +struct vega10_mclk_latency_entries { + uint32_t frequency; + uint32_t latency; +}; + +struct vega10_mclk_latency_table { + uint32_t count; + struct vega10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega10_registry_data { + uint8_t ac_dc_switch_gpio_support; + uint8_t avfs_support; + uint8_t cac_support; + uint8_t clock_stretcher_support; + uint8_t db_ramping_support; + uint8_t didt_support; + uint8_t dynamic_state_patching_support; + uint8_t enable_pkg_pwr_tracking_feature; + uint8_t enable_tdc_limit_feature; + uint32_t fast_watermark_threshold; + uint8_t force_dpm_high; + uint8_t fuzzy_fan_control_support; + uint8_t long_idle_baco_support; + uint8_t mclk_dpm_key_disabled; + uint8_t od_state_in_dc_support; + uint8_t pcieLaneOverride; + uint8_t pcieSpeedOverride; + uint32_t pcieClockOverride; + uint8_t pcie_dpm_key_disabled; + uint8_t dcefclk_dpm_key_disabled; + uint8_t power_containment_support; + uint8_t ppt_support; + uint8_t prefetcher_dpm_key_disabled; + uint8_t quick_transition_support; + uint8_t regulator_hot_gpio_support; + uint8_t sclk_deep_sleep_support; + uint8_t sclk_dpm_key_disabled; + uint8_t sclk_from_vbios; + uint8_t sclk_throttle_low_notification; + uint8_t show_baco_dbg_info; + uint8_t skip_baco_hardware; + uint8_t socclk_dpm_key_disabled; + uint8_t 
spll_shutdown_support; + uint8_t sq_ramping_support; + uint32_t stable_pstate_sclk_dpm_percentage; + uint8_t tcp_ramping_support; + uint8_t tdc_support; + uint8_t td_ramping_support; + uint8_t thermal_out_gpio_support; + uint8_t thermal_support; + uint8_t fw_ctf_enabled; + uint8_t fan_control_support; + uint8_t ulps_support; + uint8_t ulv_support; + uint32_t vddc_vddci_delta; + uint8_t odn_feature_enable; + uint8_t disable_water_mark; + uint8_t zrpm_stop_temp; + uint8_t zrpm_start_temp; + uint8_t led_dpm_enabled; + uint8_t vr0hot_enabled; + uint8_t vr1hot_enabled; +}; + +struct vega10_odn_clock_voltage_dependency_table { + uint32_t count; + struct phm_ppt_v1_clock_voltage_dependency_record + entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega10_odn_dpm_table { + struct phm_odn_clock_levels odn_core_clock_dpm_levels; + struct phm_odn_clock_levels odn_memory_clock_dpm_levels; + struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_sclk; + struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_mclk; +}; + +struct vega10_odn_fan_table { + uint32_t target_fan_speed; + uint32_t target_temperature; + uint32_t min_performance_clock; + uint32_t min_fan_limit; +}; + +struct vega10_hwmgr { + struct vega10_dpm_table dpm_table; + struct vega10_dpm_table golden_dpm_table; + struct vega10_registry_data registry_data; + struct vega10_vbios_boot_state vbios_boot_state; + struct vega10_mclk_latency_table mclk_latency_table; + + struct vega10_leakage_voltage vddc_leakage; + + uint32_t vddc_control; + struct pp_atomfwctrl_voltage_table vddc_voltage_table; + uint32_t mvdd_control; + struct pp_atomfwctrl_voltage_table mvdd_voltage_table; + uint32_t vddci_control; + struct pp_atomfwctrl_voltage_table vddci_voltage_table; + + uint32_t active_auto_throttle_sources; + uint32_t water_marks_bitmap; + struct vega10_bacos bacos; + + struct vega10_odn_dpm_table odn_dpm_table; + struct vega10_odn_fan_table odn_fan_table; + + /* ---- General data ---- */ + uint8_t need_update_dpm_table; + + bool cac_enabled; + bool battery_state; + bool is_tlu_enabled; + + uint32_t low_sclk_interrupt_threshold; + + uint32_t total_active_cus; + + struct vega10_display_timing display_timing; + + /* ---- Vega10 Dyn Register Settings ---- */ + + uint32_t debug_settings; + uint32_t lowest_uclk_reserved_for_ulv; + uint32_t gfxclk_average_alpha; + uint32_t socclk_average_alpha; + uint32_t uclk_average_alpha; + uint32_t gfx_activity_average_alpha; + uint32_t display_voltage_mode; + uint32_t dcef_clk_quad_eqn_a; + uint32_t dcef_clk_quad_eqn_b; + uint32_t dcef_clk_quad_eqn_c; + uint32_t disp_clk_quad_eqn_a; + uint32_t disp_clk_quad_eqn_b; + uint32_t disp_clk_quad_eqn_c; + uint32_t pixel_clk_quad_eqn_a; + uint32_t pixel_clk_quad_eqn_b; + uint32_t pixel_clk_quad_eqn_c; + uint32_t phy_clk_quad_eqn_a; + uint32_t phy_clk_quad_eqn_b; + uint32_t phy_clk_quad_eqn_c; + + /* ---- Thermal Temperature Setting ---- */ + struct vega10_dpmlevel_enable_mask dpm_level_enable_mask; + + /* ---- Power Gating States ---- */ + bool uvd_power_gated; + bool vce_power_gated; + bool samu_power_gated; + bool need_long_memory_training; + + /* Internal settings to apply the application power optimization parameters */ + bool apply_optimized_settings; + uint32_t disable_dpm_mask; + + /* ---- Overdrive next setting ---- */ + uint32_t apply_overdrive_next_settings_mask; + + /* ---- Workload Mask ---- */ + uint32_t workload_mask; + + /* ---- SMU9 ---- */ + struct smu_features smu_features[GNLD_FEATURES_MAX]; + struct vega10_smc_state_table smc_state_table; 
+ + uint32_t config_telemetry; +}; + +#define VEGA10_DPM2_NEAR_TDP_DEC 10 +#define VEGA10_DPM2_ABOVE_SAFE_INC 5 +#define VEGA10_DPM2_BELOW_SAFE_INC 20 + +#define VEGA10_DPM2_LTA_WINDOW_SIZE 7 + +#define VEGA10_DPM2_LTS_TRUNCATE 0 + +#define VEGA10_DPM2_TDP_SAFE_LIMIT_PERCENT 80 + +#define VEGA10_DPM2_MAXPS_PERCENT_M 90 +#define VEGA10_DPM2_MAXPS_PERCENT_H 90 + +#define VEGA10_DPM2_PWREFFICIENCYRATIO_MARGIN 50 + +#define VEGA10_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define VEGA10_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define VEGA10_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define VEGA10_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E +#define VEGA10_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF + +#define VEGA10_VOLTAGE_CONTROL_NONE 0x0 +#define VEGA10_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define VEGA10_VOLTAGE_CONTROL_BY_SVID2 0x2 +#define VEGA10_VOLTAGE_CONTROL_MERGED 0x3 +/* To convert to Q8.8 format for firmware */ +#define VEGA10_Q88_FORMAT_CONVERSION_UNIT 256 + +#define VEGA10_UNUSED_GPIO_PIN 0x7F + +#define VEGA10_THERM_OUT_MODE_DISABLE 0x0 +#define VEGA10_THERM_OUT_MODE_THERM_ONLY 0x1 +#define VEGA10_THERM_OUT_MODE_THERM_VRHOT 0x2 + +#define PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT 0xffffffff +#define PPREGKEY_VEGA10QUADRATICEQUATION_DFLT 0xffffffff + +#define PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ + +extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); +extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); +extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr); +extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); +extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display); +int vega10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input); +int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); + +#endif /* _VEGA10_HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h new file mode 100644 index 000000000000..8c55eaa3c32b --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h @@ -0,0 +1,44 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef VEGA10_INC_H +#define VEGA10_INC_H + +#include "asic_reg/vega10/THM/thm_9_0_default.h" +#include "asic_reg/vega10/THM/thm_9_0_offset.h" +#include "asic_reg/vega10/THM/thm_9_0_sh_mask.h" + +#include "asic_reg/vega10/MP/mp_9_0_default.h" +#include "asic_reg/vega10/MP/mp_9_0_offset.h" +#include "asic_reg/vega10/MP/mp_9_0_sh_mask.h" + +#include "asic_reg/vega10/GC/gc_9_0_default.h" +#include "asic_reg/vega10/GC/gc_9_0_offset.h" +#include "asic_reg/vega10/GC/gc_9_0_sh_mask.h" + +#include "asic_reg/vega10/NBIO/nbio_6_1_default.h" +#include "asic_reg/vega10/NBIO/nbio_6_1_offset.h" +#include "asic_reg/vega10/NBIO/nbio_6_1_sh_mask.h" + + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c new file mode 100644 index 000000000000..f1e244cd2370 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -0,0 +1,137 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "hwmgr.h" +#include "vega10_hwmgr.h" +#include "vega10_powertune.h" +#include "vega10_smumgr.h" +#include "vega10_ppsmc.h" +#include "pp_debug.h" + +void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = table_info->tdp_table; + PPTable_t *table = &(data->smc_state_table.pp_table); + + table->SocketPowerLimit = cpu_to_le16( + tdp_table->usMaximumPowerDeliveryLimit); + table->TdcLimit = cpu_to_le16(tdp_table->usTDC); + table->EdcLimit = cpu_to_le16(tdp_table->usEDCLimit); + table->TedgeLimit = cpu_to_le16(tdp_table->usTemperatureLimitTedge); + table->ThotspotLimit = cpu_to_le16(tdp_table->usTemperatureLimitHotspot); + table->ThbmLimit = cpu_to_le16(tdp_table->usTemperatureLimitHBM); + table->Tvr_socLimit = cpu_to_le16(tdp_table->usTemperatureLimitVrVddc); + table->Tvr_memLimit = cpu_to_le16(tdp_table->usTemperatureLimitVrMvdd); + table->Tliquid1Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid1); + table->Tliquid2Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid2); + table->TplxLimit = cpu_to_le16(tdp_table->usTemperatureLimitPlx); + table->LoadLineResistance = cpu_to_le16( + hwmgr->platform_descriptor.LoadLineSlope); + table->FitLimit = 0; /* Not used for Vega10 */ + + table->Liquid1_I2C_address = tdp_table->ucLiquid1_I2C_address; + table->Liquid2_I2C_address = tdp_table->ucLiquid2_I2C_address; + table->Vr_I2C_address = tdp_table->ucVr_I2C_address; + table->Plx_I2C_address = tdp_table->ucPlx_I2C_address; + + table->Liquid_I2C_LineSCL = tdp_table->ucLiquid_I2C_Line; + table->Liquid_I2C_LineSDA = tdp_table->ucLiquid_I2C_LineSDA; + + table->Vr_I2C_LineSCL = tdp_table->ucVr_I2C_Line; + table->Vr_I2C_LineSDA = tdp_table->ucVr_I2C_LineSDA; + + table->Plx_I2C_LineSCL = tdp_table->ucPlx_I2C_Line; + table->Plx_I2C_LineSDA = tdp_table->ucPlx_I2C_LineSDA; +} + +int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->registry_data.enable_pkg_pwr_tracking_feature) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetPptLimit, n); + + return 0; +} + +int vega10_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = table_info->tdp_table; + uint32_t default_pwr_limit = + (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (data->smu_features[GNLD_PPT].supported) + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_PPT].smu_feature_bitmap), + "Attempt to enable PPT feature Failed!", + data->smu_features[GNLD_PPT].supported = false); + + if (data->smu_features[GNLD_TDC].supported) + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_TDC].smu_feature_bitmap), + "Attempt to enable PPT feature Failed!", + data->smu_features[GNLD_TDC].supported = false); + + result = vega10_set_power_limit(hwmgr, default_pwr_limit); + PP_ASSERT_WITH_CODE(!result, + "Failed to set Default Power Limit in SMC!", + return result); + } + + return 
result; +} + +static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, + uint32_t adjust_percent) +{ + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_OverDriveSetPercentage, adjust_percent); +} + +int vega10_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + int adjust_percent, result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + adjust_percent = + hwmgr->platform_descriptor.TDPAdjustmentPolarity ? + hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + result = vega10_set_overdrive_target_percentage(hwmgr, + (uint32_t)adjust_percent); + } + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h new file mode 100644 index 000000000000..d9662bf4a4b4 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h @@ -0,0 +1,65 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _VEGA10_POWERTUNE_H_ +#define _VEGA10_POWERTUNE_H_ + +enum vega10_pt_config_reg_type { + VEGA10_CONFIGREG_MMR = 0, + VEGA10_CONFIGREG_SMC_IND, + VEGA10_CONFIGREG_DIDT_IND, + VEGA10_CONFIGREG_CACHE, + VEGA10_CONFIGREG_MAX +}; + +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 + +struct vega10_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum vega10_pt_config_reg_type type; +}; + +struct vega10_pt_defaults { + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + uint8_t TdcWaterfallCtl; + uint8_t DTEAmbientTempBase; +}; + +void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); +int vega10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); +int vega10_populate_pm_fuses(struct pp_hwmgr *hwmgr); +int vega10_enable_smc_cac(struct pp_hwmgr *hwmgr); +int vega10_enable_power_containment(struct pp_hwmgr *hwmgr); +int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); +int vega10_power_control_set_level(struct pp_hwmgr *hwmgr); + +#endif /* _VEGA10_POWERTUNE_H_ */ + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h new file mode 100644 index 000000000000..6a907c93fd9c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h @@ -0,0 +1,381 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _VEGA10_PPTABLE_H_ +#define _VEGA10_PPTABLE_H_ + +#pragma pack(push, 1) + +#define ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f +#define ATOM_VEGA10_PP_FANPARAMETERS_NOFAN 0x80 + +#define ATOM_VEGA10_PP_THERMALCONTROLLER_NONE 0 +#define ATOM_VEGA10_PP_THERMALCONTROLLER_LM96163 17 +#define ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 24 + +#define ATOM_VEGA10_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 +#define ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D + +#define ATOM_VEGA10_PP_PLATFORM_CAP_POWERPLAY 0x1 +#define ATOM_VEGA10_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 +#define ATOM_VEGA10_PP_PLATFORM_CAP_HARDWAREDC 0x4 +#define ATOM_VEGA10_PP_PLATFORM_CAP_BACO 0x8 +#define ATOM_VEGA10_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL 0x10 + + +/* ATOM_PPLIB_NONCLOCK_INFO::usClassification */ +#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 +#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 +#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 +#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 +/* 2, 4, 6, 7 are reserved */ + +#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 +#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 +#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 +#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 +#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 +#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 + +/* ATOM_PPLIB_NONCLOCK_INFO::usClassification2 */ +#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 + +#define ATOM_Vega10_DISALLOW_ON_DC 0x00004000 +#define ATOM_Vega10_ENABLE_VARIBRIGHT 0x00008000 + +#define ATOM_Vega10_TABLE_REVISION_VEGA10 8 + +#define ATOM_Vega10_VoltageMode_AVFS_Interpolate 0 +#define ATOM_Vega10_VoltageMode_AVFS_WorstCase 1 +#define ATOM_Vega10_VoltageMode_Static 2 + +typedef struct _ATOM_Vega10_POWERPLAYTABLE { + struct atom_common_table_header sHeader; + UCHAR ucTableRevision; + USHORT usTableSize; /* the size of header structure */ + ULONG ulGoldenPPID; /* PPGen use only */ + ULONG ulGoldenRevision; /* PPGen use only */ + USHORT usFormatID; /* PPGen use only */ + ULONG ulPlatformCaps; /* See ATOM_Vega10_CAPS_* */ + ULONG ulMaxODEngineClock; /* For Overdrive. */ + ULONG ulMaxODMemoryClock; /* For Overdrive. 
*/ + USHORT usPowerControlLimit; + USHORT usUlvVoltageOffset; /* in mv units */ + USHORT usUlvSmnclkDid; + USHORT usUlvMp1clkDid; + USHORT usUlvGfxclkBypass; + USHORT usGfxclkSlewRate; + UCHAR ucGfxVoltageMode; + UCHAR ucSocVoltageMode; + UCHAR ucUclkVoltageMode; + UCHAR ucUvdVoltageMode; + UCHAR ucVceVoltageMode; + UCHAR ucMp0VoltageMode; + UCHAR ucDcefVoltageMode; + USHORT usStateArrayOffset; /* points to ATOM_Vega10_State_Array */ + USHORT usFanTableOffset; /* points to ATOM_Vega10_Fan_Table */ + USHORT usThermalControllerOffset; /* points to ATOM_Vega10_Thermal_Controller */ + USHORT usSocclkDependencyTableOffset; /* points to ATOM_Vega10_SOCCLK_Dependency_Table */ + USHORT usMclkDependencyTableOffset; /* points to ATOM_Vega10_MCLK_Dependency_Table */ + USHORT usGfxclkDependencyTableOffset; /* points to ATOM_Vega10_GFXCLK_Dependency_Table */ + USHORT usDcefclkDependencyTableOffset; /* points to ATOM_Vega10_DCEFCLK_Dependency_Table */ + USHORT usVddcLookupTableOffset; /* points to ATOM_Vega10_Voltage_Lookup_Table */ + USHORT usVddmemLookupTableOffset; /* points to ATOM_Vega10_Voltage_Lookup_Table */ + USHORT usMMDependencyTableOffset; /* points to ATOM_Vega10_MM_Dependency_Table */ + USHORT usVCEStateTableOffset; /* points to ATOM_Vega10_VCE_State_Table */ + USHORT usReserve; /* No PPM Support for Vega10 */ + USHORT usPowerTuneTableOffset; /* points to ATOM_Vega10_PowerTune_Table */ + USHORT usHardLimitTableOffset; /* points to ATOM_Vega10_Hard_Limit_Table */ + USHORT usVddciLookupTableOffset; /* points to ATOM_Vega10_Voltage_Lookup_Table */ + USHORT usPCIETableOffset; /* points to ATOM_Vega10_PCIE_Table */ + USHORT usPixclkDependencyTableOffset; /* points to ATOM_Vega10_PIXCLK_Dependency_Table */ + USHORT usDispClkDependencyTableOffset; /* points to ATOM_Vega10_DISPCLK_Dependency_Table */ + USHORT usPhyClkDependencyTableOffset; /* points to ATOM_Vega10_PHYCLK_Dependency_Table */ +} ATOM_Vega10_POWERPLAYTABLE; + +typedef struct _ATOM_Vega10_State { + UCHAR ucSocClockIndexHigh; + UCHAR ucSocClockIndexLow; + UCHAR ucGfxClockIndexHigh; + UCHAR ucGfxClockIndexLow; + UCHAR ucMemClockIndexHigh; + UCHAR ucMemClockIndexLow; + USHORT usClassification; + ULONG ulCapsAndSettings; + USHORT usClassification2; +} ATOM_Vega10_State; + +typedef struct _ATOM_Vega10_State_Array { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_State_Array; + +typedef struct _ATOM_Vega10_CLK_Dependency_Record { + ULONG ulClk; /* Frequency of Clock */ + UCHAR ucVddInd; /* Base voltage */ +} ATOM_Vega10_CLK_Dependency_Record; + +typedef struct _ATOM_Vega10_GFXCLK_Dependency_Record { + ULONG ulClk; /* Clock Frequency */ + UCHAR ucVddInd; /* SOC_VDD index */ + USHORT usCKSVOffsetandDisable; /* Bits 0~30: Voltage offset for CKS, Bit 31: Disable/enable for the GFXCLK level. */ + USHORT usAVFSOffset; /* AVFS Voltage offset */ +} ATOM_Vega10_GFXCLK_Dependency_Record; + +typedef struct _ATOM_Vega10_MCLK_Dependency_Record { + ULONG ulMemClk; /* Clock Frequency */ + UCHAR ucVddInd; /* SOC_VDD index */ + UCHAR ucVddMemInd; /* MEM_VDD - only non zero for MCLK record */ + UCHAR ucVddciInd; /* VDDCI = only non zero for MCLK record */ +} ATOM_Vega10_MCLK_Dependency_Record; + +typedef struct _ATOM_Vega10_GFXCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_GFXCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. 
*/ +} ATOM_Vega10_GFXCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_MCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_MCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_SOCCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_SOCCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_DCEFCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_DCEFCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_PIXCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_PIXCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_DISPCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries.*/ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_DISPCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_PHYCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_PHYCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_MM_Dependency_Record { + UCHAR ucVddcInd; /* SOC_VDD voltage */ + ULONG ulDClk; /* UVD D-clock */ + ULONG ulVClk; /* UVD V-clock */ + ULONG ulEClk; /* VCE clock */ + ULONG ulPSPClk; /* PSP clock */ +} ATOM_Vega10_MM_Dependency_Record; + +typedef struct _ATOM_Vega10_MM_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries */ + ATOM_Vega10_MM_Dependency_Record entries[1]; /* Dynamically allocate entries */ +} ATOM_Vega10_MM_Dependency_Table; + +typedef struct _ATOM_Vega10_PCIE_Record { + ULONG ulLCLK; /* LClock */ + UCHAR ucPCIEGenSpeed; /* PCIE Speed */ + UCHAR ucPCIELaneWidth; /* PCIE Lane Width */ +} ATOM_Vega10_PCIE_Record; + +typedef struct _ATOM_Vega10_PCIE_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries */ + ATOM_Vega10_PCIE_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_PCIE_Table; + +typedef struct _ATOM_Vega10_Voltage_Lookup_Record { + USHORT usVdd; /* Base voltage */ +} ATOM_Vega10_Voltage_Lookup_Record; + +typedef struct _ATOM_Vega10_Voltage_Lookup_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries */ + ATOM_Vega10_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries */ +} ATOM_Vega10_Voltage_Lookup_Table; + +typedef struct _ATOM_Vega10_Fan_Table { + UCHAR ucRevId; /* Change this if the table format changes or version changes so that the other fields are not the same. */ + USHORT usFanOutputSensitivity; /* Sensitivity of fan reaction to temepature changes. */ + USHORT usFanRPMMax; /* The default value in RPM. */ + USHORT usThrottlingRPM; + USHORT usFanAcousticLimit; /* Minimum Fan Controller Frequency Acoustic Limit. */ + USHORT usTargetTemperature; /* The default ideal temperature in Celcius. */ + USHORT usMinimumPWMLimit; /* The minimum PWM that the advanced fan controller can set. */ + USHORT usTargetGfxClk; /* The ideal Fan Controller GFXCLK Frequency Acoustic Limit. 
*/ + USHORT usFanGainEdge; + USHORT usFanGainHotspot; + USHORT usFanGainLiquid; + USHORT usFanGainVrVddc; + USHORT usFanGainVrMvdd; + USHORT usFanGainPlx; + USHORT usFanGainHbm; + UCHAR ucEnableZeroRPM; + USHORT usFanStopTemperature; + USHORT usFanStartTemperature; +} ATOM_Vega10_Fan_Table; + +typedef struct _ATOM_Vega10_Fan_Table_V2 { + UCHAR ucRevId; + USHORT usFanOutputSensitivity; + USHORT usFanAcousticLimitRpm; + USHORT usThrottlingRPM; + USHORT usTargetTemperature; + USHORT usMinimumPWMLimit; + USHORT usTargetGfxClk; + USHORT usFanGainEdge; + USHORT usFanGainHotspot; + USHORT usFanGainLiquid; + USHORT usFanGainVrVddc; + USHORT usFanGainVrMvdd; + USHORT usFanGainPlx; + USHORT usFanGainHbm; + UCHAR ucEnableZeroRPM; + USHORT usFanStopTemperature; + USHORT usFanStartTemperature; + UCHAR ucFanParameters; + UCHAR ucFanMinRPM; + UCHAR ucFanMaxRPM; +} ATOM_Vega10_Fan_Table_V2; + +typedef struct _ATOM_Vega10_Thermal_Controller { + UCHAR ucRevId; + UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/ + UCHAR ucI2cLine; /* as interpreted by DAL I2C */ + UCHAR ucI2cAddress; + UCHAR ucFanParameters; /* Fan Control Parameters. */ + UCHAR ucFanMinRPM; /* Fan Minimum RPM (hundreds) -- for display purposes only.*/ + UCHAR ucFanMaxRPM; /* Fan Maximum RPM (hundreds) -- for display purposes only.*/ + UCHAR ucFlags; /* to be defined */ +} ATOM_Vega10_Thermal_Controller; + +typedef struct _ATOM_Vega10_VCE_State_Record +{ + UCHAR ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Vega10_MM_Dependency_Table' type */ + UCHAR ucFlag; /* 2 bits indicates memory p-states */ + UCHAR ucSCLKIndex; /* index into ATOM_Vega10_SCLK_Dependency_Table */ + UCHAR ucMCLKIndex; /* index into ATOM_Vega10_MCLK_Dependency_Table */ +} ATOM_Vega10_VCE_State_Record; + +typedef struct _ATOM_Vega10_VCE_State_Table +{ + UCHAR ucRevId; + UCHAR ucNumEntries; + ATOM_Vega10_VCE_State_Record entries[1]; +} ATOM_Vega10_VCE_State_Table; + +typedef struct _ATOM_Vega10_PowerTune_Table { + UCHAR ucRevId; + USHORT usSocketPowerLimit; + USHORT usBatteryPowerLimit; + USHORT usSmallPowerLimit; + USHORT usTdcLimit; + USHORT usEdcLimit; + USHORT usSoftwareShutdownTemp; + USHORT usTemperatureLimitHotSpot; + USHORT usTemperatureLimitLiquid1; + USHORT usTemperatureLimitLiquid2; + USHORT usTemperatureLimitHBM; + USHORT usTemperatureLimitVrSoc; + USHORT usTemperatureLimitVrMem; + USHORT usTemperatureLimitPlx; + USHORT usLoadLineResistance; + UCHAR ucLiquid1_I2C_address; + UCHAR ucLiquid2_I2C_address; + UCHAR ucVr_I2C_address; + UCHAR ucPlx_I2C_address; + UCHAR ucLiquid_I2C_LineSCL; + UCHAR ucLiquid_I2C_LineSDA; + UCHAR ucVr_I2C_LineSCL; + UCHAR ucVr_I2C_LineSDA; + UCHAR ucPlx_I2C_LineSCL; + UCHAR ucPlx_I2C_LineSDA; + USHORT usTemperatureLimitTedge; +} ATOM_Vega10_PowerTune_Table; + +typedef struct _ATOM_Vega10_PowerTune_Table_V2 +{ + UCHAR ucRevId; + USHORT usSocketPowerLimit; + USHORT usBatteryPowerLimit; + USHORT usSmallPowerLimit; + USHORT usTdcLimit; + USHORT usEdcLimit; + USHORT usSoftwareShutdownTemp; + USHORT usTemperatureLimitHotSpot; + USHORT usTemperatureLimitLiquid1; + USHORT usTemperatureLimitLiquid2; + USHORT usTemperatureLimitHBM; + USHORT usTemperatureLimitVrSoc; + USHORT usTemperatureLimitVrMem; + USHORT usTemperatureLimitPlx; + USHORT usLoadLineResistance; + UCHAR ucLiquid1_I2C_address; + UCHAR ucLiquid2_I2C_address; + UCHAR ucLiquid_I2C_Line; + UCHAR ucVr_I2C_address; + UCHAR ucVr_I2C_Line; + UCHAR ucPlx_I2C_address; + UCHAR ucPlx_I2C_Line; + USHORT usTemperatureLimitTedge; +} ATOM_Vega10_PowerTune_Table_V2; + 
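[Editorial note, not part of the patch] The sub-tables declared in vega10_pptable.h are packed structures that are never reached through pointer members: each one is found by a byte offset (usStateArrayOffset, usFanTableOffset, ...) measured from the start of ATOM_Vega10_POWERPLAYTABLE, and each entry array is declared as entries[1] but actually holds ucNumEntries records laid out back to back. The sketch below illustrates that access pattern, matching what vega10_processpptables.c does when it resolves usStateArrayOffset further down in this series; the helper names here are hypothetical and it assumes only the definitions above plus the kernel's le16_to_cpu().

/* Illustrative sketch only -- not part of the patch. */
static const ATOM_Vega10_State_Array *get_state_array(
		const ATOM_Vega10_POWERPLAYTABLE *pp_table)
{
	/* Sub-tables live at a byte offset from the start of the
	 * top-level powerplay table; offsets are little-endian. */
	return (const ATOM_Vega10_State_Array *)
		((const uint8_t *)pp_table +
		 le16_to_cpu(pp_table->usStateArrayOffset));
}

static void walk_states(const ATOM_Vega10_POWERPLAYTABLE *pp_table)
{
	const ATOM_Vega10_State_Array *states = get_state_array(pp_table);
	uint8_t i;

	/* states->states[] is declared with one element but carries
	 * ucNumEntries consecutive ATOM_Vega10_State records. */
	for (i = 0; i < states->ucNumEntries; i++) {
		const ATOM_Vega10_State *s = &states->states[i];
		/* s->ucGfxClockIndexHigh/Low etc. index into the clock
		 * dependency tables declared earlier in this header. */
		(void)s;
	}
}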
+typedef struct _ATOM_Vega10_Hard_Limit_Record { + ULONG ulSOCCLKLimit; + ULONG ulGFXCLKLimit; + ULONG ulMCLKLimit; + USHORT usVddcLimit; + USHORT usVddciLimit; + USHORT usVddMemLimit; +} ATOM_Vega10_Hard_Limit_Record; + +typedef struct _ATOM_Vega10_Hard_Limit_Table +{ + UCHAR ucRevId; + UCHAR ucNumEntries; + ATOM_Vega10_Hard_Limit_Record entries[1]; +} ATOM_Vega10_Hard_Limit_Table; + +typedef struct _Vega10_PPTable_Generic_SubTable_Header +{ + UCHAR ucRevId; +} Vega10_PPTable_Generic_SubTable_Header; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c new file mode 100644 index 000000000000..8b55ae01132d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -0,0 +1,1190 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> + +#include "vega10_processpptables.h" +#include "ppatomfwctrl.h" +#include "atomfirmware.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "vega10_pptable.h" + +static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, + enum phm_platform_caps cap) +{ + if (enable) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap); +} + +static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) +{ + int index = GetIndexIntoMasterDataTable(powerplayinfo); + + u16 size; + u8 frev, crev; + const void *table_address = hwmgr->soft_pp_table; + + if (!table_address) { + table_address = (ATOM_Vega10_POWERPLAYTABLE *) + cgs_atom_get_data_table(hwmgr->device, index, + &size, &frev, &crev); + + hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ + } + + return table_address; +} + +static int check_powerplay_tables( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + const ATOM_Vega10_State_Array *state_arrays; + + state_arrays = (ATOM_Vega10_State_Array *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usStateArrayOffset)); + + PP_ASSERT_WITH_CODE((powerplay_table->sHeader.format_revision >= + ATOM_Vega10_TABLE_REVISION_VEGA10), + "Unsupported PPTable format!", return -1); + PP_ASSERT_WITH_CODE(powerplay_table->usStateArrayOffset, + "State table is not set!", return -1); + PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0, + "Invalid PowerPlay Table!", return -1); + PP_ASSERT_WITH_CODE(state_arrays->ucNumEntries > 0, + "Invalid PowerPlay Table!", return -1); + + return 0; +} + +static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) +{ + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_CAP_POWERPLAY), + PHM_PlatformCaps_PowerPlaySupport); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_CAP_SBIOSPOWERSOURCE), + PHM_PlatformCaps_BiosPowerSourceControl); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_CAP_HARDWAREDC), + PHM_PlatformCaps_AutomaticDCTransition); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_CAP_BACO), + PHM_PlatformCaps_BACO); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL), + PHM_PlatformCaps_CombinePCCWithThermalSignal); + + return 0; +} + +static int init_thermal_controller( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + const ATOM_Vega10_Thermal_Controller *thermal_controller; + const Vega10_PPTable_Generic_SubTable_Header *header; + const ATOM_Vega10_Fan_Table *fan_table_v1; + const ATOM_Vega10_Fan_Table_V2 *fan_table_v2; + + thermal_controller = (ATOM_Vega10_Thermal_Controller *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usThermalControllerOffset)); + + PP_ASSERT_WITH_CODE((powerplay_table->usThermalControllerOffset != 0), + "Thermal controller table not set!", return -EINVAL); + + hwmgr->thermal_controller.ucType = thermal_controller->ucType; + hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine; + hwmgr->thermal_controller.ucI2cAddress = thermal_controller->ucI2cAddress; + + hwmgr->thermal_controller.fanInfo.bNoFan = + (0 != (thermal_controller->ucFanParameters & + ATOM_VEGA10_PP_FANPARAMETERS_NOFAN)); + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + 
thermal_controller->ucFanParameters & + ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + + hwmgr->thermal_controller.fanInfo.ulMinRPM = + thermal_controller->ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM = + thermal_controller->ucFanMaxRPM * 100UL; + + hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay + = 100000; + + set_hw_cap( + hwmgr, + ATOM_VEGA10_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, + PHM_PlatformCaps_ThermalController); + + if (!powerplay_table->usFanTableOffset) + return 0; + + header = (const Vega10_PPTable_Generic_SubTable_Header *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usFanTableOffset)); + + if (header->ucRevId == 10) { + fan_table_v1 = (ATOM_Vega10_Fan_Table *)header; + + PP_ASSERT_WITH_CODE((fan_table_v1->ucRevId >= 8), + "Invalid Input Fan Table!", return -EINVAL); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + le16_to_cpu(fan_table_v1->usFanOutputSensitivity); + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + le16_to_cpu(fan_table_v1->usFanRPMMax); + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = + le16_to_cpu(fan_table_v1->usThrottlingRPM); + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit = + le16_to_cpu(fan_table_v1->usFanAcousticLimit); + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = + le16_to_cpu(fan_table_v1->usTargetTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = + le16_to_cpu(fan_table_v1->usMinimumPWMLimit); + hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk = + le16_to_cpu(fan_table_v1->usTargetGfxClk); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge = + le16_to_cpu(fan_table_v1->usFanGainEdge); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot = + le16_to_cpu(fan_table_v1->usFanGainHotspot); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid = + le16_to_cpu(fan_table_v1->usFanGainLiquid); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc = + le16_to_cpu(fan_table_v1->usFanGainVrVddc); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd = + le16_to_cpu(fan_table_v1->usFanGainVrMvdd); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx = + le16_to_cpu(fan_table_v1->usFanGainPlx); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm = + le16_to_cpu(fan_table_v1->usFanGainHbm); + + hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM = + fan_table_v1->ucEnableZeroRPM; + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature = + le16_to_cpu(fan_table_v1->usFanStopTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = + le16_to_cpu(fan_table_v1->usFanStartTemperature); + } else if (header->ucRevId > 10) { + fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header; + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + fan_table_v2->ucFanParameters & ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + hwmgr->thermal_controller.fanInfo.ulMinRPM = fan_table_v2->ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM = fan_table_v2->ucFanMaxRPM * 100UL; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + 
PHM_PlatformCaps_MicrocodeFanControl); + + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + le16_to_cpu(fan_table_v2->usFanOutputSensitivity); + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + fan_table_v2->ucFanMaxRPM * 100UL; + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = + le16_to_cpu(fan_table_v2->usThrottlingRPM); + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit = + le16_to_cpu(fan_table_v2->usFanAcousticLimitRpm); + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = + le16_to_cpu(fan_table_v2->usTargetTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = + le16_to_cpu(fan_table_v2->usMinimumPWMLimit); + hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk = + le16_to_cpu(fan_table_v2->usTargetGfxClk); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge = + le16_to_cpu(fan_table_v2->usFanGainEdge); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot = + le16_to_cpu(fan_table_v2->usFanGainHotspot); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid = + le16_to_cpu(fan_table_v2->usFanGainLiquid); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc = + le16_to_cpu(fan_table_v2->usFanGainVrVddc); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd = + le16_to_cpu(fan_table_v2->usFanGainVrMvdd); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx = + le16_to_cpu(fan_table_v2->usFanGainPlx); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm = + le16_to_cpu(fan_table_v2->usFanGainHbm); + + hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM = + fan_table_v2->ucEnableZeroRPM; + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature = + le16_to_cpu(fan_table_v2->usFanStopTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = + le16_to_cpu(fan_table_v2->usFanStartTemperature); + } + return 0; +} + +static int init_over_drive_limits( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + hwmgr->platform_descriptor.overdriveLimit.engineClock = + le32_to_cpu(powerplay_table->ulMaxODEngineClock); + hwmgr->platform_descriptor.overdriveLimit.memoryClock = + le32_to_cpu(powerplay_table->ulMaxODMemoryClock); + + hwmgr->platform_descriptor.minOverdriveVDDC = 0; + hwmgr->platform_descriptor.maxOverdriveVDDC = 0; + hwmgr->platform_descriptor.overdriveVDDCStep = 0; + + if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 && + hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ACOverdriveSupport); + } + + return 0; +} + +static int get_mm_clock_voltage_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_mm_clock_voltage_dependency_table **vega10_mm_table, + const ATOM_Vega10_MM_Dependency_Table *mm_dependency_table) +{ + uint32_t table_size, i; + const ATOM_Vega10_MM_Dependency_Record *mm_dependency_record; + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table; + + PP_ASSERT_WITH_CODE((mm_dependency_table->ucNumEntries != 0), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) * + mm_dependency_table->ucNumEntries; + mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if 
(!mm_table) + return -ENOMEM; + + mm_table->count = mm_dependency_table->ucNumEntries; + + for (i = 0; i < mm_dependency_table->ucNumEntries; i++) { + mm_dependency_record = &mm_dependency_table->entries[i]; + mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd; + mm_table->entries[i].samclock = + le32_to_cpu(mm_dependency_record->ulPSPClk); + mm_table->entries[i].eclk = le32_to_cpu(mm_dependency_record->ulEClk); + mm_table->entries[i].vclk = le32_to_cpu(mm_dependency_record->ulVClk); + mm_table->entries[i].dclk = le32_to_cpu(mm_dependency_record->ulDClk); + } + + *vega10_mm_table = mm_table; + + return 0; +} + +static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda) +{ + switch(line){ + case Vega10_I2CLineID_DDC1: + *scl = Vega10_I2C_DDC1CLK; + *sda = Vega10_I2C_DDC1DATA; + break; + case Vega10_I2CLineID_DDC2: + *scl = Vega10_I2C_DDC2CLK; + *sda = Vega10_I2C_DDC2DATA; + break; + case Vega10_I2CLineID_DDC3: + *scl = Vega10_I2C_DDC3CLK; + *sda = Vega10_I2C_DDC3DATA; + break; + case Vega10_I2CLineID_DDC4: + *scl = Vega10_I2C_DDC4CLK; + *sda = Vega10_I2C_DDC4DATA; + break; + case Vega10_I2CLineID_DDC5: + *scl = Vega10_I2C_DDC5CLK; + *sda = Vega10_I2C_DDC5DATA; + break; + case Vega10_I2CLineID_DDC6: + *scl = Vega10_I2C_DDC6CLK; + *sda = Vega10_I2C_DDC6DATA; + break; + case Vega10_I2CLineID_SCLSDA: + *scl = Vega10_I2C_SCL; + *sda = Vega10_I2C_SDA; + break; + case Vega10_I2CLineID_DDCVGA: + *scl = Vega10_I2C_DDCVGACLK; + *sda = Vega10_I2C_DDCVGADATA; + break; + default: + *scl = 0; + *sda = 0; + break; + } +} + +static int get_tdp_table( + struct pp_hwmgr *hwmgr, + struct phm_tdp_table **info_tdp_table, + const Vega10_PPTable_Generic_SubTable_Header *table) +{ + uint32_t table_size; + struct phm_tdp_table *tdp_table; + uint8_t scl; + uint8_t sda; + const ATOM_Vega10_PowerTune_Table *power_tune_table; + const ATOM_Vega10_PowerTune_Table_V2 *power_tune_table_v2; + + table_size = sizeof(uint32_t) + sizeof(struct phm_tdp_table); + + tdp_table = kzalloc(table_size, GFP_KERNEL); + + if (!tdp_table) + return -ENOMEM; + + if (table->ucRevId == 5) { + power_tune_table = (ATOM_Vega10_PowerTune_Table *)table; + tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit); + tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit); + tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit); + tdp_table->usSoftwareShutdownTemp = + le16_to_cpu(power_tune_table->usSoftwareShutdownTemp); + tdp_table->usTemperatureLimitTedge = + le16_to_cpu(power_tune_table->usTemperatureLimitTedge); + tdp_table->usTemperatureLimitHotspot = + le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot); + tdp_table->usTemperatureLimitLiquid1 = + le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1); + tdp_table->usTemperatureLimitLiquid2 = + le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2); + tdp_table->usTemperatureLimitHBM = + le16_to_cpu(power_tune_table->usTemperatureLimitHBM); + tdp_table->usTemperatureLimitVrVddc = + le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc); + tdp_table->usTemperatureLimitVrMvdd = + le16_to_cpu(power_tune_table->usTemperatureLimitVrMem); + tdp_table->usTemperatureLimitPlx = + le16_to_cpu(power_tune_table->usTemperatureLimitPlx); + tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address; + tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address; + tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL; + tdp_table->ucLiquid_I2C_LineSDA = 
power_tune_table->ucLiquid_I2C_LineSDA; + tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address; + tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL; + tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA; + tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address; + tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL; + tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA; + hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance; + } else { + power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table; + tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v2->usSocketPowerLimit); + tdp_table->usTDC = le16_to_cpu(power_tune_table_v2->usTdcLimit); + tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v2->usEdcLimit); + tdp_table->usSoftwareShutdownTemp = + le16_to_cpu(power_tune_table_v2->usSoftwareShutdownTemp); + tdp_table->usTemperatureLimitTedge = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitTedge); + tdp_table->usTemperatureLimitHotspot = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitHotSpot); + tdp_table->usTemperatureLimitLiquid1 = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid1); + tdp_table->usTemperatureLimitLiquid2 = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid2); + tdp_table->usTemperatureLimitHBM = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitHBM); + tdp_table->usTemperatureLimitVrVddc = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrSoc); + tdp_table->usTemperatureLimitVrMvdd = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrMem); + tdp_table->usTemperatureLimitPlx = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitPlx); + tdp_table->ucLiquid1_I2C_address = power_tune_table_v2->ucLiquid1_I2C_address; + tdp_table->ucLiquid2_I2C_address = power_tune_table_v2->ucLiquid2_I2C_address; + + get_scl_sda_value(power_tune_table_v2->ucLiquid_I2C_Line, &scl, &sda); + + tdp_table->ucLiquid_I2C_Line = scl; + tdp_table->ucLiquid_I2C_LineSDA = sda; + + tdp_table->ucVr_I2C_address = power_tune_table_v2->ucVr_I2C_address; + + get_scl_sda_value(power_tune_table_v2->ucVr_I2C_Line, &scl, &sda); + + tdp_table->ucVr_I2C_Line = scl; + tdp_table->ucVr_I2C_LineSDA = sda; + tdp_table->ucPlx_I2C_address = power_tune_table_v2->ucPlx_I2C_address; + + get_scl_sda_value(power_tune_table_v2->ucPlx_I2C_Line, &scl, &sda); + + tdp_table->ucPlx_I2C_Line = scl; + tdp_table->ucPlx_I2C_LineSDA = sda; + + hwmgr->platform_descriptor.LoadLineSlope = + power_tune_table_v2->usLoadLineResistance; + } + + *info_tdp_table = tdp_table; + + return 0; +} + +static int get_socclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table **pp_vega10_clk_dep_table, + const ATOM_Vega10_SOCCLK_Dependency_Table *clk_dep_table) +{ + uint32_t table_size, i; + phm_ppt_v1_clock_voltage_dependency_table *clk_table; + + PP_ASSERT_WITH_CODE(clk_dep_table->ucNumEntries, + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + clk_dep_table->ucNumEntries; + + clk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!clk_table) + return -ENOMEM; + + clk_table->count = (uint32_t)clk_dep_table->ucNumEntries; + + for (i = 0; i < clk_dep_table->ucNumEntries; i++) { + clk_table->entries[i].vddInd = + clk_dep_table->entries[i].ucVddInd; + clk_table->entries[i].clk = + 
le32_to_cpu(clk_dep_table->entries[i].ulClk); + } + + *pp_vega10_clk_dep_table = clk_table; + + return 0; +} + +static int get_mclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table **pp_vega10_mclk_dep_table, + const ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table) +{ + uint32_t table_size, i; + phm_ppt_v1_clock_voltage_dependency_table *mclk_table; + + PP_ASSERT_WITH_CODE(mclk_dep_table->ucNumEntries, + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + mclk_dep_table->ucNumEntries; + + mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!mclk_table) + return -ENOMEM; + + mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries; + + for (i = 0; i < mclk_dep_table->ucNumEntries; i++) { + mclk_table->entries[i].vddInd = + mclk_dep_table->entries[i].ucVddInd; + mclk_table->entries[i].vddciInd = + mclk_dep_table->entries[i].ucVddciInd; + mclk_table->entries[i].mvddInd = + mclk_dep_table->entries[i].ucVddMemInd; + mclk_table->entries[i].clk = + le32_to_cpu(mclk_dep_table->entries[i].ulMemClk); + } + + *pp_vega10_mclk_dep_table = mclk_table; + + return 0; +} + +static int get_gfxclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table + **pp_vega10_clk_dep_table, + const ATOM_Vega10_GFXCLK_Dependency_Table *clk_dep_table) +{ + uint32_t table_size, i; + struct phm_ppt_v1_clock_voltage_dependency_table + *clk_table; + + PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + clk_dep_table->ucNumEntries; + + clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!clk_table) + return -ENOMEM; + + clk_table->count = clk_dep_table->ucNumEntries; + + for (i = 0; i < clk_table->count; i++) { + clk_table->entries[i].vddInd = + clk_dep_table->entries[i].ucVddInd; + clk_table->entries[i].clk = + le32_to_cpu(clk_dep_table->entries[i].ulClk); + clk_table->entries[i].cks_enable = + (((clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x80) + >> 15) == 0) ? 
1 : 0; + clk_table->entries[i].cks_voffset = + (clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x7F); + clk_table->entries[i].sclk_offset = + clk_dep_table->entries[i].usAVFSOffset; + } + + *pp_vega10_clk_dep_table = clk_table; + + return 0; +} + +static int get_dcefclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table + **pp_vega10_clk_dep_table, + const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table) +{ + uint32_t table_size, i; + struct phm_ppt_v1_clock_voltage_dependency_table + *clk_table; + + PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + clk_dep_table->ucNumEntries; + + clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!clk_table) + return -ENOMEM; + + clk_table->count = clk_dep_table->ucNumEntries; + + for (i = 0; i < clk_table->count; i++) { + clk_table->entries[i].vddInd = + clk_dep_table->entries[i].ucVddInd; + clk_table->entries[i].clk = + le32_to_cpu(clk_dep_table->entries[i].ulClk); + } + + *pp_vega10_clk_dep_table = clk_table; + + return 0; +} + +static int get_pcie_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_pcie_table **vega10_pcie_table, + const Vega10_PPTable_Generic_SubTable_Header *table) +{ + uint32_t table_size, i, pcie_count; + struct phm_ppt_v1_pcie_table *pcie_table; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + const ATOM_Vega10_PCIE_Table *atom_pcie_table = + (ATOM_Vega10_PCIE_Table *)table; + + PP_ASSERT_WITH_CODE(atom_pcie_table->ucNumEntries, + "Invalid PowerPlay Table!", + return 0); + + table_size = sizeof(uint32_t) + + sizeof(struct phm_ppt_v1_pcie_record) * + atom_pcie_table->ucNumEntries; + + pcie_table = (struct phm_ppt_v1_pcie_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!pcie_table) + return -ENOMEM; + + pcie_count = table_info->vdd_dep_on_sclk->count; + if (atom_pcie_table->ucNumEntries <= pcie_count) + pcie_count = atom_pcie_table->ucNumEntries; + else + pr_info("Number of Pcie Entries exceed the number of" + " GFXCLK Dpm Levels!" 
+ " Disregarding the excess entries...\n"); + + pcie_table->count = pcie_count; + + for (i = 0; i < pcie_count; i++) { + pcie_table->entries[i].gen_speed = + atom_pcie_table->entries[i].ucPCIEGenSpeed; + pcie_table->entries[i].lane_width = + atom_pcie_table->entries[i].ucPCIELaneWidth; + pcie_table->entries[i].pcie_sclk = + atom_pcie_table->entries[i].ulLCLK; + } + + *vega10_pcie_table = pcie_table; + + return 0; +} + +static int get_hard_limits( + struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *limits, + const ATOM_Vega10_Hard_Limit_Table *limit_table) +{ + PP_ASSERT_WITH_CODE(limit_table->ucNumEntries, + "Invalid PowerPlay Table!", return -1); + + /* currently we always take entries[0] parameters */ + limits->sclk = le32_to_cpu(limit_table->entries[0].ulSOCCLKLimit); + limits->mclk = le32_to_cpu(limit_table->entries[0].ulMCLKLimit); + limits->gfxclk = le32_to_cpu(limit_table->entries[0].ulGFXCLKLimit); + limits->vddc = le16_to_cpu(limit_table->entries[0].usVddcLimit); + limits->vddci = le16_to_cpu(limit_table->entries[0].usVddciLimit); + limits->vddmem = le16_to_cpu(limit_table->entries[0].usVddMemLimit); + + return 0; +} + +static int get_valid_clk( + struct pp_hwmgr *hwmgr, + struct phm_clock_array **clk_table, + const phm_ppt_v1_clock_voltage_dependency_table *clk_volt_pp_table) +{ + uint32_t table_size, i; + struct phm_clock_array *table; + + PP_ASSERT_WITH_CODE(clk_volt_pp_table->count, + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(uint32_t) * clk_volt_pp_table->count; + + table = kzalloc(table_size, GFP_KERNEL); + + if (!table) + return -ENOMEM; + + table->count = (uint32_t)clk_volt_pp_table->count; + + for (i = 0; i < table->count; i++) + table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk; + + *clk_table = table; + + return 0; +} + +static int init_powerplay_extended_tables( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + int result = 0; + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + const ATOM_Vega10_MM_Dependency_Table *mm_dependency_table = + (const ATOM_Vega10_MM_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usMMDependencyTableOffset)); + const Vega10_PPTable_Generic_SubTable_Header *power_tune_table = + (const Vega10_PPTable_Generic_SubTable_Header *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPowerTuneTableOffset)); + const ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table = + (const ATOM_Vega10_SOCCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset)); + const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = + (const ATOM_Vega10_GFXCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); + const ATOM_Vega10_DCEFCLK_Dependency_Table *dcefclk_dep_table = + (const ATOM_Vega10_DCEFCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usDcefclkDependencyTableOffset)); + const ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table = + (const ATOM_Vega10_MCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + const ATOM_Vega10_Hard_Limit_Table *hard_limits = + (const ATOM_Vega10_Hard_Limit_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usHardLimitTableOffset)); + 
const Vega10_PPTable_Generic_SubTable_Header *pcie_table = + (const Vega10_PPTable_Generic_SubTable_Header *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPCIETableOffset)); + const ATOM_Vega10_PIXCLK_Dependency_Table *pixclk_dep_table = + (const ATOM_Vega10_PIXCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPixclkDependencyTableOffset)); + const ATOM_Vega10_PHYCLK_Dependency_Table *phyclk_dep_table = + (const ATOM_Vega10_PHYCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPhyClkDependencyTableOffset)); + const ATOM_Vega10_DISPCLK_Dependency_Table *dispclk_dep_table = + (const ATOM_Vega10_DISPCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usDispClkDependencyTableOffset)); + + pp_table_info->vdd_dep_on_socclk = NULL; + pp_table_info->vdd_dep_on_sclk = NULL; + pp_table_info->vdd_dep_on_mclk = NULL; + pp_table_info->vdd_dep_on_dcefclk = NULL; + pp_table_info->mm_dep_table = NULL; + pp_table_info->tdp_table = NULL; + pp_table_info->vdd_dep_on_pixclk = NULL; + pp_table_info->vdd_dep_on_phyclk = NULL; + pp_table_info->vdd_dep_on_dispclk = NULL; + + if (powerplay_table->usMMDependencyTableOffset) + result = get_mm_clock_voltage_table(hwmgr, + &pp_table_info->mm_dep_table, + mm_dependency_table); + + if (!result && powerplay_table->usPowerTuneTableOffset) + result = get_tdp_table(hwmgr, + &pp_table_info->tdp_table, + power_tune_table); + + if (!result && powerplay_table->usSocclkDependencyTableOffset) + result = get_socclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_socclk, + socclk_dep_table); + + if (!result && powerplay_table->usGfxclkDependencyTableOffset) + result = get_gfxclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_sclk, + gfxclk_dep_table); + + if (!result && powerplay_table->usPixclkDependencyTableOffset) + result = get_dcefclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_pixclk, + (const ATOM_Vega10_DCEFCLK_Dependency_Table*) + pixclk_dep_table); + + if (!result && powerplay_table->usPhyClkDependencyTableOffset) + result = get_dcefclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_phyclk, + (const ATOM_Vega10_DCEFCLK_Dependency_Table *) + phyclk_dep_table); + + if (!result && powerplay_table->usDispClkDependencyTableOffset) + result = get_dcefclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_dispclk, + (const ATOM_Vega10_DCEFCLK_Dependency_Table *) + dispclk_dep_table); + + if (!result && powerplay_table->usDcefclkDependencyTableOffset) + result = get_dcefclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_dcefclk, + dcefclk_dep_table); + + if (!result && powerplay_table->usMclkDependencyTableOffset) + result = get_mclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_mclk, + mclk_dep_table); + + if (!result && powerplay_table->usPCIETableOffset) + result = get_pcie_table(hwmgr, + &pp_table_info->pcie_table, + pcie_table); + + if (!result && powerplay_table->usHardLimitTableOffset) + result = get_hard_limits(hwmgr, + &pp_table_info->max_clock_voltage_on_dc, + hard_limits); + + hwmgr->dyn_state.max_clock_voltage_on_dc.sclk = + pp_table_info->max_clock_voltage_on_dc.sclk; + hwmgr->dyn_state.max_clock_voltage_on_dc.mclk = + pp_table_info->max_clock_voltage_on_dc.mclk; + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = + pp_table_info->max_clock_voltage_on_dc.vddc; + hwmgr->dyn_state.max_clock_voltage_on_dc.vddci = + 
pp_table_info->max_clock_voltage_on_dc.vddci; + + if (!result && + pp_table_info->vdd_dep_on_socclk && + pp_table_info->vdd_dep_on_socclk->count) + result = get_valid_clk(hwmgr, + &pp_table_info->valid_socclk_values, + pp_table_info->vdd_dep_on_socclk); + + if (!result && + pp_table_info->vdd_dep_on_sclk && + pp_table_info->vdd_dep_on_sclk->count) + result = get_valid_clk(hwmgr, + &pp_table_info->valid_sclk_values, + pp_table_info->vdd_dep_on_sclk); + + if (!result && + pp_table_info->vdd_dep_on_dcefclk && + pp_table_info->vdd_dep_on_dcefclk->count) + result = get_valid_clk(hwmgr, + &pp_table_info->valid_dcefclk_values, + pp_table_info->vdd_dep_on_dcefclk); + + if (!result && + pp_table_info->vdd_dep_on_mclk && + pp_table_info->vdd_dep_on_mclk->count) + result = get_valid_clk(hwmgr, + &pp_table_info->valid_mclk_values, + pp_table_info->vdd_dep_on_mclk); + + return result; +} + +static int get_vddc_lookup_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table **lookup_table, + const ATOM_Vega10_Voltage_Lookup_Table *vddc_lookup_pp_tables, + uint32_t max_levels) +{ + uint32_t table_size, i; + phm_ppt_v1_voltage_lookup_table *table; + + PP_ASSERT_WITH_CODE((vddc_lookup_pp_tables->ucNumEntries != 0), + "Invalid SOC_VDDD Lookup Table!", return 1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels; + + table = (phm_ppt_v1_voltage_lookup_table *) + kzalloc(table_size, GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + table->count = vddc_lookup_pp_tables->ucNumEntries; + + for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) + table->entries[i].us_vdd = + le16_to_cpu(vddc_lookup_pp_tables->entries[i].usVdd); + + *lookup_table = table; + + return 0; +} + +static int init_dpm_2_parameters( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + int result = 0; + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + uint32_t disable_power_control = 0; + + pp_table_info->us_ulv_voltage_offset = + le16_to_cpu(powerplay_table->usUlvVoltageOffset); + + pp_table_info->us_ulv_smnclk_did = + le16_to_cpu(powerplay_table->usUlvSmnclkDid); + pp_table_info->us_ulv_mp1clk_did = + le16_to_cpu(powerplay_table->usUlvMp1clkDid); + pp_table_info->us_ulv_gfxclk_bypass = + le16_to_cpu(powerplay_table->usUlvGfxclkBypass); + pp_table_info->us_gfxclk_slew_rate = + le16_to_cpu(powerplay_table->usGfxclkSlewRate); + pp_table_info->uc_gfx_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucGfxVoltageMode); + pp_table_info->uc_soc_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucSocVoltageMode); + pp_table_info->uc_uclk_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucUclkVoltageMode); + pp_table_info->uc_uvd_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucUvdVoltageMode); + pp_table_info->uc_vce_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucVceVoltageMode); + pp_table_info->uc_mp0_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucMp0VoltageMode); + pp_table_info->uc_dcef_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucDcefVoltageMode); + + pp_table_info->ppm_parameter_table = NULL; + pp_table_info->vddc_lookup_table = NULL; + pp_table_info->vddmem_lookup_table = NULL; + pp_table_info->vddci_lookup_table = NULL; + + /* TDP limits */ + hwmgr->platform_descriptor.TDPODLimit = + le16_to_cpu(powerplay_table->usPowerControlLimit); + hwmgr->platform_descriptor.TDPAdjustment = 0; + hwmgr->platform_descriptor.VidAdjustment = 0; + 
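/* The VID adjustment defaults here are fixed constants; only the TDP overdrive limit above (usPowerControlLimit) is read from the BIOS table. */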
hwmgr->platform_descriptor.VidAdjustmentPolarity = 0; + hwmgr->platform_descriptor.VidMinLimit = 0; + hwmgr->platform_descriptor.VidMaxLimit = 1500000; + hwmgr->platform_descriptor.VidStep = 6250; + + disable_power_control = 0; + if (!disable_power_control) { + /* enable TDP overdrive (PowerControl) feature as well if supported */ + if (hwmgr->platform_descriptor.TDPODLimit) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerControl); + } + + if (powerplay_table->usVddcLookupTableOffset) { + const ATOM_Vega10_Voltage_Lookup_Table *vddc_table = + (ATOM_Vega10_Voltage_Lookup_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usVddcLookupTableOffset)); + result = get_vddc_lookup_table(hwmgr, + &pp_table_info->vddc_lookup_table, vddc_table, 8); + } + + if (powerplay_table->usVddmemLookupTableOffset) { + const ATOM_Vega10_Voltage_Lookup_Table *vdd_mem_table = + (ATOM_Vega10_Voltage_Lookup_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usVddmemLookupTableOffset)); + result = get_vddc_lookup_table(hwmgr, + &pp_table_info->vddmem_lookup_table, vdd_mem_table, 4); + } + + if (powerplay_table->usVddciLookupTableOffset) { + const ATOM_Vega10_Voltage_Lookup_Table *vddci_table = + (ATOM_Vega10_Voltage_Lookup_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usVddciLookupTableOffset)); + result = get_vddc_lookup_table(hwmgr, + &pp_table_info->vddci_lookup_table, vddci_table, 4); + } + + return result; +} + +int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr) +{ + int result = 0; + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table; + + hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v2_information), GFP_KERNEL); + + PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable), + "Failed to allocate hwmgr->pptable!", return -ENOMEM); + + powerplay_table = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != powerplay_table), + "Missing PowerPlay Table!", return -1); + + result = check_powerplay_tables(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "check_powerplay_tables failed", return result); + + result = set_platform_caps(hwmgr, + le32_to_cpu(powerplay_table->ulPlatformCaps)); + + PP_ASSERT_WITH_CODE((result == 0), + "set_platform_caps failed", return result); + + result = init_thermal_controller(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_thermal_controller failed", return result); + + result = init_over_drive_limits(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_over_drive_limits failed", return result); + + result = init_powerplay_extended_tables(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_powerplay_extended_tables failed", return result); + + result = init_dpm_2_parameters(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_dpm_2_parameters failed", return result); + + return result; +} + +static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + kfree(pp_table_info->vdd_dep_on_sclk); + pp_table_info->vdd_dep_on_sclk = NULL; + + kfree(pp_table_info->vdd_dep_on_mclk); + pp_table_info->vdd_dep_on_mclk = NULL; + + kfree(pp_table_info->valid_mclk_values); + pp_table_info->valid_mclk_values = NULL; + + kfree(pp_table_info->valid_sclk_values); + pp_table_info->valid_sclk_values = NULL; + + kfree(pp_table_info->vddc_lookup_table); + 
pp_table_info->vddc_lookup_table = NULL; + + kfree(pp_table_info->vddmem_lookup_table); + pp_table_info->vddmem_lookup_table = NULL; + + kfree(pp_table_info->vddci_lookup_table); + pp_table_info->vddci_lookup_table = NULL; + + kfree(pp_table_info->ppm_parameter_table); + pp_table_info->ppm_parameter_table = NULL; + + kfree(pp_table_info->mm_dep_table); + pp_table_info->mm_dep_table = NULL; + + kfree(pp_table_info->cac_dtp_table); + pp_table_info->cac_dtp_table = NULL; + + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; + + kfree(pp_table_info->tdp_table); + pp_table_info->tdp_table = NULL; + + kfree(hwmgr->pptable); + hwmgr->pptable = NULL; + + return result; +} + +const struct pp_table_func vega10_pptable_funcs = { + .pptable_init = vega10_pp_tables_initialize, + .pptable_fini = vega10_pp_tables_uninitialize, +}; + +int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) +{ + const ATOM_Vega10_State_Array *state_arrays; + const ATOM_Vega10_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != pp_table), + "Missing PowerPlay Table!", return -1); + PP_ASSERT_WITH_CODE((pp_table->sHeader.format_revision >= + ATOM_Vega10_TABLE_REVISION_VEGA10), + "Incorrect PowerPlay table revision!", return -1); + + state_arrays = (ATOM_Vega10_State_Array *)(((unsigned long)pp_table) + + le16_to_cpu(pp_table->usStateArrayOffset)); + + return (uint32_t)(state_arrays->ucNumEntries); +} + +static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr, + uint16_t classification, uint16_t classification2) +{ + uint32_t result = 0; + + if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT) + result |= PP_StateClassificationFlag_Boot; + + if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL) + result |= PP_StateClassificationFlag_Thermal; + + if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) + result |= PP_StateClassificationFlag_LimitedPowerSource; + + if (classification & ATOM_PPLIB_CLASSIFICATION_REST) + result |= PP_StateClassificationFlag_Rest; + + if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED) + result |= PP_StateClassificationFlag_Forced; + + if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI) + result |= PP_StateClassificationFlag_ACPI; + + if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) + result |= PP_StateClassificationFlag_LimitedPowerSource_2; + + return result; +} + +int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, + uint32_t entry_index, struct pp_power_state *power_state, + int (*call_back_func)(struct pp_hwmgr *, void *, + struct pp_power_state *, void *, uint32_t)) +{ + int result = 0; + const ATOM_Vega10_State_Array *state_arrays; + const ATOM_Vega10_State *state_entry; + const ATOM_Vega10_POWERPLAYTABLE *pp_table = + get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE(pp_table, "Missing PowerPlay Table!", + return -1;); + power_state->classification.bios_index = entry_index; + + if (pp_table->sHeader.format_revision >= + ATOM_Vega10_TABLE_REVISION_VEGA10) { + state_arrays = (ATOM_Vega10_State_Array *) + (((unsigned long)pp_table) + + le16_to_cpu(pp_table->usStateArrayOffset)); + + PP_ASSERT_WITH_CODE(pp_table->usStateArrayOffset > 0, + "Invalid PowerPlay Table State Array Offset.", + return -1); + PP_ASSERT_WITH_CODE(state_arrays->ucNumEntries > 0, + "Invalid PowerPlay Table State Array.", + return -1); + PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries), + "Invalid PowerPlay Table State Array Entry.", + return -1); + + state_entry = 
&(state_arrays->states[entry_index]); + + result = call_back_func(hwmgr, (void *)state_entry, power_state, + (void *)pp_table, + make_classification_flags(hwmgr, + le16_to_cpu(state_entry->usClassification), + le16_to_cpu(state_entry->usClassification2))); + } + + if (!result && (power_state->classification.flags & + PP_StateClassificationFlag_Boot)) + result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware)); + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h new file mode 100644 index 000000000000..d83ed2af7aa3 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h @@ -0,0 +1,62 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef VEGA10_PROCESSPPTABLES_H +#define VEGA10_PROCESSPPTABLES_H + +#include "hwmgr.h" + +enum Vega10_I2CLineID { + Vega10_I2CLineID_DDC1 = 0x90, + Vega10_I2CLineID_DDC2 = 0x91, + Vega10_I2CLineID_DDC3 = 0x92, + Vega10_I2CLineID_DDC4 = 0x93, + Vega10_I2CLineID_DDC5 = 0x94, + Vega10_I2CLineID_DDC6 = 0x95, + Vega10_I2CLineID_SCLSDA = 0x96, + Vega10_I2CLineID_DDCVGA = 0x97 +}; + +#define Vega10_I2C_DDC1DATA 0 +#define Vega10_I2C_DDC1CLK 1 +#define Vega10_I2C_DDC2DATA 2 +#define Vega10_I2C_DDC2CLK 3 +#define Vega10_I2C_DDC3DATA 4 +#define Vega10_I2C_DDC3CLK 5 +#define Vega10_I2C_SDA 40 +#define Vega10_I2C_SCL 41 +#define Vega10_I2C_DDC4DATA 65 +#define Vega10_I2C_DDC4CLK 66 +#define Vega10_I2C_DDC5DATA 0x48 +#define Vega10_I2C_DDC5CLK 0x49 +#define Vega10_I2C_DDC6DATA 0x4a +#define Vega10_I2C_DDC6CLK 0x4b +#define Vega10_I2C_DDCVGADATA 0x4c +#define Vega10_I2C_DDCVGACLK 0x4d + +extern const struct pp_table_func vega10_pptable_funcs; +extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr); +extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index, + struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *, + struct pp_power_state *, void *, uint32_t)); +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c new file mode 100644 index 000000000000..f4d77b62e1ba --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -0,0 +1,761 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "vega10_thermal.h" +#include "vega10_hwmgr.h" +#include "vega10_smumgr.h" +#include "vega10_ppsmc.h" +#include "vega10_inc.h" +#include "pp_soc15.h" +#include "pp_debug.h" + +static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) +{ + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GetCurrentRpm), + "Attempt to get current RPM from SMC Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + current_rpm), + "Attempt to read current RPM from SMC Failed!", + return -1); + return 0; +} + +int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info) +{ + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + fan_speed_info->supports_percent_read = true; + fan_speed_info->supports_percent_write = true; + fan_speed_info->min_percent = 0; + fan_speed_info->max_percent = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM) && + hwmgr->thermal_controller.fanInfo. + ucTachometerPulsesPerRevolution) { + fan_speed_info->supports_rpm_read = true; + fan_speed_info->supports_rpm_write = true; + fan_speed_info->min_rpm = + hwmgr->thermal_controller.fanInfo.ulMinRPM; + fan_speed_info->max_rpm = + hwmgr->thermal_controller.fanInfo.ulMaxRPM; + } else { + fan_speed_info->min_rpm = 0; + fan_speed_info->max_rpm = 0; + } + + return 0; +} + +int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t *speed) +{ + uint32_t current_rpm; + uint32_t percent = 0; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (vega10_get_current_rpm(hwmgr, &current_rpm)) + return -1; + + if (hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM != 0) + percent = current_rpm * 100 / + hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM; + + *speed = percent > 100 ? 
100 : percent; + + return 0; +} + +int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t tach_period; + uint32_t crystal_clock_freq; + int result = 0; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -1; + + if (data->smu_features[GNLD_FAN_CONTROL].supported) + result = vega10_get_current_rpm(hwmgr, speed); + else { + uint32_t reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); + tach_period = (cgs_read_register(hwmgr->device, + reg) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> + CG_TACH_STATUS__TACH_PERIOD__SHIFT; + + if (tach_period == 0) + return -EINVAL; + + crystal_clock_freq = smu7_get_xclk(hwmgr); + + *speed = 60 * crystal_clock_freq * 10000 / tach_period; + } + + return result; +} + +/** +* Set Fan Speed Control to static mode, +* so that the user can decide what speed to use. +* @param hwmgr the address of the powerplay hardware manager. +* mode the fan control mode, 0 default, 1 by percent, 5, by RPM +* @exception Should always succeed. +*/ +int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + uint32_t reg; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + + if (hwmgr->fan_ctrl_is_in_default_mode) { + hwmgr->fan_ctrl_default_mode = + (cgs_read_register(hwmgr->device, reg) & + CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> + CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + hwmgr->tmin = (cgs_read_register(hwmgr->device, reg) & + CG_FDO_CTRL2__TMIN_MASK) >> + CG_FDO_CTRL2__TMIN__SHIFT; + hwmgr->fan_ctrl_is_in_default_mode = false; + } + + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__TMIN_MASK) | + (0 << CG_FDO_CTRL2__TMIN__SHIFT)); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) | + (mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT)); + + return 0; +} + +/** +* Reset Fan Speed Control to default mode. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Should always succeed. +*/ +int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + + if (!hwmgr->fan_ctrl_is_in_default_mode) { + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) | + (hwmgr->fan_ctrl_default_mode << + CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT)); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__TMIN_MASK) | + (hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); + hwmgr->fan_ctrl_is_in_default_mode = true; + } + + return 0; +} + +/** + * @fn vega10_enable_fan_control_feature + * @brief Enables the SMC Fan Control Feature. + * + * @param hwmgr - the address of the powerplay hardware manager. + * @return 0 on success. -1 otherwise. + */ +static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FAN_CONTROL].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features( + hwmgr->smumgr, true, + data->smu_features[GNLD_FAN_CONTROL]. 
+ smu_feature_bitmap), + "Attempt to Enable FAN CONTROL feature Failed!", + return -1); + data->smu_features[GNLD_FAN_CONTROL].enabled = true; + } + + return 0; +} + +static int vega10_disable_fan_control_feature(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FAN_CONTROL].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features( + hwmgr->smumgr, false, + data->smu_features[GNLD_FAN_CONTROL]. + smu_feature_bitmap), + "Attempt to Enable FAN CONTROL feature Failed!", + return -1); + data->smu_features[GNLD_FAN_CONTROL].enabled = false; + } + + return 0; +} + +int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -1; + + PP_ASSERT_WITH_CODE(!vega10_enable_fan_control_feature(hwmgr), + "Attempt to Enable SMC FAN CONTROL Feature Failed!", + return -1); + + return 0; +} + + +int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -1; + + if (data->smu_features[GNLD_FAN_CONTROL].supported) { + PP_ASSERT_WITH_CODE(!vega10_disable_fan_control_feature(hwmgr), + "Attempt to Disable SMC FAN CONTROL Feature Failed!", + return -1); + } + return 0; +} + +/** +* Set Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (0% - 100%) to be set. +* @exception Fails is the 100% setting appears to be 0. +*/ +int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + uint32_t reg; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (speed > 100) + speed = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1); + + duty100 = (cgs_read_register(hwmgr->device, reg) & + CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> + CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK) | + (duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT)); + + return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reset Fan Speed to default. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Always succeeds. +*/ +int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) +{ + int result; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) { + result = vega10_fan_ctrl_set_static_mode(hwmgr, + FDO_PWM_MODE_STATIC); + if (!result) + result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); + } else + result = vega10_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (min - max) to be set. +* @exception Fails is the speed not lie between min and max. 
+*/ +int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + uint32_t tach_period; + uint32_t crystal_clock_freq; + int result = 0; + uint32_t reg; + + if (hwmgr->thermal_controller.fanInfo.bNoFan || + (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || + (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) + return -1; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + + if (!result) { + crystal_clock_freq = smu7_get_xclk(hwmgr); + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_TACH_STATUS__TACH_PERIOD_MASK) | + (tach_period << CG_TACH_STATUS__TACH_PERIOD__SHIFT)); + } + return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM); +} + +/** +* Reads the remote temperature from the SIslands thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int temp; + uint32_t reg; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_TACH_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); + + temp = cgs_read_register(hwmgr->device, reg); + + temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> + CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; + + /* Bit 9 means the reading is lower than the lowest usable value. */ + if (temp & 0x200) + temp = VEGA10_THERMAL_MAXIMUM_TEMP_READING; + else + temp = temp & 0x1ff; + + temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals +* +* @param hwmgr The address of the hardware manager. +* @param range Temperature range to be programmed for +* high and low alert signals +* @exception PP_Result_BadInput if the input data is not valid. +*/ +static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range) +{ + uint32_t low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t val, reg; + + if (low < range->min) + low = range->min; + if (high > range->max) + high = range->max; + + if (low > high) + return -EINVAL; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL); + + val = cgs_read_register(hwmgr->device, reg); + val &= ~(THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK); + val |= (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) << + THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT; + val &= ~(THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); + val |= (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) << + THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT; + cgs_write_register(hwmgr->device, reg, val); + + reg = soc15_get_register_offset(THM_HWID, 0, + mmTHM_TCON_HTC_BASE_IDX, mmTHM_TCON_HTC); + + val = cgs_read_register(hwmgr->device, reg); + val &= ~(THM_TCON_HTC__HTC_TMP_LMT_MASK); + val |= (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) << + THM_TCON_HTC__HTC_TMP_LMT__SHIFT; + cgs_write_register(hwmgr->device, reg, val); + + return 0; +} + +/** +* Programs thermal controller one-time setting registers +* +* @param hwmgr The address of the hardware manager. 
+*/ +static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + + if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_TACH_CTRL__EDGE_PER_REV_MASK) | + ((hwmgr->thermal_controller.fanInfo. + ucTachometerPulsesPerRevolution - 1) << + CG_TACH_CTRL__EDGE_PER_REV__SHIFT)); + } + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK) | + (0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT)); + + return 0; +} + +/** +* Enable thermal alerts on the RV770 thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FW_CTF].supported) { + if (data->smu_features[GNLD_FW_CTF].enabled) + printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n"); + } + + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_FW_CTF].smu_feature_bitmap), + "Attempt to Enable FW CTF feature Failed!", + return -1); + data->smu_features[GNLD_FW_CTF].enabled = true; + return 0; +} + +/** +* Disable thermal alerts on the RV770 thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +static int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FW_CTF].supported) { + if (!data->smu_features[GNLD_FW_CTF].enabled) + printk("[Thermal_EnableAlert] FW CTF Already disabled!\n"); + } + + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + false, + data->smu_features[GNLD_FW_CTF].smu_feature_bitmap), + "Attempt to disable FW CTF feature Failed!", + return -1); + data->smu_features[GNLD_FW_CTF].enabled = false; + return 0; +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = vega10_thermal_disable_alert(hwmgr); + + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + vega10_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + int ret; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *table = &(data->smc_state_table.pp_table); + + if (!data->smu_features[GNLD_FAN_CONTROL].supported) + return 0; + + table->FanMaximumRpm = (uint16_t)hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM; + table->FanThrottlingRpm = hwmgr->thermal_controller. + advanceFanControlParameters.usFanRPMMaxLimit; + table->FanAcousticLimitRpm = (uint16_t)(hwmgr->thermal_controller. 
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + table->FanTargetTemperature = hwmgr->thermal_controller. + advanceFanControlParameters.usTMax; + table->FanPwmMin = hwmgr->thermal_controller. + advanceFanControlParameters.usPWMMin * 255 / 100; + table->FanTargetGfxclk = (uint16_t)(hwmgr->thermal_controller. + advanceFanControlParameters.ulTargetGfxClk); + table->FanGainEdge = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainEdge; + table->FanGainHotspot = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainHotspot; + table->FanGainLiquid = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainLiquid; + table->FanGainVrVddc = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainVrVddc; + table->FanGainVrMvdd = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainVrMvdd; + table->FanGainPlx = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainPlx; + table->FanGainHbm = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainHbm; + table->FanZeroRpmEnable = hwmgr->thermal_controller. + advanceFanControlParameters.ucEnableZeroRPM; + table->FanStopTemp = hwmgr->thermal_controller. + advanceFanControlParameters.usZeroRPMStopTemperature; + table->FanStartTemp = hwmgr->thermal_controller. + advanceFanControlParameters.usZeroRPMStartTemperature; + + ret = vega10_copy_table_to_smc(hwmgr->smumgr, + (uint8_t *)(&(data->smc_state_table.pp_table)), PPTABLE); + if (ret) + pr_info("Failed to update Fan Control Table in PPTable!"); + + return ret; +} + +/** +* Start the fan control on the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ +/* If the fantable setup has failed we could have disabled + * PHM_PlatformCaps_MicrocodeFanControl even after + * this function was included in the table. + * Make sure that we still think controlling the fan is OK. +*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) { + vega10_fan_ctrl_start_smc_fan_control(hwmgr); + vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + } + + return 0; +} + +/** +* Set temperature range for high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + + if (range == NULL) + return -EINVAL; + + return vega10_thermal_set_temperature_range(hwmgr, range); +} + +/** +* Programs one-time setting registers +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from initialize thermal controller routine +*/ +int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return vega10_thermal_initialize(hwmgr); +} + +/** +* Enable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from enable alert routine +*/ +int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return vega10_thermal_enable_alert(hwmgr); +} + +/** +* Disable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from disable alert routine +*/ +static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return vega10_thermal_disable_alert(hwmgr); +} + +static struct phm_master_table_item +vega10_thermal_start_thermal_controller_master_list[] = { + {NULL, tf_vega10_thermal_initialize}, + {NULL, tf_vega10_thermal_set_temperature_range}, + {NULL, tf_vega10_thermal_enable_alert}, +/* We should restrict performance levels to low before we halt the SMC. + * On the other hand we are still in boot state when we do this + * so it would be pointless. + * If this assumption changes we have to revisit this table. + */ + {NULL, tf_vega10_thermal_setup_fan_table}, + {NULL, tf_vega10_thermal_start_smc_fan_control}, + {NULL, NULL} +}; + +static struct phm_master_table_header +vega10_thermal_start_thermal_controller_master = { + 0, + PHM_MasterTableFlag_None, + vega10_thermal_start_thermal_controller_master_list +}; + +static struct phm_master_table_item +vega10_thermal_set_temperature_range_master_list[] = { + {NULL, tf_vega10_thermal_disable_alert}, + {NULL, tf_vega10_thermal_set_temperature_range}, + {NULL, tf_vega10_thermal_enable_alert}, + {NULL, NULL} +}; + +struct phm_master_table_header +vega10_thermal_set_temperature_range_master = { + 0, + PHM_MasterTableFlag_None, + vega10_thermal_set_temperature_range_master_list +}; + +int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->thermal_controller.fanInfo.bNoFan) { + vega10_fan_ctrl_set_default_mode(hwmgr); + vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + } + return 0; +} + +/** +* Initializes the thermal controller related functions +* in the Hardware Manager structure. +* @param hwmgr The address of the hardware manager. +* @exception Any error code from the low-level communication. 
+*/ +int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + int result; + + result = phm_construct_table(hwmgr, + &vega10_thermal_set_temperature_range_master, + &(hwmgr->set_temperature_range)); + + if (!result) { + result = phm_construct_table(hwmgr, + &vega10_thermal_start_thermal_controller_master, + &(hwmgr->start_thermal_controller)); + if (result) + phm_destroy_table(hwmgr, + &(hwmgr->set_temperature_range)); + } + + if (!result) + hwmgr->fan_ctrl_is_in_default_mode = true; + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h new file mode 100644 index 000000000000..8036808ec421 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h @@ -0,0 +1,83 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
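
The fan-control paths in vega10_thermal.c above reduce to two integer conversions: a percentage is scaled against the FMAX_DUTY100 field read from CG_FDO_CTRL1 to produce a static FDO duty value, and an RPM target is turned into a tachometer period derived from the crystal clock. The stand-alone sketch below restates that arithmetic outside the hwmgr/cgs plumbing; it assumes (as the driver appears to) that smu7_get_xclk() reports the reference clock in 10 kHz units and that the fan produces 8 tach edges per revolution, and the helper names are illustrative, not part of the driver.

#include <stdint.h>

/* Percent (0-100) to static FDO duty, given the FMAX_DUTY100 value read
 * from CG_FDO_CTRL1; mirrors vega10_fan_ctrl_set_fan_speed_percent(). */
static uint32_t fan_percent_to_duty(uint32_t speed, uint32_t duty100)
{
	uint64_t tmp;

	if (speed > 100)
		speed = 100;
	if (duty100 == 0)	/* the driver bails out with -EINVAL here */
		return 0;

	tmp = (uint64_t)speed * duty100;
	return (uint32_t)(tmp / 100);
}

/* RPM to CG_TACH_STATUS.TACH_PERIOD; same formula as
 * vega10_fan_ctrl_set_fan_speed_rpm(). crystal_clock_freq is assumed to
 * be in 10 kHz units, so the numerator is clock cycles per minute. A
 * 64-bit intermediate keeps large reference clocks safe; the driver does
 * this computation in 32 bits. rpm must be non-zero (the driver range
 * checks it against fanInfo min/max first). */
static uint32_t fan_rpm_to_tach_period(uint32_t rpm, uint32_t crystal_clock_freq)
{
	return (uint32_t)(60ULL * crystal_clock_freq * 10000 / (8 * rpm));
}
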
+ * + */ + +#ifndef VEGA10_THERMAL_H +#define VEGA10_THERMAL_H + +#include "hwmgr.h" + +struct vega10_temperature { + uint16_t edge_temp; + uint16_t hot_spot_temp; + uint16_t hbm_temp; + uint16_t vr_soc_temp; + uint16_t vr_mem_temp; + uint16_t liquid1_temp; + uint16_t liquid2_temp; + uint16_t plx_temp; +}; + +#define VEGA10_THERMAL_HIGH_ALERT_MASK 0x1 +#define VEGA10_THERMAL_LOW_ALERT_MASK 0x2 + +#define VEGA10_THERMAL_MINIMUM_TEMP_READING -256 +#define VEGA10_THERMAL_MAXIMUM_TEMP_READING 255 + +#define VEGA10_THERMAL_MINIMUM_ALERT_TEMP 0 +#define VEGA10_THERMAL_MAXIMUM_ALERT_TEMP 255 + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + + +extern int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result); +extern int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result); +extern int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result); + +extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr); +extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info); +extern int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t *speed); +extern int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); +extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, + uint32_t mode); +extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t speed); +extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); +extern int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr); +extern int vega10_thermal_ctrl_uninitialize_thermal_controller( + struct pp_hwmgr *hwmgr); +extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, + uint32_t speed); +extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, + uint32_t *speed); +extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); +extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 6dd5f0e9ef87..4e39f35bb745 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -28,6 +28,7 @@ #include <linux/errno.h> #include "amd_shared.h" #include "cgs_common.h" +#include "dm_pp_interface.h" extern const struct amd_ip_funcs pp_ip_funcs; extern const struct amd_powerplay_funcs pp_dpm_funcs; @@ -46,6 +47,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_GPU_TEMP, AMDGPU_PP_SENSOR_VCE_POWER, AMDGPU_PP_SENSOR_UVD_POWER, + AMDGPU_PP_SENSOR_GPU_POWER, }; enum amd_pp_event { @@ -225,6 +227,8 @@ struct amd_pp_display_configuration { * higher latency not allowed. 
*/ uint32_t dce_tolerable_mclk_in_active_latency; + uint32_t min_dcef_set_clk; + uint32_t min_dcef_deep_sleep_set_clk; }; struct amd_pp_simple_clock_info { @@ -265,7 +269,11 @@ struct amd_pp_clock_info { enum amd_pp_clock_type { amd_pp_disp_clock = 1, amd_pp_sys_clock, - amd_pp_mem_clock + amd_pp_mem_clock, + amd_pp_dcef_clock, + amd_pp_soc_clock, + amd_pp_pixel_clock, + amd_pp_phy_clock }; #define MAX_NUM_CLOCKS 16 @@ -295,6 +303,18 @@ struct pp_states_info { uint32_t states[16]; }; +struct pp_gpu_power { + uint32_t vddc_power; + uint32_t vddci_power; + uint32_t max_gpu_power; + uint32_t average_gpu_power; +}; + +struct pp_display_clock_request { + enum amd_pp_clock_type clock_type; + uint32_t clock_freq_in_khz; +}; + #define PP_GROUP_MASK 0xF0000000 #define PP_GROUP_SHIFT 28 @@ -359,8 +379,16 @@ struct amd_powerplay_funcs { int (*set_sclk_od)(void *handle, uint32_t value); int (*get_mclk_od)(void *handle); int (*set_mclk_od)(void *handle, uint32_t value); - int (*read_sensor)(void *handle, int idx, int32_t *value); + int (*read_sensor)(void *handle, int idx, void *value, int *size); struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx); + int (*reset_power_profile_state)(void *handle, + struct amd_pp_profile *request); + int (*get_power_profile_state)(void *handle, + struct amd_pp_profile *query); + int (*set_power_profile_state)(void *handle, + struct amd_pp_profile *request); + int (*switch_power_profile)(void *handle, + enum amd_pp_profile_type type); }; struct amd_powerplay { @@ -389,6 +417,20 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); +int amd_powerplay_get_clock_by_type_with_latency(void *handle, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks); + +int amd_powerplay_get_clock_by_type_with_voltage(void *handle, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); + +int amd_powerplay_set_watermarks_for_clocks_ranges(void *handle, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); + +int amd_powerplay_display_clock_voltage_request(void *handle, + struct pp_display_clock_request *clock); + int amd_powerplay_get_display_mode_validation_clocks(void *handle, struct amd_pp_simple_clock_info *output); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 80ed65985af8..5345b50761f4 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -182,6 +182,7 @@ enum phm_platform_caps { PHM_PlatformCaps_Thermal2GPIO17, /* indicates thermal2GPIO17 table support */ PHM_PlatformCaps_ThermalOutGPIO, /* indicates ThermalOutGPIO support, pin number is assigned by VBIOS */ PHM_PlatformCaps_DisableMclkSwitchingForFrameLock, /* Disable memory clock switch during Framelock */ + PHM_PlatformCaps_ForceMclkHigh, /* Disable memory clock switching by forcing memory clock high */ PHM_PlatformCaps_VRHotGPIOConfigurable, /* indicates VR_HOT GPIO configurable */ PHM_PlatformCaps_TempInversion, /* enable Temp Inversion feature */ PHM_PlatformCaps_IOIC3, @@ -212,6 +213,20 @@ enum phm_platform_caps { PHM_PlatformCaps_TablelessHardwareInterface, PHM_PlatformCaps_EnableDriverEVV, PHM_PlatformCaps_SPLLShutdownSupport, + PHM_PlatformCaps_VirtualBatteryState, + PHM_PlatformCaps_IgnoreForceHighClockRequestsInAPUs, + PHM_PlatformCaps_DisableMclkSwitchForVR, + PHM_PlatformCaps_SMU8, + PHM_PlatformCaps_VRHotPolarityHigh, + 
PHM_PlatformCaps_IPS_UlpsExclusive, + PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme, + PHM_PlatformCaps_GeminiAsymmetricPower, + PHM_PlatformCaps_OCLPowerOptimization, + PHM_PlatformCaps_MaxPCIEBandWidth, + PHM_PlatformCaps_PerfPerWattOptimizationSupport, + PHM_PlatformCaps_UVDClientMCTuning, + PHM_PlatformCaps_ODNinACSupport, + PHM_PlatformCaps_ODNinDCSupport, PHM_PlatformCaps_Max }; @@ -290,6 +305,8 @@ struct PP_Clocks { uint32_t memoryClock; uint32_t BusBandwidth; uint32_t engineClockInSR; + uint32_t dcefClock; + uint32_t dcefClockInSR; }; struct pp_clock_info { @@ -334,6 +351,21 @@ struct phm_clocks { uint32_t clock[MAX_NUM_CLOCKS]; }; +struct phm_odn_performance_level { + uint32_t clock; + uint32_t vddc; + bool enabled; +}; + +struct phm_odn_clock_levels { + uint32_t size; + uint32_t options; + uint32_t flags; + uint32_t number_of_performance_levels; + /* variable-sized array, specify by ulNumberOfPerformanceLevels. */ + struct phm_odn_performance_level performance_level_entries[8]; +}; + extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr); extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate); @@ -387,6 +419,17 @@ extern int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const st extern int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); +extern int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks); +extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); +extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); +extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr, + struct pp_display_clock_request *clock); + extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); #endif /* _HARDWARE_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 7275a29293eb..320225dd3328 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -83,7 +83,9 @@ enum PP_FEATURE_MASK { PP_ULV_MASK = 0x100, PP_ENABLE_GFX_CG_THRU_SMU = 0x200, PP_CLOCK_STRETCH_MASK = 0x400, - PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800 + PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800, + PP_SOCCLK_DPM_MASK = 0x1000, + PP_DCEFCLK_DPM_MASK = 0x2000, }; enum PHM_BackEnd_Magic { @@ -346,6 +348,16 @@ struct pp_hwmgr_func { int (*get_current_shallow_sleep_clocks)(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); + int (*get_clock_by_type_with_latency)(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks); + int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); + int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); + int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr, + struct pp_display_clock_request *clock); int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info 
*clocks); int (*power_off_asic)(struct pp_hwmgr *hwmgr); int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); @@ -355,9 +367,12 @@ struct pp_hwmgr_func { int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); int (*get_mclk_od)(struct pp_hwmgr *hwmgr); int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); - int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value); + int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, void *value, int *size); int (*request_firmware)(struct pp_hwmgr *hwmgr); int (*release_firmware)(struct pp_hwmgr *hwmgr); + int (*set_power_profile_state)(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); + int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable); }; struct pp_table_func { @@ -409,6 +424,7 @@ struct phm_cac_tdp_table { uint16_t usLowCACLeakage; uint16_t usHighCACLeakage; uint16_t usMaximumPowerDeliveryLimit; + uint16_t usEDCLimit; uint16_t usOperatingTempMinLimit; uint16_t usOperatingTempMaxLimit; uint16_t usOperatingTempStep; @@ -435,6 +451,46 @@ struct phm_cac_tdp_table { uint8_t ucCKS_LDO_REFSEL; }; +struct phm_tdp_table { + uint16_t usTDP; + uint16_t usConfigurableTDP; + uint16_t usTDC; + uint16_t usBatteryPowerLimit; + uint16_t usSmallPowerLimit; + uint16_t usLowCACLeakage; + uint16_t usHighCACLeakage; + uint16_t usMaximumPowerDeliveryLimit; + uint16_t usEDCLimit; + uint16_t usOperatingTempMinLimit; + uint16_t usOperatingTempMaxLimit; + uint16_t usOperatingTempStep; + uint16_t usOperatingTempHyst; + uint16_t usDefaultTargetOperatingTemp; + uint16_t usTargetOperatingTemp; + uint16_t usPowerTuneDataSetID; + uint16_t usSoftwareShutdownTemp; + uint16_t usClockStretchAmount; + uint16_t usTemperatureLimitTedge; + uint16_t usTemperatureLimitHotspot; + uint16_t usTemperatureLimitLiquid1; + uint16_t usTemperatureLimitLiquid2; + uint16_t usTemperatureLimitHBM; + uint16_t usTemperatureLimitVrVddc; + uint16_t usTemperatureLimitVrMvdd; + uint16_t usTemperatureLimitPlx; + uint8_t ucLiquid1_I2C_address; + uint8_t ucLiquid2_I2C_address; + uint8_t ucLiquid_I2C_Line; + uint8_t ucVr_I2C_address; + uint8_t ucVr_I2C_Line; + uint8_t ucPlx_I2C_address; + uint8_t ucPlx_I2C_Line; + uint8_t ucLiquid_I2C_LineSDA; + uint8_t ucVr_I2C_LineSDA; + uint8_t ucPlx_I2C_LineSDA; + uint32_t usBoostPowerLimit; +}; + struct phm_ppm_table { uint8_t ppm_design; uint16_t cpu_core_number; @@ -469,9 +525,11 @@ struct phm_vq_budgeting_table { struct phm_clock_and_voltage_limits { uint32_t sclk; uint32_t mclk; + uint32_t gfxclk; uint16_t vddc; uint16_t vddci; uint16_t vddgfx; + uint16_t vddmem; }; /* Structure to hold PPTable information */ @@ -479,18 +537,77 @@ struct phm_clock_and_voltage_limits { struct phm_ppt_v1_information { struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk; struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_mclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dcefclk; struct phm_clock_array *valid_sclk_values; struct phm_clock_array *valid_mclk_values; + struct phm_clock_array *valid_socclk_values; + struct phm_clock_array *valid_dcefclk_values; struct phm_clock_and_voltage_limits max_clock_voltage_on_dc; struct phm_clock_and_voltage_limits max_clock_voltage_on_ac; struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl; struct phm_ppm_table *ppm_parameter_table; struct phm_cac_tdp_table *cac_dtp_table; + struct phm_tdp_table *tdp_table; struct phm_ppt_v1_mm_clock_voltage_dependency_table 
*mm_dep_table; struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddmem_lookup_table; struct phm_ppt_v1_pcie_table *pcie_table; + struct phm_ppt_v1_gpio_table *gpio_table; uint16_t us_ulv_voltage_offset; + uint16_t us_ulv_smnclk_did; + uint16_t us_ulv_mp1clk_did; + uint16_t us_ulv_gfxclk_bypass; + uint16_t us_gfxclk_slew_rate; + uint16_t us_min_gfxclk_freq_limit; +}; + +struct phm_ppt_v2_information { + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_mclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dcefclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_pixclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dispclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_phyclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_dep_table; + + struct phm_clock_voltage_dependency_table *vddc_dep_on_dalpwrl; + + struct phm_clock_array *valid_sclk_values; + struct phm_clock_array *valid_mclk_values; + struct phm_clock_array *valid_socclk_values; + struct phm_clock_array *valid_dcefclk_values; + + struct phm_clock_and_voltage_limits max_clock_voltage_on_dc; + struct phm_clock_and_voltage_limits max_clock_voltage_on_ac; + + struct phm_ppm_table *ppm_parameter_table; + struct phm_cac_tdp_table *cac_dtp_table; + struct phm_tdp_table *tdp_table; + + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddmem_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddci_lookup_table; + + struct phm_ppt_v1_pcie_table *pcie_table; + + uint16_t us_ulv_voltage_offset; + uint16_t us_ulv_smnclk_did; + uint16_t us_ulv_mp1clk_did; + uint16_t us_ulv_gfxclk_bypass; + uint16_t us_gfxclk_slew_rate; + uint16_t us_min_gfxclk_freq_limit; + + uint8_t uc_gfx_dpm_voltage_mode; + uint8_t uc_soc_dpm_voltage_mode; + uint8_t uc_uclk_dpm_voltage_mode; + uint8_t uc_uvd_dpm_voltage_mode; + uint8_t uc_vce_dpm_voltage_mode; + uint8_t uc_mp0_dpm_voltage_mode; + uint8_t uc_dcef_dpm_voltage_mode; }; struct phm_dynamic_state_info { @@ -569,6 +686,13 @@ struct pp_advance_fan_control_parameters { uint16_t usFanGainVrMvdd; uint16_t usFanGainPlx; uint16_t usFanGainHbm; + uint8_t ucEnableZeroRPM; + uint8_t ucFanStopTemperature; + uint8_t ucFanStartTemperature; + uint32_t ulMaxFanSCLKAcousticLimit; /* Maximum Fan Controller SCLK Frequency Acoustic Limit. 
*/ + uint32_t ulTargetGfxClk; + uint16_t usZeroRPMStartTemperature; + uint16_t usZeroRPMStopTemperature; }; struct pp_thermal_controller_info { @@ -650,6 +774,13 @@ struct pp_hwmgr { struct pp_power_state *uvd_ps; struct amd_pp_display_configuration display_config; uint32_t feature_mask; + + /* power profile */ + struct amd_pp_profile gfx_power_profile; + struct amd_pp_profile compute_power_profile; + struct amd_pp_profile default_gfx_power_profile; + struct amd_pp_profile default_compute_power_profile; + enum amd_pp_profile_type current_power_profile; }; extern int hwmgr_early_init(struct pp_instance *handle); @@ -690,6 +821,8 @@ extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t ma extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr); +extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr); + extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t id, uint16_t *voltage); diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h index 072880130cfb..f3f9ebb631a5 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h @@ -37,7 +37,7 @@ #define PP_ASSERT_WITH_CODE(cond, msg, code) \ do { \ if (!(cond)) { \ - pr_warning("%s\n", msg); \ + pr_warn("%s\n", msg); \ code; \ } \ } while (0) diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h index ab8494fb5c6b..4c3b537a714f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h @@ -39,6 +39,7 @@ struct pp_instance { struct pp_smumgr *smu_mgr; struct pp_hwmgr *hwmgr; struct pp_eventmgr *eventmgr; + struct mutex pp_lock; }; #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h new file mode 100644 index 000000000000..227d999b6bd1 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h @@ -0,0 +1,48 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef PP_SOC15_H +#define PP_SOC15_H + +#include "vega10/soc15ip.h" + +inline static uint32_t soc15_get_register_offset( + uint32_t hw_id, + uint32_t inst, + uint32_t segment, + uint32_t offset) +{ + uint32_t reg = 0; + + if (hw_id == THM_HWID) + reg = THM_BASE.instance[inst].segment[segment] + offset; + else if (hw_id == NBIF_HWID) + reg = NBIF_BASE.instance[inst].segment[segment] + offset; + else if (hw_id == MP1_HWID) + reg = MP1_BASE.instance[inst].segment[segment] + offset; + else if (hw_id == DF_HWID) + reg = DF_BASE.instance[inst].segment[segment] + offset; + + return reg; +} + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h index fbc504c70b8b..62f36ba2435b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h @@ -377,6 +377,7 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B) #define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C) +#define PPSMC_MSG_LedConfig ((uint16_t) 0x274) #define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275) #define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277) #define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/powerplay/inc/smu9.h new file mode 100644 index 000000000000..9ef2490c7c2e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9.h @@ -0,0 +1,147 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
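
soc15_get_register_offset() above is plain address composition: select the per-IP base block by hw_id, index its instance and segment arrays, and add the register offset. The fragment below is a self-contained analogue of that pattern with placeholder base values (the real tables come from vega10/soc15ip.h), included only to show how the thermal code arrives at the absolute register indices used for the CG_FDO_CTRL*/CG_TACH_* accesses earlier in this diff.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-IP base tables generated in soc15ip.h; the
 * numbers here are placeholders, not real Vega10 addresses. */
struct ip_base {
	uint32_t segment[4];
};

static const struct ip_base thm_base = { { 0x00016600, 0x00016a00, 0, 0 } };

static uint32_t get_register_offset(const struct ip_base *base,
				    uint32_t segment, uint32_t offset)
{
	/* same shape as soc15_get_register_offset(): base + offset */
	return base->segment[segment] + offset;
}

int main(void)
{
	/* e.g. a CG_FDO_CTRL0-style register living in segment 0 */
	uint32_t reg = get_register_offset(&thm_base, 0, 0x0054);

	printf("absolute register index: 0x%08x\n", reg);
	return 0;
}
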
+ * + */ + +#ifndef SMU9_H +#define SMU9_H + +#pragma pack(push, 1) + +#define ENABLE_DEBUG_FEATURES + +/* Feature Control Defines */ +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_UCLK_BIT 2 +#define FEATURE_DPM_SOCCLK_BIT 3 +#define FEATURE_DPM_UVD_BIT 4 +#define FEATURE_DPM_VCE_BIT 5 +#define FEATURE_ULV_BIT 6 +#define FEATURE_DPM_MP0CLK_BIT 7 +#define FEATURE_DPM_LINK_BIT 8 +#define FEATURE_DPM_DCEFCLK_BIT 9 +#define FEATURE_AVFS_BIT 10 +#define FEATURE_DS_GFXCLK_BIT 11 +#define FEATURE_DS_SOCCLK_BIT 12 +#define FEATURE_DS_LCLK_BIT 13 +#define FEATURE_PPT_BIT 14 +#define FEATURE_TDC_BIT 15 +#define FEATURE_THERMAL_BIT 16 +#define FEATURE_GFX_PER_CU_CG_BIT 17 +#define FEATURE_RM_BIT 18 +#define FEATURE_DS_DCEFCLK_BIT 19 +#define FEATURE_ACDC_BIT 20 +#define FEATURE_VR0HOT_BIT 21 +#define FEATURE_VR1HOT_BIT 22 +#define FEATURE_FW_CTF_BIT 23 +#define FEATURE_LED_DISPLAY_BIT 24 +#define FEATURE_FAN_CONTROL_BIT 25 +#define FEATURE_VOLTAGE_CONTROLLER_BIT 26 +#define FEATURE_SPARE_27_BIT 27 +#define FEATURE_SPARE_28_BIT 28 +#define FEATURE_SPARE_29_BIT 29 +#define FEATURE_SPARE_30_BIT 30 +#define FEATURE_SPARE_31_BIT 31 + +#define NUM_FEATURES 32 + +#define FFEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) +#define FFEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) +#define FFEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) +#define FFEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT ) +#define FFEATURE_DPM_UVD_MASK (1 << FEATURE_DPM_UVD_BIT ) +#define FFEATURE_DPM_VCE_MASK (1 << FEATURE_DPM_VCE_BIT ) +#define FFEATURE_ULV_MASK (1 << FEATURE_ULV_BIT ) +#define FFEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) +#define FFEATURE_DPM_LINK_MASK (1 << FEATURE_DPM_LINK_BIT ) +#define FFEATURE_DPM_DCEFCLK_MASK (1 << FEATURE_DPM_DCEFCLK_BIT ) +#define FFEATURE_AVFS_MASK (1 << FEATURE_AVFS_BIT ) +#define FFEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) +#define FFEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) +#define FFEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FFEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) +#define FFEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) +#define FFEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) +#define FFEATURE_GFX_PER_CU_CG_MASK (1 << FEATURE_GFX_PER_CU_CG_BIT ) +#define FFEATURE_RM_MASK (1 << FEATURE_RM_BIT ) +#define FFEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT ) +#define FFEATURE_ACDC_MASK (1 << FEATURE_ACDC_BIT ) +#define FFEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) +#define FFEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) +#define FFEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) +#define FFEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT ) +#define FFEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) +#define FFEATURE_VOLTAGE_CONTROLLER_MASK (1 << FEATURE_VOLTAGE_CONTROLLER_BIT ) +#define FFEATURE_SPARE_27_MASK (1 << FEATURE_SPARE_27_BIT ) +#define FFEATURE_SPARE_28_MASK (1 << FEATURE_SPARE_28_BIT ) +#define FFEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT ) +#define FFEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT ) +#define FFEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT ) +/* Workload types */ +#define WORKLOAD_VR_BIT 0 +#define WORKLOAD_FRTC_BIT 1 +#define WORKLOAD_VIDEO_BIT 2 +#define WORKLOAD_COMPUTE_BIT 3 +#define NUM_WORKLOADS 4 + +/* ULV Client Masks */ +#define ULV_CLIENT_RLC_MASK 0x00000001 +#define ULV_CLIENT_UVD_MASK 0x00000002 +#define ULV_CLIENT_VCE_MASK 0x00000004 +#define ULV_CLIENT_SDMA0_MASK 0x00000008 +#define ULV_CLIENT_SDMA1_MASK 
0x00000010 +#define ULV_CLIENT_JPEG_MASK 0x00000020 +#define ULV_CLIENT_GFXCLK_DPM_MASK 0x00000040 +#define ULV_CLIENT_UVD_DPM_MASK 0x00000080 +#define ULV_CLIENT_VCE_DPM_MASK 0x00000100 +#define ULV_CLIENT_MP0CLK_DPM_MASK 0x00000200 +#define ULV_CLIENT_UCLK_DPM_MASK 0x00000400 +#define ULV_CLIENT_SOCCLK_DPM_MASK 0x00000800 +#define ULV_CLIENT_DCEFCLK_DPM_MASK 0x00001000 + +typedef struct { + /* MP1_EXT_SCRATCH0 */ + uint32_t CurrLevel_GFXCLK : 4; + uint32_t CurrLevel_UVD : 4; + uint32_t CurrLevel_VCE : 4; + uint32_t CurrLevel_LCLK : 4; + uint32_t CurrLevel_MP0CLK : 4; + uint32_t CurrLevel_UCLK : 4; + uint32_t CurrLevel_SOCCLK : 4; + uint32_t CurrLevel_DCEFCLK : 4; + /* MP1_EXT_SCRATCH1 */ + uint32_t TargLevel_GFXCLK : 4; + uint32_t TargLevel_UVD : 4; + uint32_t TargLevel_VCE : 4; + uint32_t TargLevel_LCLK : 4; + uint32_t TargLevel_MP0CLK : 4; + uint32_t TargLevel_UCLK : 4; + uint32_t TargLevel_SOCCLK : 4; + uint32_t TargLevel_DCEFCLK : 4; + /* MP1_EXT_SCRATCH2-7 */ + uint32_t Reserved[6]; +} FwStatus_t; + +#pragma pack(pop) + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h new file mode 100644 index 000000000000..2037910adcb1 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h @@ -0,0 +1,467 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
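
The FEATURE_*_BIT / FFEATURE_*_MASK definitions in smu9.h above are what drive the vega10_enable_smc_features() calls seen earlier: the driver flips individual bits of a 32-bit feature word and asks the SMU to apply it. A small stand-alone sketch of that bit bookkeeping follows; the bit positions are copied from the header, while the helper functions are illustrative only.

#include <stdbool.h>
#include <stdint.h>

#define FEATURE_FW_CTF_BIT        23
#define FEATURE_FAN_CONTROL_BIT   25

#define FFEATURE_FW_CTF_MASK      (1u << FEATURE_FW_CTF_BIT)
#define FFEATURE_FAN_CONTROL_MASK (1u << FEATURE_FAN_CONTROL_BIT)

/* Would FAN_CONTROL be reported as enabled in a feature word such as the
 * one returned for PPSMC_MSG_GetEnabledSmuFeatures? */
static bool fan_control_enabled(uint32_t enabled_features)
{
	return (enabled_features & FFEATURE_FAN_CONTROL_MASK) != 0;
}

/* Build the bitmap an enable/disable request would carry for a given
 * feature bit, as the GNLD_* smu_feature_bitmap fields do above. */
static uint32_t feature_bitmap(unsigned int feature_bit)
{
	return 1u << feature_bit;
}
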
+ * + */ + +#ifndef SMU9_DRIVER_IF_H +#define SMU9_DRIVER_IF_H + +#include "smu9.h" + +/**** IMPORTANT *** + * SMU TEAM: Always increment the interface version if + * any structure is changed in this file + */ +#define SMU9_DRIVER_IF_VERSION 0xB + +#define PPTABLE_V10_SMU_VERSION 1 + +#define NUM_GFXCLK_DPM_LEVELS 8 +#define NUM_UVD_DPM_LEVELS 8 +#define NUM_VCE_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_DCEFCLK_DPM_LEVELS 8 +#define NUM_LINK_LEVELS 2 + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_UVD_DPM_LEVEL (NUM_UVD_DPM_LEVELS - 1) +#define MAX_VCE_DPM_LEVEL (NUM_VCE_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) +#define MAX_LINK_DPM_LEVEL (NUM_LINK_LEVELS - 1) + +#define MIN_GFXCLK_DPM_LEVEL 0 +#define MIN_UVD_DPM_LEVEL 0 +#define MIN_VCE_DPM_LEVEL 0 +#define MIN_MP0CLK_DPM_LEVEL 0 +#define MIN_UCLK_DPM_LEVEL 0 +#define MIN_SOCCLK_DPM_LEVEL 0 +#define MIN_DCEFCLK_DPM_LEVEL 0 +#define MIN_LINK_DPM_LEVEL 0 + +#define NUM_EVV_VOLTAGE_LEVELS 8 +#define MAX_EVV_VOLTAGE_LEVEL (NUM_EVV_VOLTAGE_LEVELS - 1) +#define MIN_EVV_VOLTAGE_LEVEL 0 + +#define NUM_PSP_LEVEL_MAP 4 + +/* Gemini Modes */ +#define PPSMC_GeminiModeNone 0 /* Single GPU board */ +#define PPSMC_GeminiModeMaster 1 /* Master GPU on a Gemini board */ +#define PPSMC_GeminiModeSlave 2 /* Slave GPU on a Gemini board */ + +/* Voltage Modes for DPMs */ +#define VOLTAGE_MODE_AVFS_INTERPOLATE 0 +#define VOLTAGE_MODE_AVFS_WORST_CASE 1 +#define VOLTAGE_MODE_STATIC 2 + +typedef struct { + uint32_t FbMult; /* Feedback Multiplier, bit 8:0 int, bit 15:12 post_div, bit 31:16 frac */ + uint32_t SsFbMult; /* Spread FB Mult: bit 8:0 int, bit 31:16 frac */ + uint16_t SsSlewFrac; + uint8_t SsOn; + uint8_t Did; /* DID */ +} PllSetting_t; + +typedef struct { + int32_t a0; + int32_t a1; + int32_t a2; + + uint8_t a0_shift; + uint8_t a1_shift; + uint8_t a2_shift; + uint8_t padding; +} GbVdroopTable_t; + +typedef struct { + int32_t m1; + int32_t m2; + int32_t b; + + uint8_t m1_shift; + uint8_t m2_shift; + uint8_t b_shift; + uint8_t padding; +} QuadraticInt_t; + +#define NUM_DSPCLK_LEVELS 8 + +typedef enum { + DSPCLK_DCEFCLK = 0, + DSPCLK_DISPCLK, + DSPCLK_PIXCLK, + DSPCLK_PHYCLK, + DSPCLK_COUNT, +} DSPCLK_e; + +typedef struct { + uint16_t Freq; /* in MHz */ + uint16_t Vid; /* min voltage in SVI2 VID */ +} DisplayClockTable_t; + +typedef struct { + /* PowerTune */ + uint16_t SocketPowerLimit; /* Watts */ + uint16_t TdcLimit; /* Amps */ + uint16_t EdcLimit; /* Amps */ + uint16_t TedgeLimit; /* Celcius */ + uint16_t ThotspotLimit; /* Celcius */ + uint16_t ThbmLimit; /* Celcius */ + uint16_t Tvr_socLimit; /* Celcius */ + uint16_t Tvr_memLimit; /* Celcius */ + uint16_t Tliquid1Limit; /* Celcius */ + uint16_t Tliquid2Limit; /* Celcius */ + uint16_t TplxLimit; /* Celcius */ + uint16_t LoadLineResistance; /* in mOhms */ + uint32_t FitLimit; /* Failures in time (failures per million parts over the defined lifetime) */ + + /* External Component Communication Settings */ + uint8_t Liquid1_I2C_address; + uint8_t Liquid2_I2C_address; + uint8_t Vr_I2C_address; + uint8_t Plx_I2C_address; + + uint8_t GeminiMode; + uint8_t spare17[3]; + uint32_t GeminiApertureHigh; + uint32_t GeminiApertureLow; + + uint8_t Liquid_I2C_LineSCL; + uint8_t Liquid_I2C_LineSDA; + uint8_t 
Vr_I2C_LineSCL; + uint8_t Vr_I2C_LineSDA; + uint8_t Plx_I2C_LineSCL; + uint8_t Plx_I2C_LineSDA; + uint8_t paddingx[2]; + + /* ULV Settings */ + uint8_t UlvOffsetVid; /* SVI2 VID */ + uint8_t UlvSmnclkDid; /* DID for ULV mode. 0 means CLK will not be modified in ULV. */ + uint8_t UlvMp1clkDid; /* DID for ULV mode. 0 means CLK will not be modified in ULV. */ + uint8_t UlvGfxclkBypass; /* 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV */ + + /* VDDCR_SOC Voltages */ + uint8_t SocVid[NUM_EVV_VOLTAGE_LEVELS]; + + /* This is the minimum voltage needed to run the SOC. */ + uint8_t MinVoltageVid; /* Minimum Voltage ("Vmin") of ASIC */ + uint8_t MaxVoltageVid; /* Maximum Voltage allowable */ + uint8_t MaxVidStep; /* Max VID step that SMU will request. Multiple steps are taken if voltage change exceeds this value. */ + uint8_t padding8; + + uint8_t UlvPhaseSheddingPsi0; /* set this to 1 to set PSI0/1 to 1 in ULV mode */ + uint8_t UlvPhaseSheddingPsi1; /* set this to 1 to set PSI0/1 to 1 in ULV mode */ + uint8_t padding8_2[2]; + + /* SOC Frequencies */ + PllSetting_t GfxclkLevel [NUM_GFXCLK_DPM_LEVELS]; + + uint8_t SocclkDid [NUM_SOCCLK_DPM_LEVELS]; /* DID */ + uint8_t SocDpmVoltageIndex [NUM_SOCCLK_DPM_LEVELS]; + + uint8_t VclkDid [NUM_UVD_DPM_LEVELS]; /* DID */ + uint8_t DclkDid [NUM_UVD_DPM_LEVELS]; /* DID */ + uint8_t UvdDpmVoltageIndex [NUM_UVD_DPM_LEVELS]; + + uint8_t EclkDid [NUM_VCE_DPM_LEVELS]; /* DID */ + uint8_t VceDpmVoltageIndex [NUM_VCE_DPM_LEVELS]; + + uint8_t Mp0clkDid [NUM_MP0CLK_DPM_LEVELS]; /* DID */ + uint8_t Mp0DpmVoltageIndex [NUM_MP0CLK_DPM_LEVELS]; + + DisplayClockTable_t DisplayClockTable[DSPCLK_COUNT][NUM_DSPCLK_LEVELS]; + QuadraticInt_t DisplayClock2Gfxclk[DSPCLK_COUNT]; + + uint8_t GfxDpmVoltageMode; + uint8_t SocDpmVoltageMode; + uint8_t UclkDpmVoltageMode; + uint8_t UvdDpmVoltageMode; + + uint8_t VceDpmVoltageMode; + uint8_t Mp0DpmVoltageMode; + uint8_t DisplayDpmVoltageMode; + uint8_t padding8_3; + + uint16_t GfxclkSlewRate; + uint16_t padding; + + uint32_t LowGfxclkInterruptThreshold; /* in units of 10KHz */ + + /* Alpha parameters for clock averages. 
("255"=1) */ + uint8_t GfxclkAverageAlpha; + uint8_t SocclkAverageAlpha; + uint8_t UclkAverageAlpha; + uint8_t GfxActivityAverageAlpha; + + /* UCLK States */ + uint8_t MemVid[NUM_UCLK_DPM_LEVELS]; /* VID */ + PllSetting_t UclkLevel[NUM_UCLK_DPM_LEVELS]; /* Full PLL settings */ + uint8_t MemSocVoltageIndex[NUM_UCLK_DPM_LEVELS]; + uint8_t LowestUclkReservedForUlv; /* Set this to 1 if UCLK DPM0 is reserved for ULV-mode only */ + uint8_t paddingUclk[3]; + uint16_t NumMemoryChannels; /* Used for memory bandwidth calculations */ + uint16_t MemoryChannelWidth; /* Used for memory bandwidth calculations */ + + /* CKS Settings */ + uint8_t CksEnable[NUM_GFXCLK_DPM_LEVELS]; + uint8_t CksVidOffset[NUM_GFXCLK_DPM_LEVELS]; + + /* MP0 Mapping Table */ + uint8_t PspLevelMap[NUM_PSP_LEVEL_MAP]; + + /* Link DPM Settings */ + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; /* 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 */ + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; /* 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 */ + uint8_t LclkDid[NUM_LINK_LEVELS]; /* Leave at 0 to use hardcoded values in FW */ + uint8_t paddingLinkDpm[2]; + + /* Fan Control */ + uint16_t FanStopTemp; /* Celcius */ + uint16_t FanStartTemp; /* Celcius */ + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + uint16_t FanGainLiquid; + uint16_t FanGainVrVddc; + uint16_t FanGainVrMvdd; + uint16_t FanGainPlx; + uint16_t FanGainHbm; + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint8_t FanZeroRpmEnable; + uint8_t FanSpare; + + /* The following are AFC override parameters. Leave at 0 to use FW defaults. */ + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + /* GPIO Settings */ + uint8_t AcDcGpio; /* GPIO pin configured for AC/DC switching */ + uint8_t AcDcPolarity; /* GPIO polarity for AC/DC switching */ + uint8_t VR0HotGpio; /* GPIO pin configured for VR0 HOT event */ + uint8_t VR0HotPolarity; /* GPIO polarity for VR0 HOT event */ + uint8_t VR1HotGpio; /* GPIO pin configured for VR1 HOT event */ + uint8_t VR1HotPolarity; /* GPIO polarity for VR1 HOT event */ + uint8_t Padding1; /* replace GPIO pin configured for CTF */ + uint8_t Padding2; /* replace GPIO polarity for CTF */ + + /* LED Display Settings */ + uint8_t LedPin0; /* GPIO number for LedPin[0] */ + uint8_t LedPin1; /* GPIO number for LedPin[1] */ + uint8_t LedPin2; /* GPIO number for LedPin[2] */ + uint8_t padding8_4; + + /* AVFS */ + uint8_t OverrideBtcGbCksOn; + uint8_t OverrideAvfsGbCksOn; + uint8_t PaddingAvfs8[2]; + + GbVdroopTable_t BtcGbVdroopTableCksOn; + GbVdroopTable_t BtcGbVdroopTableCksOff; + + QuadraticInt_t AvfsGbCksOn; /* Replacement equation */ + QuadraticInt_t AvfsGbCksOff; /* Replacement equation */ + + uint8_t StaticVoltageOffsetVid[NUM_GFXCLK_DPM_LEVELS]; /* This values are added on to the final voltage calculation */ + + /* Ageing Guardband Parameters */ + uint32_t AConstant[3]; + uint16_t DC_tol_sigma; + uint16_t Platform_mean; + uint16_t Platform_sigma; + uint16_t PSM_Age_CompFactor; + + uint32_t DpmLevelPowerDelta; + + uint32_t Reserved[19]; + + /* Padding - ignore */ + uint32_t MmHubPadding[7]; /* SMU internal use */ + +} PPTable_t; + +typedef struct { + uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Padding[3]; +} 
WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCEFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef struct { + /* Watermarks */ + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + + uint32_t MmHubPadding[7]; /* SMU internal use */ +} Watermarks_t; + +#ifdef PPTABLE_V10_SMU_VERSION +typedef struct { + float AvfsGbCksOn[NUM_GFXCLK_DPM_LEVELS]; + float AcBtcGbCksOn[NUM_GFXCLK_DPM_LEVELS]; + float AvfsGbCksOff[NUM_GFXCLK_DPM_LEVELS]; + float AcBtcGbCksOff[NUM_GFXCLK_DPM_LEVELS]; + float DcBtcGb; + + uint32_t MmHubPadding[7]; /* SMU internal use */ +} AvfsTable_t; +#else +typedef struct { + uint32_t AvfsGbCksOn[NUM_GFXCLK_DPM_LEVELS]; + uint32_t AcBtcGbCksOn[NUM_GFXCLK_DPM_LEVELS]; + uint32_t AvfsGbCksOff[NUM_GFXCLK_DPM_LEVELS]; + uint32_t AcBtcGbCksOff[NUM_GFXCLK_DPM_LEVELS]; + uint32_t DcBtcGb; + + uint32_t MmHubPadding[7]; /* SMU internal use */ +} AvfsTable_t; +#endif + +typedef struct { + uint16_t avgPsmCount[30]; + uint16_t minPsmCount[30]; + float avgPsmVoltage[30]; + float minPsmVoltage[30]; + + uint32_t MmHubPadding[7]; /* SMU internal use */ +} AvfsDebugTable_t; + +typedef struct { + uint8_t AvfsEn; + uint8_t AvfsVersion; + uint8_t Padding[2]; + + uint32_t VFT0_m1; /* Q16.16 */ + uint32_t VFT0_m2; /* Q16.16 */ + uint32_t VFT0_b; /* Q16.16 */ + + uint32_t VFT1_m1; /* Q16.16 */ + uint32_t VFT1_m2; /* Q16.16 */ + uint32_t VFT1_b; /* Q16.16 */ + + uint32_t VFT2_m1; /* Q16.16 */ + uint32_t VFT2_m2; /* Q16.16 */ + uint32_t VFT2_b; /* Q16.16 */ + + uint32_t AvfsGb0_m1; /* Q16.16 */ + uint32_t AvfsGb0_m2; /* Q16.16 */ + uint32_t AvfsGb0_b; /* Q16.16 */ + + uint32_t AcBtcGb_m1; /* Q16.16 */ + uint32_t AcBtcGb_m2; /* Q16.16 */ + uint32_t AcBtcGb_b; /* Q16.16 */ + + uint32_t AvfsTempCold; + uint32_t AvfsTempMid; + uint32_t AvfsTempHot; + + uint32_t InversionVoltage; /* in mV with 2 fractional bits */ + + uint32_t P2V_m1; /* Q16.16 */ + uint32_t P2V_m2; /* Q16.16 */ + uint32_t P2V_b; /* Q16.16 */ + + uint32_t P2VCharzFreq; /* in 10KHz units */ + + uint32_t EnabledAvfsModules; + + uint32_t MmHubPadding[7]; /* SMU internal use */ +} AvfsFuseOverride_t; + +/* These defines are used with the following messages: + * SMC_MSG_TransferTableDram2Smu + * SMC_MSG_TransferTableSmu2Dram + */ +#define TABLE_PPTABLE 0 +#define TABLE_WATERMARKS 1 +#define TABLE_AVFS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_AVFS_FUSE_OVERRIDE 4 +#define TABLE_PMSTATUSLOG 5 +#define TABLE_COUNT 6 + +/* These defines are used with the SMC_MSG_SetUclkFastSwitch message. 
*/ +#define UCLK_SWITCH_SLOW 0 +#define UCLK_SWITCH_FAST 1 + +/* GFX DIDT Configuration */ +#define SQ_Enable_MASK 0x1 +#define SQ_IR_MASK 0x2 +#define SQ_PCC_MASK 0x4 +#define SQ_EDC_MASK 0x8 + +#define TCP_Enable_MASK 0x100 +#define TCP_IR_MASK 0x200 +#define TCP_PCC_MASK 0x400 +#define TCP_EDC_MASK 0x800 + +#define TD_Enable_MASK 0x10000 +#define TD_IR_MASK 0x20000 +#define TD_PCC_MASK 0x40000 +#define TD_EDC_MASK 0x80000 + +#define DB_Enable_MASK 0x1000000 +#define DB_IR_MASK 0x2000000 +#define DB_PCC_MASK 0x4000000 +#define DB_EDC_MASK 0x8000000 + +#define SQ_Enable_SHIFT 0 +#define SQ_IR_SHIFT 1 +#define SQ_PCC_SHIFT 2 +#define SQ_EDC_SHIFT 3 + +#define TCP_Enable_SHIFT 8 +#define TCP_IR_SHIFT 9 +#define TCP_PCC_SHIFT 10 +#define TCP_EDC_SHIFT 11 + +#define TD_Enable_SHIFT 16 +#define TD_IR_SHIFT 17 +#define TD_PCC_SHIFT 18 +#define TD_EDC_SHIFT 19 + +#define DB_Enable_SHIFT 24 +#define DB_IR_SHIFT 25 +#define DB_PCC_SHIFT 26 +#define DB_EDC_SHIFT 27 + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 7c318a95e0c2..37f41217b8a0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -38,6 +38,7 @@ extern const struct pp_smumgr_func iceland_smu_funcs; extern const struct pp_smumgr_func tonga_smu_funcs; extern const struct pp_smumgr_func fiji_smu_funcs; extern const struct pp_smumgr_func polaris10_smu_funcs; +extern const struct pp_smumgr_func vega10_smu_funcs; enum AVFS_BTC_STATUS { AVFS_BTC_BOOT = 0, @@ -127,6 +128,8 @@ struct pp_smumgr_func { uint32_t (*get_offsetof)(uint32_t type, uint32_t member); uint32_t (*get_mac_definition)(uint32_t value); bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); + int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); }; struct pp_smumgr { @@ -175,6 +178,8 @@ extern int smu_allocate_memory(void *device, uint32_t size, void **kptr, void *handle); extern int smu_free_memory(void *device, void *handle); +extern int vega10_smum_init(struct pp_smumgr *smumgr); + extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); @@ -193,6 +198,9 @@ extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); +extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); + #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h new file mode 100644 index 000000000000..90beef35bba2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h @@ -0,0 +1,131 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
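
The GFX DIDT mask/shift pairs at the end of smu9_driver_if.h are meant to be OR-ed into a single configuration word, presumably consumed by the PPSMC_MSG_ConfigureGfxDidt message defined later in this diff. A hedged example of composing such a word from the published shifts (enabling SQ and TD with EDC, purely illustrative):

#include <stdint.h>

#define SQ_Enable_SHIFT  0
#define SQ_EDC_SHIFT     3
#define TD_Enable_SHIFT  16
#define TD_EDC_SHIFT     19

static uint32_t didt_config_sq_td_edc(void)
{
	/* one bit per block/function, matching the *_MASK values above */
	return (1u << SQ_Enable_SHIFT) | (1u << SQ_EDC_SHIFT) |
	       (1u << TD_Enable_SHIFT) | (1u << TD_EDC_SHIFT);
}
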
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef PP_SMC_H +#define PP_SMC_H + +#pragma pack(push, 1) + +#define SMU_UCODE_VERSION 0x001c0800 + +/* SMU Response Codes: */ +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +typedef uint16_t PPSMC_Result; + +/* Message Definitions */ +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_EnableSmuFeatures 0x4 +#define PPSMC_MSG_DisableSmuFeatures 0x5 +#define PPSMC_MSG_GetEnabledSmuFeatures 0x6 +#define PPSMC_MSG_SetWorkloadMask 0x7 +#define PPSMC_MSG_SetPptLimit 0x8 +#define PPSMC_MSG_SetDriverDramAddrHigh 0x9 +#define PPSMC_MSG_SetDriverDramAddrLow 0xA +#define PPSMC_MSG_SetToolsDramAddrHigh 0xB +#define PPSMC_MSG_SetToolsDramAddrLow 0xC +#define PPSMC_MSG_TransferTableSmu2Dram 0xD +#define PPSMC_MSG_TransferTableDram2Smu 0xE +#define PPSMC_MSG_UseDefaultPPTable 0xF +#define PPSMC_MSG_UseBackupPPTable 0x10 +#define PPSMC_MSG_RunBtc 0x11 +#define PPSMC_MSG_RequestI2CBus 0x12 +#define PPSMC_MSG_ReleaseI2CBus 0x13 +#define PPSMC_MSG_ConfigureTelemetry 0x14 +#define PPSMC_MSG_SetUlvIpMask 0x15 +#define PPSMC_MSG_SetSocVidOffset 0x16 +#define PPSMC_MSG_SetMemVidOffset 0x17 +#define PPSMC_MSG_GetSocVidOffset 0x18 +#define PPSMC_MSG_GetMemVidOffset 0x19 +#define PPSMC_MSG_SetFloorSocVoltage 0x1A +#define PPSMC_MSG_SoftReset 0x1B +#define PPSMC_MSG_StartBacoMonitor 0x1C +#define PPSMC_MSG_CancelBacoMonitor 0x1D +#define PPSMC_MSG_EnterBaco 0x1E +#define PPSMC_MSG_AllowLowGfxclkInterrupt 0x1F +#define PPSMC_MSG_SetLowGfxclkInterruptThreshold 0x20 +#define PPSMC_MSG_SetSoftMinGfxclkByIndex 0x21 +#define PPSMC_MSG_SetSoftMaxGfxclkByIndex 0x22 +#define PPSMC_MSG_GetCurrentGfxclkIndex 0x23 +#define PPSMC_MSG_SetSoftMinUclkByIndex 0x24 +#define PPSMC_MSG_SetSoftMaxUclkByIndex 0x25 +#define PPSMC_MSG_GetCurrentUclkIndex 0x26 +#define PPSMC_MSG_SetSoftMinUvdByIndex 0x27 +#define PPSMC_MSG_SetSoftMaxUvdByIndex 0x28 +#define PPSMC_MSG_GetCurrentUvdIndex 0x29 +#define PPSMC_MSG_SetSoftMinVceByIndex 0x2A +#define PPSMC_MSG_SetSoftMaxVceByIndex 0x2B +#define PPSMC_MSG_SetHardMinVceByIndex 0x2C +#define PPSMC_MSG_GetCurrentVceIndex 0x2D +#define PPSMC_MSG_SetSoftMinSocclkByIndex 0x2E +#define PPSMC_MSG_SetHardMinSocclkByIndex 0x2F +#define PPSMC_MSG_SetSoftMaxSocclkByIndex 0x30 +#define PPSMC_MSG_GetCurrentSocclkIndex 0x31 +#define PPSMC_MSG_SetMinLinkDpmByIndex 0x32 +#define PPSMC_MSG_GetCurrentLinkIndex 0x33 +#define PPSMC_MSG_GetAverageGfxclkFrequency 0x34 +#define PPSMC_MSG_GetAverageSocclkFrequency 0x35 +#define PPSMC_MSG_GetAverageUclkFrequency 0x36 +#define PPSMC_MSG_GetAverageGfxActivity 0x37 +#define PPSMC_MSG_GetTemperatureEdge 0x38 +#define PPSMC_MSG_GetTemperatureHotspot 0x39 +#define PPSMC_MSG_GetTemperatureHBM 0x3A +#define PPSMC_MSG_GetTemperatureVrSoc 0x3B +#define PPSMC_MSG_GetTemperatureVrMem 0x3C +#define PPSMC_MSG_GetTemperatureLiquid 0x3D +#define 
PPSMC_MSG_GetTemperaturePlx 0x3E +#define PPSMC_MSG_OverDriveSetPercentage 0x3F +#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x40 +#define PPSMC_MSG_SwitchToAC 0x41 +#define PPSMC_MSG_SetUclkFastSwitch 0x42 +#define PPSMC_MSG_SetUclkDownHyst 0x43 +#define PPSMC_MSG_RemoveDCClamp 0x44 +#define PPSMC_MSG_GfxDeviceDriverReset 0x45 +#define PPSMC_MSG_GetCurrentRpm 0x46 +#define PPSMC_MSG_SetVideoFps 0x47 +#define PPSMC_MSG_SetCustomGfxDpmParameters 0x48 +#define PPSMC_MSG_SetTjMax 0x49 +#define PPSMC_MSG_SetFanTemperatureTarget 0x4A +#define PPSMC_MSG_PrepareMp1ForUnload 0x4B +#define PPSMC_MSG_RequestDisplayClockByFreq 0x4C +#define PPSMC_MSG_GetClockFreqMHz 0x4D +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x4E +#define PPSMC_MSG_DramLogSetDramAddrLow 0x4F +#define PPSMC_MSG_DramLogSetDramSize 0x50 +#define PPSMC_MSG_SetFanMaxRpm 0x51 +#define PPSMC_MSG_SetFanMinPwm 0x52 +#define PPSMC_MSG_ConfigureGfxDidt 0x55 +#define PPSMC_MSG_NumOfDisplays 0x56 +#define PPSMC_Message_Count 0x57 + +typedef int PPSMC_Msg; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 51ff08301651..68b01b594e11 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -4,7 +4,7 @@ SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \ polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \ - smu7_smumgr.o iceland_smc.o + smu7_smumgr.o iceland_smc.o vega10_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 0f7a77b7312e..6a320b27aefd 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -1721,6 +1721,73 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } +static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct SMU73_Discrete_GraphicsLevel *levels = + data->smc_state_table.GraphicsLevel; + unsigned min_level = 1; + + hwmgr->default_gfx_power_profile.activity_threshold = + be16_to_cpu(levels[0].ActivityLevel); + hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst; + hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst; + hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE; + + hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile; + hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE; + + /* Workaround compute SDMA instability: disable lowest SCLK + * DPM level. 
Optimize compute power profile: Use only highest + * 2 power levels (if more than 2 are available), Hysteresis: + * 0ms up, 5ms down + */ + if (data->smc_state_table.GraphicsDpmLevelCount > 2) + min_level = data->smc_state_table.GraphicsDpmLevelCount - 2; + else if (data->smc_state_table.GraphicsDpmLevelCount == 2) + min_level = 1; + else + min_level = 0; + hwmgr->default_compute_power_profile.min_sclk = + be32_to_cpu(levels[min_level].SclkFrequency); + hwmgr->default_compute_power_profile.up_hyst = 0; + hwmgr->default_compute_power_profile.down_hyst = 5; + + hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile; + hwmgr->compute_power_profile = hwmgr->default_compute_power_profile; + + return 0; +} + +static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr) +{ + pp_atomctrl_voltage_table param_led_dpm; + int result = 0; + u32 mask = 0; + + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT, + ¶m_led_dpm); + if (result == 0) { + int i, j; + u32 tmp = param_led_dpm.mask_low; + + for (i = 0, j = 0; i < 32; i++) { + if (tmp & 1) { + mask |= (i << (8 * j)); + if (++j >= 3) + break; + } + tmp >>= 1; + } + } + if (mask) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_LedConfig, + mask); + return 0; +} + /** * Initializes the SMC table and uploads it * @@ -1934,6 +2001,13 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) result = fiji_populate_pm_fuses(hwmgr); PP_ASSERT_WITH_CODE(0 == result, "Failed to populate PM fuses to SMC memory!", return result); + + result = fiji_setup_dpm_led_config(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to setup dpm led config", return result); + + fiji_save_default_power_profile(hwmgr); + return 0; } @@ -2131,7 +2205,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); } } - pr_warning("can't get the offset of type %x member %x\n", type, member); + pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; } @@ -2156,7 +2230,7 @@ uint32_t fiji_get_mac_definition(uint32_t value) return SMU73_MAX_LEVELS_MVDD; } - pr_warning("can't get the mac of %x\n", value); + pr_warn("can't get the mac of %x\n", value); return 0; } @@ -2378,3 +2452,28 @@ bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) ? 
true : false; } + +int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *) + (hwmgr->smumgr->backend); + struct SMU73_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * + SMU73_MAX_LEVELS_GRAPHICS; + uint32_t i; + + for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { + levels[i].ActivityLevel = + cpu_to_be16(request->activity_threshold); + levels[i].EnabledForActivity = 1; + levels[i].UpHyst = request->up_hyst; + levels[i].DownHyst = request->down_hyst; + } + + return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + array_size, SMC_RAM_END); +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h index d30d150f9ca6..0e9e1f2d7238 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h @@ -46,6 +46,7 @@ uint32_t fiji_get_mac_definition(uint32_t value); int fiji_process_firmware_header(struct pp_hwmgr *hwmgr); int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr); - +int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 54b347366b5d..a1cb78552cf6 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -519,4 +519,5 @@ const struct pp_smumgr_func fiji_smu_funcs = { .get_mac_definition = fiji_get_mac_definition, .initialize_mc_reg_table = fiji_initialize_mc_reg_table, .is_dpm_running = fiji_is_dpm_running, + .populate_requested_graphic_levels = fiji_populate_requested_graphic_levels, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index ad82161df831..51adf04ab4b3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -122,7 +122,7 @@ static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) break; default: smu_data->power_tune_defaults = &defaults_iceland; - pr_warning("Unknown V.I. Device ID.\n"); + pr_warn("Unknown V.I. 
Device ID.\n"); break; } return; @@ -378,7 +378,7 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, return -EINVAL); if (NULL == hwmgr->dyn_state.cac_leakage_table) { - pr_warning("CAC Leakage Table does not exist, using vddc.\n"); + pr_warn("CAC Leakage Table does not exist, using vddc.\n"); return 0; } @@ -394,7 +394,7 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); } else { - pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); + pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); } @@ -414,7 +414,7 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; } else { - pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); + pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); } @@ -423,7 +423,7 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, } if (!vol_found) - pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); + pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); } return 0; @@ -2146,7 +2146,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); } } - pr_warning("can't get the offset of type %x member %x\n", type, member); + pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; } @@ -2169,7 +2169,7 @@ uint32_t iceland_get_mac_definition(uint32_t value) return SMU71_MAX_LEVELS_MVDD; } - pr_warning("can't get the mac of %x\n", value); + pr_warn("can't get the mac of %x\n", value); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 80e2329a1b9e..f68e759e8be2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -1613,6 +1613,42 @@ static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) } +static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct SMU74_Discrete_GraphicsLevel *levels = + data->smc_state_table.GraphicsLevel; + unsigned min_level = 1; + + hwmgr->default_gfx_power_profile.activity_threshold = + be16_to_cpu(levels[0].ActivityLevel); + 
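/* ActivityLevel is stored big-endian in the SMC copy of the graphics level table, hence the be16_to_cpu() when caching the host-side default profile. */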
hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst; + hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst; + hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE; + + hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile; + hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE; + + /* Workaround compute SDMA instability: disable lowest SCLK + * DPM level. Optimize compute power profile: Use only highest + * 2 power levels (if more than 2 are available), Hysteresis: + * 0ms up, 5ms down + */ + if (data->smc_state_table.GraphicsDpmLevelCount > 2) + min_level = data->smc_state_table.GraphicsDpmLevelCount - 2; + else if (data->smc_state_table.GraphicsDpmLevelCount == 2) + min_level = 1; + else + min_level = 0; + hwmgr->default_compute_power_profile.min_sclk = + be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency); + hwmgr->default_compute_power_profile.up_hyst = 0; + hwmgr->default_compute_power_profile.down_hyst = 5; + + hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile; + hwmgr->compute_power_profile = hwmgr->default_compute_power_profile; +} + /** * Initializes the SMC table and uploads it * @@ -1832,6 +1868,9 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) result = polaris10_populate_pm_fuses(hwmgr); PP_ASSERT_WITH_CODE(0 == result, "Failed to populate PM fuses to SMC memory!", return result); + + polaris10_save_default_power_profile(hwmgr); + return 0; } @@ -2184,7 +2223,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); } } - pr_warning("can't get the offset of type %x member %x\n", type, member); + pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; } @@ -2211,7 +2250,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value) return SMU7_UVD_MCLK_HANDSHAKE_DISABLE; } - pr_warning("can't get the mac of %x\n", value); + pr_warn("can't get the mac of %x\n", value); return 0; } @@ -2298,3 +2337,28 @@ bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) ? 
true : false; } + +int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *) + (hwmgr->smumgr->backend); + struct SMU74_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) * + SMU74_MAX_LEVELS_GRAPHICS; + uint32_t i; + + for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { + levels[i].ActivityLevel = + cpu_to_be16(request->activity_threshold); + levels[i].EnabledForActivity = 1; + levels[i].UpHyst = request->up_hyst; + levels[i].DownHyst = request->down_hyst; + } + + return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + array_size, SMC_RAM_END); +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h index 5ade3cea8bb7..1df8154d0626 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h @@ -37,6 +37,8 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member); uint32_t polaris10_get_mac_definition(uint32_t value); int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr); bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr); +int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index ce20ae2e520e..9616cedc139c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -409,4 +409,5 @@ const struct pp_smumgr_func polaris10_smu_funcs = { .populate_all_memory_levels = polaris10_populate_all_memory_levels, .get_mac_definition = polaris10_get_mac_definition, .is_dpm_running = polaris10_is_dpm_running, + .populate_requested_graphic_levels = polaris10_populate_requested_graphic_levels, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c0956a4207a9..c0d75766bbc8 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -86,6 +86,15 @@ int smum_early_init(struct pp_instance *handle) return -EINVAL; } break; + case AMDGPU_FAMILY_AI: + switch (smumgr->chip_id) { + case CHIP_VEGA10: + smumgr->smumgr_funcs = &vega10_smu_funcs; + break; + default: + return -EINVAL; + } + break; default: kfree(smumgr); return -EINVAL; @@ -374,3 +383,13 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr) return true; } + +int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request) +{ + if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels) + return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels( + hwmgr, request); + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index 331b0aba4a13..65d3a4893958 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -2219,6 +2219,42 @@ static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0]; } +static void 
tonga_save_default_power_profile(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct SMU72_Discrete_GraphicsLevel *levels = + data->smc_state_table.GraphicsLevel; + unsigned min_level = 1; + + hwmgr->default_gfx_power_profile.activity_threshold = + be16_to_cpu(levels[0].ActivityLevel); + hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst; + hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst; + hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE; + + hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile; + hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE; + + /* Workaround compute SDMA instability: disable lowest SCLK + * DPM level. Optimize compute power profile: Use only highest + * 2 power levels (if more than 2 are available), Hysteresis: + * 0ms up, 5ms down + */ + if (data->smc_state_table.GraphicsDpmLevelCount > 2) + min_level = data->smc_state_table.GraphicsDpmLevelCount - 2; + else if (data->smc_state_table.GraphicsDpmLevelCount == 2) + min_level = 1; + else + min_level = 0; + hwmgr->default_compute_power_profile.min_sclk = + be32_to_cpu(levels[min_level].SclkFrequency); + hwmgr->default_compute_power_profile.up_hyst = 0; + hwmgr->default_compute_power_profile.down_hyst = 5; + + hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile; + hwmgr->compute_power_profile = hwmgr->default_compute_power_profile; +} + /** * Initializes the SMC table and uploads it * @@ -2468,6 +2504,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((!result), "Failed to populate initialize MC Reg table !", return result); + tonga_save_default_power_profile(hwmgr); + return 0; } @@ -2657,7 +2695,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); } } - pr_warning("can't get the offset of type %x member %x\n", type, member); + pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; } @@ -2681,7 +2719,7 @@ uint32_t tonga_get_mac_definition(uint32_t value) case SMU_MAX_LEVELS_MVDD: return SMU72_MAX_LEVELS_MVDD; } - pr_warning("can't get the mac value %x\n", value); + pr_warn("can't get the mac value %x\n", value); return 0; } @@ -3210,3 +3248,28 @@ bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) ? 
true : false; } + +int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *) + (hwmgr->smumgr->backend); + struct SMU72_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) * + SMU72_MAX_LEVELS_GRAPHICS; + uint32_t i; + + for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { + levels[i].ActivityLevel = + cpu_to_be16(request->activity_threshold); + levels[i].EnabledForActivity = 1; + levels[i].UpHyst = request->up_hyst; + levels[i].DownHyst = request->down_hyst; + } + + return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + array_size, SMC_RAM_END); +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h index 8ae169ff541d..962860f13f24 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h @@ -56,5 +56,7 @@ uint32_t tonga_get_mac_definition(uint32_t value); int tonga_process_firmware_header(struct pp_hwmgr *hwmgr); int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr); +int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index a7d55366f2d2..c35f4c35c9ca 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -209,4 +209,5 @@ const struct pp_smumgr_func tonga_smu_funcs = { .get_mac_definition = tonga_get_mac_definition, .initialize_mc_reg_table = tonga_initialize_mc_reg_table, .is_dpm_running = tonga_is_dpm_running, + .populate_requested_graphic_levels = tonga_populate_requested_graphic_levels, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c new file mode 100644 index 000000000000..2685f02ab551 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -0,0 +1,564 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "smumgr.h" +#include "vega10_inc.h" +#include "pp_soc15.h" +#include "vega10_smumgr.h" +#include "vega10_ppsmc.h" +#include "smu9_driver_if.h" + +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "smu_ucode_xfer_vi.h" +#include "smu7_smumgr.h" + +#define AVFS_EN_MSB 1568 +#define AVFS_EN_LSB 1568 + +#define VOLTAGE_SCALE 4 + +/* Microcode file is stored in this buffer */ +#define BUFFER_SIZE 80000 +#define MAX_STRING_SIZE 15 +#define BUFFER_SIZETWO 131072 /* 128 *1024 */ + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +#define smnMP1_FIRMWARE_FLAGS 0x3010028 +#define smnMP0_FW_INTF 0x3010104 +#define smnMP1_PUB_CTRL 0x3010b14 + +static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr) +{ + uint32_t mp1_fw_flags, reg; + + reg = soc15_get_register_offset(NBIF_HWID, 0, + mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); + + cgs_write_register(smumgr->device, reg, + (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); + + reg = soc15_get_register_offset(NBIF_HWID, 0, + mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); + + mp1_fw_flags = cgs_read_register(smumgr->device, reg); + + if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) + return true; + + return false; +} + +/** +* Check if SMC has responded to previous message. +* +* @param smumgr the address of the powerplay hardware manager. +* @return TRUE SMC has responded, FALSE otherwise. +*/ +static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr) +{ + uint32_t reg; + + if (!vega10_is_smc_ram_running(smumgr)) + return -1; + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + + smum_wait_for_register_unequal(smumgr, reg, + 0, MP1_C2PMSG_90__CONTENT_MASK); + + return cgs_read_register(smumgr->device, reg); +} + +/** +* Send a message to the SMC, and do not wait for its response. +* +* @param smumgr the address of the powerplay hardware manager. +* @param msg the message to send. +* @return Always return 0. +*/ +int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, + uint16_t msg) +{ + uint32_t reg; + + if (!vega10_is_smc_ram_running(smumgr)) + return -1; + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); + cgs_write_register(smumgr->device, reg, msg); + + return 0; +} + +/** +* Send a message to the SMC, and wait for its response. +* +* @param smumgr the address of the powerplay hardware manager. +* @param msg the message to send. +* @return The response that came from the SMC. +*/ +int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + uint32_t reg; + + if (!vega10_is_smc_ram_running(smumgr)) + return -1; + + vega10_wait_for_response(smumgr); + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + cgs_write_register(smumgr->device, reg, 0); + + vega10_send_msg_to_smc_without_waiting(smumgr, msg); + + PP_ASSERT_WITH_CODE(vega10_wait_for_response(smumgr) == 1, + "Failed to send Message.", + return -1); + + return 0; +} + +/** + * Send a message to the SMC with parameter + * @param smumgr: the address of the powerplay hardware manager. + * @param msg: the message to send. + * @param parameter: the parameter to send + * @return The response that came from the SMC. 
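* Note: the call itself returns 0 on success and -1 on failure; any result value the SMC produces is read back separately via vega10_read_arg_from_smc().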
+ */ +int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter) +{ + uint32_t reg; + + if (!vega10_is_smc_ram_running(smumgr)) + return -1; + + vega10_wait_for_response(smumgr); + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + cgs_write_register(smumgr->device, reg, 0); + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + cgs_write_register(smumgr->device, reg, parameter); + + vega10_send_msg_to_smc_without_waiting(smumgr, msg); + + PP_ASSERT_WITH_CODE(vega10_wait_for_response(smumgr) == 1, + "Failed to send Message.", + return -1); + + return 0; +} + + +/** +* Send a message to the SMC with parameter, do not wait for response +* +* @param smumgr: the address of the powerplay hardware manager. +* @param msg: the message to send. +* @param parameter: the parameter to send +* @return The response that came from the SMC. +*/ +int vega10_send_msg_to_smc_with_parameter_without_waiting( + struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +{ + uint32_t reg; + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + cgs_write_register(smumgr->device, reg, parameter); + + return vega10_send_msg_to_smc_without_waiting(smumgr, msg); +} + +/** +* Retrieve an argument from SMC. +* +* @param smumgr the address of the powerplay hardware manager. +* @param arg pointer to store the argument from SMC. +* @return Always return 0. +*/ +int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg) +{ + uint32_t reg; + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + + *arg = cgs_read_register(smumgr->device, reg); + + return 0; +} + +/** +* Copy table from SMC into driver FB +* @param smumgr the address of the SMC manager +* @param table_id the driver's table ID to copy from +*/ +int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, + uint8_t *table, int16_t table_id) +{ + struct vega10_smumgr *priv = + (struct vega10_smumgr *)(smumgr->backend); + + PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, + "Invalid SMU Table ID!", return -1;); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, + "Invalid SMU Table version!", return -1;); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, + "Invalid SMU Table Length!", return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetDriverDramAddrHigh, + priv->smu_tables.entry[table_id].table_addr_high) == 0, + "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetDriverDramAddrLow, + priv->smu_tables.entry[table_id].table_addr_low) == 0, + "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", + return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_TransferTableSmu2Dram, + priv->smu_tables.entry[table_id].table_id) == 0, + "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", + return -1;); + + memcpy(table, priv->smu_tables.entry[table_id].table, + priv->smu_tables.entry[table_id].size); + + return 0; +} + +/** +* Copy table from Driver FB into SMC +* @param smumgr the address of the SMC manager +* @param table_id the table to copy from +*/ +int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, + uint8_t *table, int16_t table_id) +{ + struct vega10_smumgr *priv = + (struct 
vega10_smumgr *)(smumgr->backend); + + PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, + "Invalid SMU Table ID!", return -1;); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, + "Invalid SMU Table version!", return -1;); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, + "Invalid SMU Table Length!", return -1;); + + memcpy(priv->smu_tables.entry[table_id].table, table, + priv->smu_tables.entry[table_id].size); + + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetDriverDramAddrHigh, + priv->smu_tables.entry[table_id].table_addr_high) == 0, + "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", + return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetDriverDramAddrLow, + priv->smu_tables.entry[table_id].table_addr_low) == 0, + "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", + return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_TransferTableDram2Smu, + priv->smu_tables.entry[table_id].table_id) == 0, + "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", + return -1;); + + return 0; +} + +int vega10_perform_btc(struct pp_smumgr *smumgr) +{ + PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc_with_parameter( + smumgr, PPSMC_MSG_RunBtc, 0), + "Attempt to run DC BTC Failed!", + return -1); + return 0; +} + +int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +{ + PP_ASSERT_WITH_CODE(avfs_table, + "No access to SMC AVFS Table", + return -1); + + return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE); +} + +int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +{ + PP_ASSERT_WITH_CODE(avfs_table, + "No access to SMC AVFS Table", + return -1); + + return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE); +} + +int vega10_enable_smc_features(struct pp_smumgr *smumgr, + bool enable, uint32_t feature_mask) +{ + int msg = enable ? PPSMC_MSG_EnableSmuFeatures : + PPSMC_MSG_DisableSmuFeatures; + + return vega10_send_msg_to_smc_with_parameter(smumgr, + msg, feature_mask); +} + +int vega10_get_smc_features(struct pp_smumgr *smumgr, + uint32_t *features_enabled) +{ + if (!vega10_send_msg_to_smc(smumgr, + PPSMC_MSG_GetEnabledSmuFeatures)) { + if (!vega10_read_arg_from_smc(smumgr, features_enabled)) + return 0; + } + + return -1; +} + +int vega10_set_tools_address(struct pp_smumgr *smumgr) +{ + struct vega10_smumgr *priv = + (struct vega10_smumgr *)(smumgr->backend); + + if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high || + priv->smu_tables.entry[TOOLSTABLE].table_addr_low) { + if (!vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetToolsDramAddrHigh, + priv->smu_tables.entry[TOOLSTABLE].table_addr_high)) + vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetToolsDramAddrLow, + priv->smu_tables.entry[TOOLSTABLE].table_addr_low); + } + return 0; +} + +static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) +{ + uint32_t smc_driver_if_version; + + PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr, + PPSMC_MSG_GetDriverIfVersion), + "Attempt to get SMC IF Version Number Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(smumgr, + &smc_driver_if_version), + "Attempt to read SMC IF Version Number Failed!", + return -1); + + if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) + return -1; + + return 0; +} + +/** +* Write a 32bit value to the SMC SRAM space. +* ALL PARAMETERS ARE IN HOST BYTE ORDER. 
+* @param smumgr the address of the powerplay hardware manager. +* @param smc_addr the address in the SMC RAM to access. +* @param value to write to the SMC SRAM. +*/ +static int vega10_smu_init(struct pp_smumgr *smumgr) +{ + struct vega10_smumgr *priv; + uint64_t mc_addr; + void *kaddr = NULL; + unsigned long handle, tools_size; + int ret; + struct cgs_firmware_info info = {0}; + + ret = cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), + &info); + if (ret || !info.kptr) + return -EINVAL; + + priv = kzalloc(sizeof(struct vega10_smumgr), GFP_KERNEL); + + if (!priv) + return -ENOMEM; + + smumgr->backend = priv; + + /* allocate space for pptable */ + smu_allocate_memory(smumgr->device, + sizeof(PPTable_t), + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &kaddr, + &handle); + + PP_ASSERT_WITH_CODE(kaddr, + "[vega10_smu_init] Out of memory for pptable.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)handle); + return -1); + + priv->smu_tables.entry[PPTABLE].version = 0x01; + priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t); + priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE; + priv->smu_tables.entry[PPTABLE].table_addr_high = + smu_upper_32_bits(mc_addr); + priv->smu_tables.entry[PPTABLE].table_addr_low = + smu_lower_32_bits(mc_addr); + priv->smu_tables.entry[PPTABLE].table = kaddr; + priv->smu_tables.entry[PPTABLE].handle = handle; + + /* allocate space for watermarks table */ + smu_allocate_memory(smumgr->device, + sizeof(Watermarks_t), + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &kaddr, + &handle); + + PP_ASSERT_WITH_CODE(kaddr, + "[vega10_smu_init] Out of memory for wmtable.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)handle); + return -1); + + priv->smu_tables.entry[WMTABLE].version = 0x01; + priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); + priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS; + priv->smu_tables.entry[WMTABLE].table_addr_high = + smu_upper_32_bits(mc_addr); + priv->smu_tables.entry[WMTABLE].table_addr_low = + smu_lower_32_bits(mc_addr); + priv->smu_tables.entry[WMTABLE].table = kaddr; + priv->smu_tables.entry[WMTABLE].handle = handle; + + /* allocate space for AVFS table */ + smu_allocate_memory(smumgr->device, + sizeof(AvfsTable_t), + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &kaddr, + &handle); + + PP_ASSERT_WITH_CODE(kaddr, + "[vega10_smu_init] Out of memory for avfs table.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)handle); + return -1); + + priv->smu_tables.entry[AVFSTABLE].version = 0x01; + priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t); + priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS; + priv->smu_tables.entry[AVFSTABLE].table_addr_high = + smu_upper_32_bits(mc_addr); + priv->smu_tables.entry[AVFSTABLE].table_addr_low = + smu_lower_32_bits(mc_addr); + priv->smu_tables.entry[AVFSTABLE].table = kaddr; + priv->smu_tables.entry[AVFSTABLE].handle = handle; + + tools_size = 0; + if (tools_size) { + smu_allocate_memory(smumgr->device, + tools_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &kaddr, + &handle); + + if (kaddr) { 
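/* Register the tools (PM status log) table only if its buffer was actually allocated. */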
+ priv->smu_tables.entry[TOOLSTABLE].version = 0x01; + priv->smu_tables.entry[TOOLSTABLE].size = tools_size; + priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG; + priv->smu_tables.entry[TOOLSTABLE].table_addr_high = + smu_upper_32_bits(mc_addr); + priv->smu_tables.entry[TOOLSTABLE].table_addr_low = + smu_lower_32_bits(mc_addr); + priv->smu_tables.entry[TOOLSTABLE].table = kaddr; + priv->smu_tables.entry[TOOLSTABLE].handle = handle; + } + } + + return 0; +} + +static int vega10_smu_fini(struct pp_smumgr *smumgr) +{ + struct vega10_smumgr *priv = + (struct vega10_smumgr *)(smumgr->backend); + + if (priv) { + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); + if (priv->smu_tables.entry[TOOLSTABLE].table) + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); + kfree(smumgr->backend); + smumgr->backend = NULL; + } + return 0; +} + +static int vega10_start_smu(struct pp_smumgr *smumgr) +{ + PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr), + "Failed to verify SMC interface!", + return -1); + return 0; +} + +const struct pp_smumgr_func vega10_smu_funcs = { + .smu_init = &vega10_smu_init, + .smu_fini = &vega10_smu_fini, + .start_smu = &vega10_start_smu, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = &vega10_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, +}; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h new file mode 100644 index 000000000000..ad050212426d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h @@ -0,0 +1,70 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _VEGA10_SMUMANAGER_H_ +#define _VEGA10_SMUMANAGER_H_ + +#include "vega10_hwmgr.h" + +enum smu_table_id { + PPTABLE = 0, + WMTABLE, + AVFSTABLE, + TOOLSTABLE, + MAX_SMU_TABLE, +}; + +struct smu_table_entry { + uint32_t version; + uint32_t size; + uint32_t table_id; + uint32_t table_addr_high; + uint32_t table_addr_low; + uint8_t *table; + unsigned long handle; +}; + +struct smu_table_array { + struct smu_table_entry entry[MAX_SMU_TABLE]; +}; + +struct vega10_smumgr { + struct smu_table_array smu_tables; +}; + +int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg); +int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, + uint8_t *table, int16_t table_id); +int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, + uint8_t *table, int16_t table_id); +int vega10_enable_smc_features(struct pp_smumgr *smumgr, + bool enable, uint32_t feature_mask); +int vega10_get_smc_features(struct pp_smumgr *smumgr, + uint32_t *features_enabled); +int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); +int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); +int vega10_perform_btc(struct pp_smumgr *smumgr); + +int vega10_set_tools_address(struct pp_smumgr *smumgr); + +#endif + |
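For reference, a minimal sketch (not part of the patch) of how hwmgr-side code could use the new smum_populate_requested_graphic_levels() hook wired up above. The example_apply_gfx_profile() wrapper is a hypothetical name; it assumes hwmgr.h and smumgr.h from this series are in scope, and everything it touches (struct amd_pp_profile, hwmgr->default_gfx_power_profile, the smumgr dispatch) is declared in the diffs above.

static int example_apply_gfx_profile(struct pp_hwmgr *hwmgr)
{
	/* Start from the defaults cached by *_save_default_power_profile(). */
	struct amd_pp_profile request = hwmgr->default_gfx_power_profile;

	/* React to rising load immediately; keep the default down hysteresis. */
	request.up_hyst = 0;

	/*
	 * Dispatches through pp_smumgr_func.populate_requested_graphic_levels
	 * (fiji/polaris10/tonga in this series), which rewrites ActivityLevel,
	 * UpHyst and DownHyst for every graphics DPM level and copies the
	 * GraphicsLevel array back into SMC RAM.
	 */
	return smum_populate_requested_graphic_levels(hwmgr, &request);
}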