author    Linus Torvalds <torvalds@linux-foundation.org>  2018-12-25 11:48:26 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-12-25 11:48:26 -0800
commit    4971f090aa7f6ce5daa094ce4334f6618f93a7eb (patch)
tree      45d75782b7dedbec76a3ab82d2769f7707668071 /drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
parent    c76cd634eb5bfd497617ea224a54a03b545c8c4d (diff)
parent    2a3c83f5fe0770d13bbb71b23674886ff4111f44 (diff)
Merge tag 'drm-next-2018-12-14' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "Core:
   - shared fencing staging removal
   - drop transactional atomic helpers and move helpers to new location
   - DP/MST atomic cleanup
   - Leasing cleanups and drop EXPORT_SYMBOL
   - Convert drivers to atomic helpers and generic fbdev.
   - removed deprecated obj_ref/unref in favour of get/put
   - Improve dumb callback documentation
   - MODESET_LOCK_BEGIN/END helpers

  panels:
   - CDTech panels, Banana Pi Panel, DLC1010GIG,
   - Olimex LCD-O-LinuXino, Samsung S6D16D0, Truly NT35597 WQXGA,
   - Himax HX8357D, simulated RTSM AEMv8.
   - GPD Win2 panel
   - AUO G101EVN010

  vgem:
   - render node support

  ttm:
   - move global init out of drivers
   - fix LRU handling for ghost objects
   - Support for simultaneous submissions to multiple engines

  scheduler:
   - timeout/fault handling changes to help GPU recovery
   - helpers for hw with preemption support

  i915:
   - Scaler/Watermark fixes
   - DP MST + powerwell fixes
   - PSR fixes
   - Break long get/put shmemfs pages
   - Icelake fixes
   - Icelake DSI video mode enablement
   - Engine workaround improvements

  amdgpu:
   - freesync support
   - GPU reset enabled on CI, VI, SOC15 dGPUs
   - ABM support in DC
   - KFD support for vega12/polaris12
   - SDMA paging queue on vega
   - More amdkfd code sharing
   - DCC scanout on GFX9
   - DC kerneldoc
   - Updated SMU firmware for GFX8 chips
   - XGMI PSP + hive reset support
   - GPU reset
   - DC trace support
   - Powerplay updates for newer Polaris
   - Cursor plane update fast path
   - kfd dma-buf support

  virtio-gpu:
   - add EDID support

  vmwgfx:
   - pageflip with damage support

  nouveau:
   - Initial Turing TU104/TU106 modesetting support

  msm:
   - a2xx gpu support for apq8060 and imx5
   - a2xx gpummu support
   - mdp4 display support for apq8060
   - DPU fixes and cleanups
   - enhanced profiling support
   - debug object naming interface
   - get_iova/page pinning decoupling

  tegra:
   - Tegra194 host1x, VIC and display support enabled
   - Audio over HDMI for Tegra186 and Tegra194

  exynos:
   - DMA/IOMMU refactoring
   - plane alpha + blend mode support
   - Color format fixes for mixer driver

  rcar-du:
   - R8A7744 and R8A77470 support
   - R8A77965 LVDS support

  imx:
   - fbdev emulation fix
   - multi-tiled scaling fixes
   - SPDX identifiers

  rockchip:
   - dw_hdmi support
   - dw-mipi-dsi + dual dsi support
   - mailbox read size fix

  qxl:
   - fix cursor pinning

  vc4:
   - YUV support (scaling + cursor)

  v3d:
   - enable TFU (Texture Formatting Unit)

  mali-dp:
   - add support for linear tiled formats

  sun4i:
   - Display Engine 3 support
   - H6 DE3 mixer 0 support
   - H6 display engine support
   - dw-hdmi support
   - H6 HDMI phy support
   - implicit fence waiting
   - BGRX8888 support

  meson:
   - Overlay plane support
   - implicit fence waiting
   - HDMI 1.4 4k modes

  bridge:
   - i2c fixes for sii902x"

* tag 'drm-next-2018-12-14' of git://anongit.freedesktop.org/drm/drm: (1403 commits)
  drm/amd/display: Add fast path for cursor plane updates
  drm/amdgpu: Enable GPU recovery by default for CI
  drm/amd/display: Fix duplicating scaling/underscan connector state
  drm/amd/display: Fix unintialized max_bpc state values
  Revert "drm/amd/display: Set RMX_ASPECT as default"
  drm/amdgpu: Fix stub function name
  drm/msm/dpu: Fix clock issue after bind failure
  drm/msm/dpu: Clean up dpu_media_info.h static inline functions
  drm/msm/dpu: Further cleanups for static inline functions
  drm/msm/dpu: Cleanup the debugfs functions
  drm/msm/dpu: Remove dpu_irq and unused functions
  drm/msm: Make irq_postinstall optional
  drm/msm/dpu: Cleanup callers of dpu_hw_blk_init
  drm/msm/dpu: Remove unused functions
  drm/msm/dpu: Remove dpu_crtc_is_enabled()
  drm/msm/dpu: Remove dpu_crtc_get_mixer_height
  drm/msm/dpu: Remove dpu_dbg
  drm/msm: dpu: Remove crtc_lock
  drm/msm: dpu: Remove vblank_requested flag from dpu_crtc
  drm/msm: dpu: Separate crtc assignment from vblank enable
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  472
1 file changed, 344 insertions(+), 128 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30bc345d6fdf..b60afeade50a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
+#include "amdgpu_xgmi.h"
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -513,6 +515,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
+
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
@@ -525,15 +528,26 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
+ amdgpu_asic_init_doorbell_index(adev);
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
- AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+ adev->doorbell_index.max_assignment+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
+ /* For Vega, reserve and map two pages on the doorbell BAR since the
+ * SDMA paging queue doorbell uses the second page. The
+ * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+ * doorbells are in the first page, so with the paging queue enabled
+ * the max num_doorbells needs one extra page (0x400 in dwords).
+ */
+ if (adev->asic_type >= CHIP_VEGA10)
+ adev->doorbell.num_doorbells += 0x400;
+
adev->doorbell.ptr = ioremap(adev->doorbell.base,
adev->doorbell.num_doorbells *
sizeof(u32));
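
For reference, the sizing logic this hunk introduces, restated as a standalone sketch: the per-ASIC adev->doorbell_index.max_assignment replaces the old global AMDGPU_DOORBELL_MAX_ASSIGNMENT cap, and Vega-and-later parts get one extra 4 KB page (0x400 dword-sized doorbells) for the SDMA paging queue. The helper below is hypothetical; only the field semantics and the 0x400 constant come from the diff.

	/* Hypothetical restatement of the doorbell sizing above; not in-tree code. */
	static u32 example_num_doorbells(u64 bar_size, u32 max_assignment, bool vega_or_later)
	{
		/* Doorbells are 32-bit, so the BAR holds bar_size / sizeof(u32) of
		 * them, capped at the highest index the ASIC actually assigns. */
		u32 count = min_t(u32, bar_size / sizeof(u32), max_assignment + 1);

		/* Vega's SDMA paging-queue doorbell lives in the second 4 KB page,
		 * i.e. 0x400 additional dword-sized doorbell slots. */
		if (vega_or_later)
			count += 0x400;

		return count;
	}
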
@@ -1656,7 +1670,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* right after GMC hw init, we create CSA */
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_allocate_static_csa(adev);
+ r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("allocate CSA failed %d\n", r);
return r;
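
The static-CSA helpers now take the buffer object, domain, and size explicitly instead of hard-coding adev->virt.csa_obj internally; this hunk and the matching free further down in amdgpu_device_ip_fini() show the new call pair. A condensed view of the pairing as used in this file (taken from the diff, error handling trimmed; not a general API description):

	/* allocate during IP init (SR-IOV only in this file) */
	r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
				       AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_CSA_SIZE);
	if (r)
		return r;
	/* ... */
	/* release during IP fini */
	amdgpu_free_static_csa(&adev->virt.csa_obj);
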
@@ -1681,7 +1697,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
return r;
- amdgpu_xgmi_add_device(adev);
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
if (amdgpu_sriov_vf(adev))
@@ -1848,6 +1865,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -1890,7 +1910,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_ucode_free_bo(adev);
- amdgpu_free_static_csa(adev);
+ amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
}
@@ -2337,6 +2357,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+ struct amdgpu_device *adev =
+ container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ if (adev->asic_reset_res)
+ DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
+ adev->asic_reset_res, adev->ddev->unique);
+}
+
+
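
The new worker exists so that, during hive-wide recovery, each node's ASIC reset can be kicked off asynchronously and run in parallel (see amdgpu_do_asic_reset() further down, which queues it on system_highpri_wq and later flush_work()s it). The general shape of the pattern, with hypothetical names standing in for the amdgpu specifics:

	/* Sketch of the embedded-work pattern used above (hypothetical names). */
	struct my_dev {
		struct work_struct reset_work;	/* embedded, like adev->xgmi_reset_work */
		int reset_res;			/* parked result, like adev->asic_reset_res */
	};

	static void my_reset_func(struct work_struct *work)
	{
		/* Recover the owning device from the embedded work_struct. */
		struct my_dev *dev = container_of(work, struct my_dev, reset_work);

		dev->reset_res = my_hw_reset(dev);	/* hypothetical reset routine */
	}

	/*
	 * Setup once:  INIT_WORK(&dev->reset_work, my_reset_func);
	 * Fan out:     queue_work(system_highpri_wq, &dev->reset_work);
	 * Fan in:      flush_work(&dev->reset_work); r = dev->reset_res;
	 */
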
/**
* amdgpu_device_init - initialize the driver
*
@@ -2435,6 +2468,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
adev->gfx.gfx_off_req_count = 1;
adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
@@ -2455,9 +2490,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* doorbell bar mapping */
- amdgpu_device_doorbell_init(adev);
-
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
@@ -2476,6 +2508,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* doorbell bar mapping and doorbell index init */
+ amdgpu_device_doorbell_init(adev);
+
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@@ -3148,86 +3183,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
- bool need_full_reset, vram_lost = 0;
- int r;
-
- need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
- if (!need_full_reset) {
- amdgpu_device_ip_pre_soft_reset(adev);
- r = amdgpu_device_ip_soft_reset(adev);
- amdgpu_device_ip_post_soft_reset(adev);
- if (r || amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
- need_full_reset = true;
- }
- }
-
- if (need_full_reset) {
- r = amdgpu_device_ip_suspend(adev);
-
-retry:
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_device_ip_resume_phase1(adev);
- if (r)
- goto out;
-
- vram_lost = amdgpu_device_check_vram_lost(adev);
- if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
- atomic_inc(&adev->vram_lost_counter);
- }
-
- r = amdgpu_gtt_mgr_recover(
- &adev->mman.bdev.man[TTM_PL_TT]);
- if (r)
- goto out;
-
- r = amdgpu_device_fw_loading(adev);
- if (r)
- return r;
-
- r = amdgpu_device_ip_resume_phase2(adev);
- if (r)
- goto out;
-
- if (vram_lost)
- amdgpu_device_fill_reset_magic(adev);
- }
- }
-
-out:
- if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(adev);
- need_full_reset = true;
- goto retry;
- }
- }
-
- if (!r)
- r = amdgpu_device_recover_vram(adev);
-
- return r;
-}
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
@@ -3295,40 +3250,46 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
return false;
}
- if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 &&
- !amdgpu_sriov_vf(adev))) {
- DRM_INFO("GPU recovery disabled.\n");
- return false;
- }
+ if (amdgpu_gpu_recovery == 0)
+ goto disabled;
- return true;
-}
+ if (amdgpu_sriov_vf(adev))
+ return true;
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job)
-{
- int i, r, resched;
+ if (amdgpu_gpu_recovery == -1) {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA20:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ break;
+ default:
+ goto disabled;
+ }
+ }
- dev_info(adev->dev, "GPU reset begin!\n");
+ return true;
- mutex_lock(&adev->lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
+disabled:
+ DRM_INFO("GPU recovery disabled.\n");
+ return false;
+}
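
With this rewrite the amdgpu_gpu_recovery module parameter behaves as follows: 0 disables recovery outright, SR-IOV VFs always recover, -1 ("auto") enables recovery only for the ASICs listed in the switch above (CI, VI and SOC15 dGPUs), and any other value (e.g. 1) forces it on. A condensed, hypothetical restatement of that decision, not the in-tree function:

	/* Hypothetical condensation of amdgpu_device_should_recover_gpu() above. */
	static bool example_should_recover(int gpu_recovery, bool is_sriov_vf,
					   bool asic_whitelisted)
	{
		if (gpu_recovery == 0)
			return false;		/* explicitly disabled */
		if (is_sriov_vf)
			return true;		/* VFs always attempt recovery */
		if (gpu_recovery == -1)
			return asic_whitelisted; /* "auto": only the ASICs listed above */
		return true;			/* any other value forces recovery on */
	}
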
- /* Block kfd */
- amdgpu_amdkfd_pre_reset(adev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ bool *need_full_reset_arg)
+{
+ int i, r = 0;
+ bool need_full_reset = *need_full_reset_arg;
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3348,10 +3309,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_fence_driver_force_completion(ring);
}
- if (amdgpu_sriov_vf(adev))
- r = amdgpu_device_reset_sriov(adev, job ? false : true);
- else
- r = amdgpu_device_reset(adev);
+
+
+ if (!amdgpu_sriov_vf(adev)) {
+
+ if (!need_full_reset)
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+ if (!need_full_reset) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+ }
+
+ if (need_full_reset)
+ r = amdgpu_device_ip_suspend(adev);
+
+ *need_full_reset_arg = need_full_reset;
+ }
+
+ return r;
+}
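
The monolithic amdgpu_device_reset() removed above is replaced by the helpers introduced here and below, which amdgpu_device_gpu_recover() (rewritten further down) chains together per device. Roughly, for a single bare-metal, non-XGMI device, the sequence is as follows (a sketch of the call order only; list setup, locking details and error handling elided):

	/* Sketch of the per-device call order, not a verbatim excerpt. */
	bool need_full_reset = false;

	amdgpu_device_lock_adev(adev);				   /* take reset lock, block KFD */
	amdgpu_device_pre_asic_reset(adev, job, &need_full_reset); /* stop rings, try soft reset path */
	amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset); /* full reset + resume, if needed */
	amdgpu_device_post_asic_reset(adev, job);		   /* restart schedulers, resume display */
	amdgpu_device_unlock_adev(adev);			   /* unblock KFD, drop reset lock */
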
+
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+ struct list_head *device_list_handle,
+ bool *need_full_reset_arg)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+ int r = 0;
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper links negotiation in FW (within 1 sec)
+ */
+ if (need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
+ r, tmp_adev->ddev->unique);
+ break;
+ }
+ }
+
+ /* For XGMI, wait for all PSP resets to complete before proceeding */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (need_full_reset) {
+ /* post card */
+ if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+ DRM_WARN("asic atom init failed!");
+
+ if (!r) {
+ dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_device_ip_resume_phase1(tmp_adev);
+ if (r)
+ goto out;
+
+ vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&tmp_adev->vram_lost_counter);
+ }
+
+ r = amdgpu_gtt_mgr_recover(
+ &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+ if (r)
+ goto out;
+
+ r = amdgpu_device_fw_loading(tmp_adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_resume_phase2(tmp_adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+ amdgpu_device_fill_reset_magic(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ }
+ }
+
+
+out:
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_device_ip_suspend(tmp_adev);
+ need_full_reset = true;
+ r = -EAGAIN;
+ goto end;
+ }
+ }
+
+ if (!r)
+ r = amdgpu_device_recover_vram(tmp_adev);
+ else
+ tmp_adev->asic_reset_res = r;
+ }
+
+end:
+ *need_full_reset_arg = need_full_reset;
+ return r;
+}
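
Note the error convention this helper establishes: a failed post-reset IB ring test flips need_full_reset to true and returns -EAGAIN, which the rewritten amdgpu_device_gpu_recover() below treats as "redo the whole reset", while any other failure is parked in the device's asic_reset_res for amdgpu_device_post_asic_reset() to inspect. Schematically, on the caller side (a sketch, not an excerpt):

	retry:
		/* ... amdgpu_device_pre_asic_reset() has run for the devices in the list ... */
		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
		if (r == -EAGAIN)
			goto retry;	/* IB test failed: suspend IPs again and do a full reset */
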
+
+static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -3363,7 +3458,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* or all rings (in the case @job is NULL)
* after above amdgpu_reset accomplished
*/
- if ((!job || job->base.sched == &ring->sched) && !r)
+ if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
@@ -3373,21 +3468,142 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
drm_helper_resume_force_mode(adev->ddev);
}
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+ adev->asic_reset_res = 0;
+}
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
- } else {
- dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
- }
+static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->lock_reset);
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
+ /* Block kfd */
+ amdgpu_amdkfd_pre_reset(adev);
+}
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
/*unlock kfd */
amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize Asic
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
+ bool need_full_reset = false;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct list_head device_list, *device_list_handle = NULL;
+
+ INIT_LIST_HEAD(&device_list);
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ /*
+ * In case of XGMI hive disallow concurrent resets to be triggered
+ * by different nodes. There is also no point, since the node already
+ * executing the reset will reset all the other nodes in the hive anyway.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
+ !mutex_trylock(&hive->hive_lock))
+ return 0;
+
+ /* Start with adev pre asic reset first for soft reset check.*/
+ amdgpu_device_lock_adev(adev);
+ r = amdgpu_device_pre_asic_reset(adev,
+ job,
+ &need_full_reset);
+ if (r) {
+ /*TODO Should we stop ?*/
+ DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, adev->ddev->unique);
+ adev->asic_reset_res = r;
+ }
+
+ /* Build list of devices to reset */
+ if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive) {
+ amdgpu_device_unlock_adev(adev);
+ return -ENODEV;
+ }
+
+ /*
+ * In XGMI hive mode, the device reset is done for all nodes in the hive
+ * to retrain all XGMI links, so the reset sequence is executed in a loop
+ * on all nodes.
+ */
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+retry: /* Rest of adevs pre asic reset from XGMI hive. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+ if (tmp_adev == adev)
+ continue;
+
+ amdgpu_device_lock_adev(tmp_adev);
+ r = amdgpu_device_pre_asic_reset(tmp_adev,
+ NULL,
+ &need_full_reset);
+ /*TODO Should we stop ?*/
+ if (r) {
+ DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, tmp_adev->ddev->unique);
+ tmp_adev->asic_reset_res = r;
+ }
+ }
+
+ /* Actual ASIC resets if needed.*/
+ /* TODO Implement XGMI hive reset logic for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_reset_sriov(adev, job ? false : true);
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+ /* Post ASIC reset for all devs. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+
+ if (r) {
+ /* bad news, how to tell it to userspace ? */
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ }
+
+ amdgpu_device_unlock_adev(tmp_adev);
+ }
+
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ mutex_unlock(&hive->hive_lock);
+
+ if (r)
+ dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
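
Putting the pieces together for the XGMI case: the hive lock serializes recovery attempts across nodes, every node in hive->device_list goes through the pre-reset pass, the full resets are fanned out in parallel via the xgmi_reset_work workers, and only then is each node resumed, post-processed and unlocked. A compressed sketch of that control flow (names from the code above, structure simplified and error handling omitted):

	/* Compressed sketch of the hive-wide recovery flow above. */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive && !mutex_trylock(&hive->hive_lock))
		return 0;			/* another node is already resetting the hive */

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		amdgpu_device_lock_adev(tmp_adev);
		amdgpu_device_pre_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL,
					     &need_full_reset);
	}

	r = amdgpu_do_asic_reset(hive, &hive->device_list, &need_full_reset);
	/* -EAGAIN would loop back to the pre-reset pass in the real code */

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
		amdgpu_device_unlock_adev(tmp_adev);
	}

	mutex_unlock(&hive->hive_lock);
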