Diffstat (limited to 'drivers')
45 files changed, 388 insertions(+), 118 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3aa8733f832a..9bf06042619a 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -29,6 +29,7 @@
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
 
 static DEFINE_SPINLOCK(dma_fence_stub_lock);
 static struct dma_fence dma_fence_stub;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4376b17ca594..56f8ca2a3bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -464,8 +464,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 		}
 	}
 	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-		if ((adev->flags & AMD_IS_PX) &&
-		    amdgpu_atpx_dgpu_req_power_for_displays()) {
+		if (adev->flags & AMD_IS_PX) {
 			pm_runtime_get_sync(adev->ddev->dev);
 			/* Just fire off a uevent and let userspace tell us what to do */
 			drm_helper_hpd_irq_event(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 95144e49c7f9..34471dbaa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -342,6 +342,16 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	if (current_level == level)
 		return count;
 
+	/* profile_exit setting is valid only when current mode is in profile mode */
+	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+		pr_err("Currently not in any profile mode!\n");
+		return -EINVAL;
+	}
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {
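The amdgpu_pm.c hunk above rejects a profile_exit request unless one of the profile bits is currently set. A minimal standalone sketch of the same bitmask gate, assuming illustrative LEVEL_* values that stand in for the real AMD_DPM_FORCED_LEVEL_* flags:

    #include <stdio.h>

    /* Illustrative stand-ins for the AMD_DPM_FORCED_LEVEL_* flags. */
    #define LEVEL_PROFILE_STANDARD (1 << 4)
    #define LEVEL_PROFILE_MIN_SCLK (1 << 5)
    #define LEVEL_PROFILE_MIN_MCLK (1 << 6)
    #define LEVEL_PROFILE_PEAK     (1 << 7)
    #define LEVEL_PROFILE_EXIT     (1 << 8)

    #define PROFILE_MODE_MASK (LEVEL_PROFILE_STANDARD | LEVEL_PROFILE_MIN_SCLK | \
                               LEVEL_PROFILE_MIN_MCLK | LEVEL_PROFILE_PEAK)

    /* Mirror of the new check: exiting profile mode is only legal while a
     * profile mode is active. Returns 0 on success, -1 (-EINVAL in the
     * kernel) otherwise. */
    static int check_level_change(unsigned int current_level, unsigned int level)
    {
        if (!(current_level & PROFILE_MODE_MASK) && level == LEVEL_PROFILE_EXIT)
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_level_change(LEVEL_PROFILE_PEAK, LEVEL_PROFILE_EXIT)); /* 0  */
        printf("%d\n", check_level_change(0, LEVEL_PROFILE_EXIT));                  /* -1 */
        return 0;
    }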
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 905cce1814f3..05897b05766b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,18 +38,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
 static int psp_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct psp_context *psp = &adev->psp;
 
 	psp_set_funcs(adev);
 
-	return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct psp_context *psp = &adev->psp;
-	int ret;
-
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
 
 	psp->adev = adev;
 
+	return 0;
+}
+
+static int psp_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct psp_context *psp = &adev->psp;
+	int ret;
+
 	ret = psp_init_microcode(psp);
 	if (ret) {
 		DRM_ERROR("Failed to load psp firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a07c85815b7a..4f10f5aba00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2757,6 +2757,37 @@ error_free_sched_entity:
 }
 
 /**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD. If any subsequent PDs are allocated,
+ * page tables are being created and filled, so the VM is not a clean VM.
+ *
+ * Returns:
+ * 0 if this VM is clean
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+					  struct amdgpu_vm *vm)
+{
+	enum amdgpu_vm_level root = adev->vm_manager.root_level;
+	unsigned int entries = amdgpu_vm_num_entries(adev, root);
+	unsigned int i = 0;
+
+	if (!(vm->root.entries))
+		return 0;
+
+	for (i = 0; i < entries; i++) {
+		if (vm->root.entries[i].base.bo)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
  *
  * @adev: amdgpu_device pointer
@@ -2786,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
 		return r;
 
 	/* Sanity checks */
-	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
-		r = -EINVAL;
+	r = amdgpu_vm_check_clean_reserved(adev, vm);
+	if (r)
 		goto unreserve_bo;
-	}
 
 	if (pasid) {
 		unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8dbad496b29f..2471e7cf75ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -372,6 +372,9 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
 		if (amdgpu_sriov_runtime(adev))
 			schedule_work(&adev->virt.flr_work);
 		break;
+	case IDH_QUERY_ALIVE:
+		xgpu_ai_mailbox_send_ack(adev);
+		break;
 	/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
 	 * it byfar since that polling thread will handle it,
 	 * other msg like flr complete is not handled here.
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 39d151b79153..077e91a33d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -49,6 +49,7 @@ enum idh_event {
 	IDH_FLR_NOTIFICATION_CMPL,
 	IDH_SUCCESS,
 	IDH_FAIL,
+	IDH_QUERY_ALIVE,
 	IDH_EVENT_MAX
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index dc461df48da0..2191d3d0a219 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -787,10 +787,13 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 			0xFFFFFFFF, 0x00000004);
 		/* mc resume*/
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-				upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+						mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+						adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+						mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+						adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
 			offset = 0;
 		} else {
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
@@ -798,10 +801,11 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 				upper_32_bits(adev->uvd.inst[i].gpu_addr));
 			offset = size;
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+						AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
 		}
 
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
-			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
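Both the UVD hunk above and the VCE hunk that follows switch from a single 64-bit mc_addr to a TMR address delivered as two 32-bit halves. A small standalone sketch of the assembly and of the BAR field splits used there (the address value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical tmr_mc_addr_lo / tmr_mc_addr_hi halves. */
        uint32_t lo = 0x89abcd00, hi = 0x00000123;
        uint64_t tmr_mc_addr = (uint64_t)hi << 32 | lo;

        /* Same splits as the VCE hunk: a 40-bit BAR plus its top bits. */
        printf("40bit_bar = 0x%llx\n", (unsigned long long)(tmr_mc_addr >> 8));
        printf("64bit_bar = 0x%llx\n", (unsigned long long)((tmr_mc_addr >> 40) & 0xff));
        return 0;
    }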
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index f3f5938430d4..c0ec27991c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -244,13 +244,18 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
 
+		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+			uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+			uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+			uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
-						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
-						(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+						(tmr_mc_addr >> 40) & 0xff);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
 		} else {
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
@@ -258,6 +263,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
 						(adev->vce.gpu_addr >> 40) & 0xff);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+						offset & ~0x0f000000);
+
 		}
 
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
@@ -272,10 +280,7 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 					mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
 					(adev->vce.gpu_addr >> 40) & 0xff);
 
-		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 		size = VCE_V4_0_FW_SIZE;
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
-					offset & ~0x0f000000);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
 
 		offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ?
			offset + size : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 1b2f69a9a24e..8d89ab7f0ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -31,7 +31,7 @@
 #include "soc15_common.h"
 #include "vega10_ih.h"
 
-
+#define MAX_REARM_RETRY 10
 
 static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 
@@ -382,6 +382,38 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
 }
 
 /**
+ * vega10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
+			       struct amdgpu_ih_ring *ih)
+{
+	uint32_t reg_rptr = 0;
+	uint32_t v = 0;
+	uint32_t i = 0;
+
+	if (ih == &adev->irq.ih)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+	else if (ih == &adev->irq.ih1)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+	else if (ih == &adev->irq.ih2)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+	else
+		return;
+
+	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
+	for (i = 0; i < MAX_REARM_RETRY; i++) {
+		v = RREG32_NO_KIQ(reg_rptr);
+		if ((v < ih->ring_size) && (v != ih->rptr))
+			WDOORBELL32(ih->doorbell_index, ih->rptr);
+		else
+			break;
+	}
+}
+
+/**
  * vega10_ih_set_rptr - set the IH ring buffer rptr
  *
  * @adev: amdgpu_device pointer
@@ -395,6 +427,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
 		/* XXX check if swapping is necessary on BE */
 		*ih->rptr_cpu = ih->rptr;
 		WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+		if (amdgpu_sriov_vf(adev))
+			vega10_ih_irq_rearm(adev, ih);
 	} else if (ih == &adev->irq.ih) {
 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
 	} else if (ih == &adev->irq.ih1) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1854506e3e8f..995f9df66142 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5242,7 +5242,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct drm_crtc *pcrtc,
 				    bool wait_for_vblank)
 {
-	uint32_t i, r;
+	uint32_t i;
 	uint64_t timestamp_ns;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -5253,6 +5253,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	struct dm_crtc_state *dm_old_crtc_state =
 		to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
 	int planes_count = 0, vpos, hpos;
+	long r;
 	unsigned long flags;
 	struct amdgpu_bo *abo;
 	uint64_t tiling_flags;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec2ca71e1323..c532e9c9e491 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -748,11 +748,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
 		vsync_polarity = 1;
 	}
 
-	if (mode->vrefresh <= 24000)
+	if (drm_mode_vrefresh(mode) <= 24)
 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
-	else if (mode->vrefresh <= 25000)
+	else if (drm_mode_vrefresh(mode) <= 25)
 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
-	else if (mode->vrefresh <= 30000)
+	else if (drm_mode_vrefresh(mode) <= 30)
 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
 	else
 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
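The adv7511 hunk above fixes comparisons that treated mode->vrefresh as millihertz; drm_mode_vrefresh() returns whole hertz derived from the mode timings. A rough standalone model of that derivation, simplified to ignore interlace, doublescan and vscan (the numbers are hypothetical):

    #include <stdio.h>

    /* Approximate drm_mode_vrefresh(): pixel clock in kHz divided by the
     * total pixels per frame, rounded to the nearest Hz. */
    static int mode_vrefresh(unsigned int clock_khz, unsigned int htotal,
                             unsigned int vtotal)
    {
        unsigned int num = clock_khz * 1000;
        unsigned int den = htotal * vtotal;

        if (!den)
            return 0;
        return (num + den / 2) / den;
    }

    int main(void)
    {
        /* 640x480@60: 25.175 MHz clock, 800x525 total -> ~60 Hz */
        printf("%d Hz\n", mode_vrefresh(25175, 800, 525));
        return 0;
    }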
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2ec89bcb59f1..8a9606f91e68 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -196,9 +196,9 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
 {
 	struct dentry *ent;
-	char name[10] = "";
+	char name[16] = "";
 
-	sprintf(name, "vgpu%d", vgpu->id);
+	snprintf(name, 16, "vgpu%d", vgpu->id);
 	vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
 	if (!vgpu->debugfs)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 4e1e425189ba..41c8ebc60c63 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -45,6 +45,7 @@ static int vgpu_gem_get_pages(
 	int i, ret;
 	gen8_pte_t __iomem *gtt_entries;
 	struct intel_vgpu_fb_info *fb_info;
+	u32 page_num;
 
 	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
 	if (WARN_ON(!fb_info))
@@ -54,14 +55,15 @@ static int vgpu_gem_get_pages(
 	if (unlikely(!st))
 		return -ENOMEM;
 
-	ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+	page_num = obj->base.size >> PAGE_SHIFT;
+	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
 	if (ret) {
 		kfree(st);
 		return ret;
 	}
 	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(fb_info->start >> PAGE_SHIFT);
-	for_each_sg(st->sgl, sg, fb_info->size, i) {
+	for_each_sg(st->sgl, sg, page_num, i) {
 		sg->offset = 0;
 		sg->length = PAGE_SIZE;
 		sg_dma_address(sg) =
@@ -158,7 +160,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 		return NULL;
 
 	drm_gem_private_object_init(dev, &obj->base,
-		info->size << PAGE_SHIFT);
+		roundup(info->size, PAGE_SIZE));
 	i915_gem_object_init(obj, &intel_vgpu_gem_ops);
 
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
@@ -206,11 +208,12 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		struct intel_vgpu_fb_info *info,
 		int plane_id)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_vgpu_primary_plane_format p;
 	struct intel_vgpu_cursor_plane_format c;
 	int ret, tile_height = 1;
 
+	memset(info, 0, sizeof(*info));
+
 	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
 		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
 		if (ret)
@@ -267,8 +270,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	info->size = (info->stride * roundup(info->height, tile_height)
-		      + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	info->size = info->stride * roundup(info->height, tile_height);
 	if (info->size == 0) {
 		gvt_vgpu_err("fb size is zero\n");
 		return -EINVAL;
@@ -278,11 +280,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
 		return -EFAULT;
 	}
-	if (((info->start >> PAGE_SHIFT) + info->size) >
-	    ggtt_total_entries(&dev_priv->ggtt)) {
-		gvt_vgpu_err("Invalid GTT offset or size\n");
-		return -EFAULT;
-	}
 
 	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
 		gvt_vgpu_err("invalid gma addr\n");
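The dmabuf.c change above repurposes fb_info->size to hold bytes instead of pages, so the GEM object size becomes roundup(size, PAGE_SIZE) and the scatterlist length becomes obj->base.size >> PAGE_SHIFT. A quick standalone check of those two conversions, assuming a 4 KiB PAGE_SIZE:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long fb_bytes = 300UL * 1024 + 123;   /* hypothetical fb size */
        unsigned long obj_size = (fb_bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
        unsigned long page_num = obj_size >> PAGE_SHIFT;

        printf("bytes=%lu rounded=%lu pages=%lu\n", fb_bytes, obj_size, page_num);
        return 0;
    }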
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c2f7d20f6346..08c74e65836b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -811,7 +811,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 
 /* Allocate shadow page table without guest page. */
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
-		struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
+		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
 {
 	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -861,7 +861,7 @@ err_free_spt:
 
 /* Allocate shadow page table associated with specific gfn. */
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
-		struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
 		unsigned long gfn, bool guest_pde_ips)
 {
 	struct intel_vgpu_ppgtt_spt *spt;
@@ -936,7 +936,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
-	intel_gvt_gtt_type_t cur_pt_type;
+	enum intel_gvt_gtt_type cur_pt_type;
 
 	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
 
@@ -1076,6 +1076,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 	} else {
 		int type = get_next_pt_type(we->type);
 
+		if (!gtt_type_is_pt(type))
+			goto err;
+
 		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
 		if (IS_ERR(spt)) {
 			ret = PTR_ERR(spt);
@@ -1855,7 +1858,7 @@ static void vgpu_free_mm(struct intel_vgpu_mm *mm)
  * Zero on success, negative error code in pointer if failed.
  */
 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_vgpu_mm *mm;
@@ -2309,7 +2312,7 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
 }
 
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t type)
+		enum intel_gvt_gtt_type type)
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -2594,7 +2597,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
  * Zero on success, negative error code if failed.
 */
 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
 {
 	struct intel_vgpu_mm *mm;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 32c573aea494..42d0394f0de2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -95,8 +95,8 @@ struct intel_gvt_gtt {
 	unsigned long scratch_mfn;
 };
 
-typedef enum {
-	GTT_TYPE_INVALID = -1,
+enum intel_gvt_gtt_type {
+	GTT_TYPE_INVALID = 0,
 
 	GTT_TYPE_GGTT_PTE,
 
@@ -124,7 +124,7 @@
 	GTT_TYPE_PPGTT_PML4_PT,
 
 	GTT_TYPE_MAX,
-} intel_gvt_gtt_type_t;
+};
 
 enum intel_gvt_mm_type {
 	INTEL_GVT_MM_GGTT,
@@ -148,7 +148,7 @@ struct intel_vgpu_mm {
 
 	union {
 		struct {
-			intel_gvt_gtt_type_t root_entry_type;
+			enum intel_gvt_gtt_type root_entry_type;
 			/*
 			 * The 4 PDPs in ring context. For 48bit addressing,
 			 * only PDP0 is valid and point to PML4. For 32bit
@@ -169,7 +169,7 @@ struct intel_vgpu_mm {
 };
 
 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+		enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
 
 static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
 {
@@ -233,7 +233,7 @@ struct intel_vgpu_ppgtt_spt {
 	struct intel_vgpu *vgpu;
 
 	struct {
-		intel_gvt_gtt_type_t type;
+		enum intel_gvt_gtt_type type;
 		bool pde_ips; /* for 64KB PTEs */
 		void *vaddr;
 		struct page *page;
@@ -241,7 +241,7 @@ struct intel_vgpu_ppgtt_spt {
 	} shadow_page;
 
 	struct {
-		intel_gvt_gtt_type_t type;
+		enum intel_gvt_gtt_type type;
 		bool pde_ips; /* for 64KB PTEs */
 		unsigned long gfn;
 		unsigned long write_cnt;
@@ -267,7 +267,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
 		u64 pdps[]);
 
 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
-		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+		enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
 
 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 18f01eeb2510..90673fca792f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1206,7 +1206,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 
 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 {
-	intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
 	struct intel_vgpu_mm *mm;
 	u64 *pdps;
 
@@ -3303,7 +3303,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 /* Special MMIO blocks. */
 static struct gvt_mmio_block mmio_blocks[] = {
 	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, MCHBAR_MIRROR_REG_BASE, 0x4000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
 	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
 		pvinfo_mmio_read, pvinfo_mmio_write},
 	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index e7e14c842be4..edf6d646eb25 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
 	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
 	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 3de5b643b266..33aaa14bfdde 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -126,7 +126,4 @@
 #define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
 #define VF_GUARDBAND	_MMIO(0x83a4)
 
-/* define the effective range of MCHBAR register on Sandybridge+ */
-#define MCHBAR_MIRROR_REG_BASE	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
-
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 8998fa5ab198..7c99bbc3e2b8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1343,7 +1343,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
 	struct intel_vgpu_mm *mm;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	intel_gvt_gtt_type_t root_entry_type;
+	enum intel_gvt_gtt_type root_entry_type;
 	u64 pdps[GVT_RING_CTX_NR_PDPS];
 
 	switch (desc->addressing_mode) {
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b836721d3b13..f6c78c0fa74b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -425,6 +425,26 @@ void __i915_request_submit(struct i915_request *request)
 	if (i915_gem_context_is_banned(request->gem_context))
 		i915_request_skip(request, -EIO);
 
+	/*
+	 * Are we using semaphores when the gpu is already saturated?
+	 *
+	 * Using semaphores incurs a cost in having the GPU poll a
+	 * memory location, busywaiting for it to change. The continual
+	 * memory reads can have a noticeable impact on the rest of the
+	 * system with the extra bus traffic, stalling the cpu as it too
+	 * tries to access memory across the bus (perf stat -e bus-cycles).
+	 *
+	 * If we installed a semaphore on this request and we only submit
+	 * the request after the signaler completed, that indicates the
+	 * system is overloaded and using semaphores at this time only
+	 * increases the amount of work we are doing. If so, we disable
+	 * further use of semaphores until we are idle again, whence we
+	 * optimistically try again.
+	 */
+	if (request->sched.semaphores &&
+	    i915_sw_fence_signaled(&request->semaphore))
+		request->hw_context->saturated |= request->sched.semaphores;
+
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
@@ -432,6 +452,7 @@ void __i915_request_submit(struct i915_request *request)
 	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
 	    !i915_request_enable_breadcrumb(request))
 		intel_engine_queue_breadcrumbs(engine);
 
@@ -799,6 +820,39 @@ err_unreserve:
 }
 
 static int
+i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
+{
+	if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+		return 0;
+
+	signal = list_prev_entry(signal, ring_link);
+	if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+		return 0;
+
+	return i915_sw_fence_await_dma_fence(&rq->submit,
+					     &signal->fence, 0,
+					     I915_FENCE_GFP);
+}
+
+static intel_engine_mask_t
+already_busywaiting(struct i915_request *rq)
+{
+	/*
+	 * Polling a semaphore causes bus traffic, delaying other users of
+	 * both the GPU and CPU. We want to limit the impact on others,
+	 * while taking advantage of early submission to reduce GPU
+	 * latency. Therefore we restrict ourselves to not using more
+	 * than one semaphore from each source, and not using a semaphore
+	 * if we have detected the engine is saturated (i.e. would not be
+	 * submitted early and cause bus traffic reading an already passed
+	 * semaphore).
+	 *
+	 * See the are-we-too-late? check in __i915_request_submit().
+	 */
+	return rq->sched.semaphores | rq->hw_context->saturated;
+}
+
+static int
 emit_semaphore_wait(struct i915_request *to,
 		    struct i915_request *from,
 		    gfp_t gfp)
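The two comments above describe the heuristic: if a semaphore-equipped request is only submitted after its signaler already completed, busywaiting is pure overhead, so the engines involved are marked saturated and skipped for later requests. A toy model of that decision, with illustrative names that are not the i915 API:

    typedef unsigned int engine_mask_t;

    struct toy_context {
        engine_mask_t saturated; /* engines seen submitting semaphores too late */
    };

    /* Mirror of already_busywaiting(): sources we already busywait on,
     * plus engines flagged as saturated. */
    static engine_mask_t toy_already_busywaiting(engine_mask_t request_semaphores,
                                                 const struct toy_context *ce)
    {
        return request_semaphores | ce->saturated;
    }

    /* Mirror of the __i915_request_submit() check: if the signaler had
     * already completed by submission time, stop using semaphores on
     * those engines until idle. */
    static void toy_note_late_submit(struct toy_context *ce,
                                     engine_mask_t request_semaphores,
                                     int signaler_already_completed)
    {
        if (request_semaphores && signaler_already_completed)
            ce->saturated |= request_semaphores;
    }

toy_note_late_submit() feeds toy_already_busywaiting() on the next request, matching the order of the two hunks above.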
@@ -811,11 +865,15 @@ emit_semaphore_wait(struct i915_request *to,
 	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
 
 	/* Just emit the first semaphore we see as request space is limited. */
-	if (to->sched.semaphores & from->engine->mask)
+	if (already_busywaiting(to) & from->engine->mask)
 		return i915_sw_fence_await_dma_fence(&to->submit,
 						     &from->fence, 0,
 						     I915_FENCE_GFP);
 
+	err = i915_request_await_start(to, from);
+	if (err < 0)
+		return err;
+
 	err = i915_sw_fence_await_dma_fence(&to->semaphore,
 					    &from->fence, 0,
 					    I915_FENCE_GFP);
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 3cbffd400b1b..832cb6b1e9bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/kthread.h>
+#include <trace/events/dma_fence.h>
 #include <uapi/linux/sched/types.h>
 
 #include "i915_drv.h"
@@ -80,9 +81,39 @@ static inline bool __request_completed(const struct i915_request *rq)
 	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
 }
 
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+	return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+	fence->timestamp = timestamp;
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+	trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence)
+{
+	struct dma_fence_cb *cur, *tmp;
+
+	lockdep_assert_held(fence->lock);
+	lockdep_assert_irqs_disabled();
+
+	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+		INIT_LIST_HEAD(&cur->node);
+		cur->func(fence, cur);
+	}
+	INIT_LIST_HEAD(&fence->cb_list);
+}
+
 void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	const ktime_t timestamp = ktime_get();
 	struct intel_context *ce, *cn;
 	struct list_head *pos, *next;
 	LIST_HEAD(signal);
@@ -104,6 +135,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 			GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
 					     &rq->fence.flags));
+			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+			if (!__dma_fence_signal(&rq->fence))
+				continue;
 
 			/*
 			 * Queue for execution after dropping the signaling
@@ -111,14 +146,6 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 			 * more signalers to the same context or engine.
 			 */
 			i915_request_get(rq);
-
-			/*
-			 * We may race with direct invocation of
-			 * dma_fence_signal(), e.g. i915_request_retire(),
-			 * so we need to acquire our reference to the request
-			 * before we cancel the breadcrumb.
-			 */
-			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 			list_add_tail(&rq->signal_link, &signal);
@@ -141,7 +168,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 		struct i915_request *rq =
 			list_entry(pos, typeof(*rq), signal_link);
 
-		dma_fence_signal(&rq->fence);
+		__dma_fence_signal__timestamp(&rq->fence, timestamp);
+
+		spin_lock(&rq->lock);
+		__dma_fence_signal__notify(&rq->fence);
+		spin_unlock(&rq->lock);
+
 		i915_request_put(rq);
 	}
 }
@@ -243,19 +275,17 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)
 {
-	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-
-	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+	lockdep_assert_held(&rq->lock);
+	lockdep_assert_irqs_disabled();
 
-	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
-		return true;
-
-	spin_lock(&b->irq_lock);
-	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
-	    !__request_completed(rq)) {
+	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+		struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 		struct intel_context *ce = rq->hw_context;
 		struct list_head *pos;
 
+		spin_lock(&b->irq_lock);
+		GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
 		__intel_breadcrumbs_arm_irq(b);
 
 		/*
@@ -284,8 +314,8 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 		list_move_tail(&ce->signal_link, &b->signalers);
 
 		set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+		spin_unlock(&b->irq_lock);
 	}
-	spin_unlock(&b->irq_lock);
 
 	return !__request_completed(rq);
 }
@@ -294,9 +324,15 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
 {
 	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 
-	if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
-		return;
+	lockdep_assert_held(&rq->lock);
+	lockdep_assert_irqs_disabled();
 
+	/*
+	 * We must wait for b->irq_lock so that we know the interrupt handler
+	 * has released its reference to the intel_context and has completed
+	 * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
+	 * required).
+	 */
 	spin_lock(&b->irq_lock);
 	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
 		struct intel_context *ce = rq->hw_context;
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
index 8931e0fee873..924cc556223a 100644
--- a/drivers/gpu/drm/i915/intel_context.c
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -230,6 +230,7 @@ intel_context_init(struct intel_context *ce,
 	ce->gem_context = ctx;
 	ce->engine = engine;
 	ce->ops = engine->cops;
+	ce->saturated = 0;
 
 	INIT_LIST_HEAD(&ce->signal_link);
 	INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
index 68b4ca1611e0..339c7437fe82 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 
 #include "i915_active_types.h"
+#include "intel_engine_types.h"
 
 struct i915_gem_context;
 struct i915_vma;
@@ -58,6 +59,8 @@ struct intel_context {
 	atomic_t pin_count;
 	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
 
+	intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
 	/**
 	 * active_tracker: Active tracker for the external rq activity
 	 * on this intel_context object.
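The breadcrumbs rework above hinges on __dma_fence_signal() using test_and_set_bit(): whichever path flips DMA_FENCE_FLAG_SIGNALED_BIT first wins, and only that path runs the timestamp/notify steps. A standalone illustration of the idiom using GCC's atomic builtins:

    #include <stdio.h>

    static unsigned long flags;
    #define SIGNALED_BIT 0UL

    /* Returns 1 only for the caller that transitions the bit 0 -> 1,
     * like !test_and_set_bit() in the kernel. */
    static int signal_once(void)
    {
        unsigned long old = __atomic_fetch_or(&flags, 1UL << SIGNALED_BIT,
                                              __ATOMIC_ACQ_REL);
        return !(old & (1UL << SIGNALED_BIT));
    }

    int main(void)
    {
        printf("%d %d\n", signal_once(), signal_once()); /* prints "1 0" */
        return 0;
    }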
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3bd40a4a6739..5098228f1302 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12082,6 +12082,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 			  struct intel_crtc_state *pipe_config,
 			  bool adjust)
 {
+	struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
 	bool ret = true;
 	bool fixup_inherited = adjust &&
 		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
@@ -12303,6 +12304,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 	PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
 
+	/*
+	 * Changing the EDP transcoder input mux
+	 * (A_ONOFF vs. A_ON) requires a full modeset.
+	 */
+	if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+	    current_config->cpu_transcoder == TRANSCODER_EDP)
+		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+
 	if (!adjust) {
 		PIPE_CONF_CHECK_I(pipe_src_w);
 		PIPE_CONF_CHECK_I(pipe_src_h);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index c805a0966395..5679f2fffb7c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1280,6 +1280,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 	if (!HAS_FBC(dev_priv))
 		return 0;
 
+	/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+	if (IS_GEMINILAKE(dev_priv))
+		return 0;
+
 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
 		return 1;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 37f60cb8e9e1..46cd0e70aecb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -23,7 +23,6 @@
  */
 
 #include <linux/circ_buf.h>
-#include <trace/events/dma_fence.h>
 
 #include "intel_guc_submission.h"
 #include "intel_lrc_reg.h"
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index e94b5b1bc1b7..e7c7be4911c1 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -311,10 +311,17 @@ retry:
 		pipe_config->base.mode_changed = pipe_config->has_psr;
 		pipe_config->crc_enabled = enable;
 
-		if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) {
+		if (IS_HASWELL(dev_priv) &&
+		    pipe_config->base.active && crtc->pipe == PIPE_A &&
+		    pipe_config->cpu_transcoder == TRANSCODER_EDP) {
+			bool old_need_power_well = pipe_config->pch_pfit.enabled ||
+				pipe_config->pch_pfit.force_thru;
+			bool new_need_power_well = pipe_config->pch_pfit.enabled ||
+				enable;
+
 			pipe_config->pch_pfit.force_thru = enable;
-			if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
-			    pipe_config->pch_pfit.enabled != enable)
+
+			if (old_need_power_well != new_need_power_well)
 				pipe_config->base.connectors_changed = true;
 		}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 9155dafae2a9..38e2cfa9cec7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -747,7 +747,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	 * will make sure that the refcounting is correct in case we need to
 	 * bring down the GX after a GMU failure
 	 */
-	if (!IS_ERR(gmu->gxpd))
+	if (!IS_ERR_OR_NULL(gmu->gxpd))
 		pm_runtime_get(gmu->gxpd);
 
 out:
@@ -863,7 +863,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 	 * domain. Usually the GMU does this but only if the shutdown sequence
 	 * was successful
 	 */
-	if (!IS_ERR(gmu->gxpd))
+	if (!IS_ERR_OR_NULL(gmu->gxpd))
 		pm_runtime_put_sync(gmu->gxpd);
 
 	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
 
@@ -1234,7 +1234,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 
 	pm_runtime_disable(gmu->dev);
 
-	if (!IS_ERR(gmu->gxpd)) {
+	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
 		pm_runtime_disable(gmu->gxpd);
 		dev_pm_domain_detach(gmu->gxpd, false);
 	}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 018df2c3b7ed..45a5bc6ede5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_mdss.h"
-#include "dpu_kms.h"
 
 #define LM_OP_MODE 0x00
 #define LM_OUT_SIZE 0x04
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index da1f727d7495..ce1a555e1f31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -780,7 +780,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
 	struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
 	struct dpu_hw_fmt_layout layout;
 	struct drm_gem_object *obj;
-	struct msm_gem_object *msm_obj;
 	struct dma_fence *fence;
 	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
 	int ret;
@@ -799,8 +798,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
 	 * implicit fence and fb prepare by hand here.
 	 */
 	obj = msm_framebuffer_bo(new_state->fb, 0);
-	msm_obj = to_msm_bo(obj);
-	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	fence = reservation_object_get_excl_rcu(obj->resv);
 	if (fence)
 		drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f5b1256e32b6..131c23a267ee 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -49,15 +49,13 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 	struct msm_drm_private *priv = plane->dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct drm_gem_object *obj;
-	struct msm_gem_object *msm_obj;
 	struct dma_fence *fence;
 
 	if (!new_state->fb)
 		return 0;
 
 	obj = msm_framebuffer_bo(new_state->fb, 0);
-	msm_obj = to_msm_bo(obj);
-	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	fence = reservation_object_get_excl_rcu(obj->resv);
 
 	drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 31d5a744d84f..35f55dd25994 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -803,7 +803,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		seq_puts(m, " vmas:");
 
 		list_for_each_entry(vma, &msm_obj->vmas, list)
-			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
+				vma->aspace != NULL ? vma->aspace->name : NULL,
 				vma->iova, vma->mapped ? "mapped" : "unmapped",
 				vma->inuse);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5ac781dffee..812d1b1369a5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -86,10 +86,6 @@ struct msm_gem_object {
 
 	struct llist_node freed;
 
-	/* normally (resv == &_resv) except for imported bo's */
-	struct reservation_object *resv;
-	struct reservation_object _resv;
-
 	/* For physically contiguous buffers. Used when we don't have
 	 * an IOMMU. Also used for stolen/splashscreen buffer.
 	 */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 2216c58620c2..7c41b0599d1a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -41,6 +41,7 @@ struct nv50_disp_interlock {
 		NV50_DISP_INTERLOCK__SIZE
 	} type;
 	u32 data;
+	u32 wimm;
 };
 
 void corec37d_ntfy_init(struct nouveau_bo *, u32);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 2e7a0c347ddb..06ee23823a68 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 		asyh->set.or = head->func->or != NULL;
 	}
 
-	if (asyh->state.mode_changed)
+	if (asyh->state.mode_changed || asyh->state.connectors_changed)
 		nv50_head_atomic_check_mode(head, asyh);
 
 	if (asyh->state.color_mgmt_changed ||
@@ -413,6 +413,7 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
 	asyh->ovly = armh->ovly;
 	asyh->dither = armh->dither;
 	asyh->procamp = armh->procamp;
+	asyh->or = armh->or;
 	asyh->dp = armh->dp;
 	asyh->clr.mask = 0;
 	asyh->set.mask = 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 9103b8494279..f7dbd965e4e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 		return ret;
 	}
 
+	wndw->interlock.wimm = wndw->interlock.data;
 	wndw->immd = func;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index b95181027b31..283ff690350e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -127,7 +127,7 @@ void
 nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 		    struct nv50_wndw_atom *asyw)
 {
-	if (interlock) {
+	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
 		asyw->image.mode = 0;
 		asyw->image.interval = 1;
 	}
@@ -149,7 +149,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 	if (asyw->set.point) {
 		if (asyw->set.point = false, asyw->set.mask)
 			interlock[wndw->interlock.type] |= wndw->interlock.data;
-		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
+		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
 
 		wndw->immd->point(wndw, asyw);
 		wndw->immd->update(wndw, interlock);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 22cd45845e07..7c2fcaba42d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -631,7 +631,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 	/* We need to check that the chipset is supported before booting
 	 * fbdev off the hardware, as there's no way to put it back.
 	 */
-	ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+	ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
+				  true, false, 0, &device);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7971096b6767..10d91e8bbb94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2540,6 +2540,41 @@ nv166_chipset = {
 	.sec2 = tu102_sec2_new,
 };
 
+static const struct nvkm_device_chip
+nv167_chipset = {
+	.name = "TU117",
+	.bar = tu102_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = tu102_devinit_new,
+	.fault = tu102_fault_new,
+	.fb = gv100_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.gsp = gv100_gsp_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gp102_ltc_new,
+	.mc = tu102_mc_new,
+	.mmu = tu102_mmu_new,
+	.pci = gp100_pci_new,
+	.pmu = gp102_pmu_new,
+	.therm = gp100_therm_new,
+	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
+	.ce[0] = tu102_ce_new,
+	.ce[1] = tu102_ce_new,
+	.ce[2] = tu102_ce_new,
+	.ce[3] = tu102_ce_new,
+	.ce[4] = tu102_ce_new,
+	.disp = tu102_disp_new,
+	.dma = gv100_dma_new,
+	.fifo = tu102_fifo_new,
+	.nvdec[0] = gp102_nvdec_new,
+	.sec2 = tu102_sec2_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
 		       struct nvkm_notify *notify)
@@ -2824,8 +2859,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	u64 mmio_base, mmio_size;
 	u32 boot0, strap;
 	void __iomem *map;
-	int ret = -EEXIST;
-	int i;
+	int ret = -EEXIST, i;
+	unsigned chipset;
 
 	mutex_lock(&nv_devices_mutex);
 	if (nvkm_device_find_locked(handle))
@@ -2870,6 +2905,26 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		strap = ioread32_native(map + 0x101000);
 		iounmap(map);
 
+		/* chipset can be overridden for devel/testing purposes */
+		chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+		if (chipset) {
+			u32 override_boot0;
+
+			if (chipset >= 0x10) {
+				override_boot0 = ((chipset & 0x1ff) << 20);
+				override_boot0 |= 0x000000a1;
+			} else {
+				if (chipset != 0x04)
+					override_boot0 = 0x20104000;
+				else
+					override_boot0 = 0x20004000;
+			}
+
+			nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+				   boot0, override_boot0);
+			boot0 = override_boot0;
+		}
+
 		/* determine chipset and derive architecture from it */
 		if ((boot0 & 0x1f000000) > 0) {
 			device->chipset = (boot0 & 0x1ff00000) >> 20;
@@ -2996,6 +3051,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		case 0x162: device->chip = &nv162_chipset; break;
 		case 0x164: device->chip = &nv164_chipset; break;
 		case 0x166: device->chip = &nv166_chipset; break;
+		case 0x167: device->chip = &nv167_chipset; break;
 		default:
 			nvdev_error(device, "unknown chipset (%08x)\n", boot0);
 			goto done;
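The NvChipset override above synthesizes a boot0 value that the existing decode logic then parses; the chipset id lives in boot0 bits 28:20 (mask 0x1ff00000). A standalone round-trip check of that encoding:

    #include <stdio.h>

    int main(void)
    {
        unsigned int chipset = 0x167; /* TU117, as added by this commit */
        unsigned int boot0 = ((chipset & 0x1ff) << 20) | 0x000000a1;

        /* Same extraction as nvkm_device_ctor() */
        printf("boot0=0x%08x -> chipset=0x%03x\n",
               boot0, (boot0 & 0x1ff00000) >> 20);
        return 0;
    }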
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 5f301e632599..818d21bd28d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
 	 * and it's better to have a failed modeset than that.
 	 */
 	for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
-		if (cfg->nr <= outp_nr && cfg->nr <= outp_bw)
-			failsafe = cfg;
+		if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+			/* Try to respect sink limits too when selecting
+			 * lowest link configuration.
+			 */
+			if (!failsafe ||
+			    (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
+				failsafe = cfg;
+		}
+
 		if (failsafe && cfg[1].rate < dataKBps)
 			break;
 	}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 970f669c6d29..3b2bced1b015 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -165,6 +165,10 @@ err_out0:
 
 void panfrost_device_fini(struct panfrost_device *pfdev)
 {
+	panfrost_job_fini(pfdev);
+	panfrost_mmu_fini(pfdev);
+	panfrost_gpu_fini(pfdev);
+	panfrost_reset_fini(pfdev);
 	panfrost_regulator_fini(pfdev);
 	panfrost_clk_fini(pfdev);
 }
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 94b0819ad50b..d11e2281dde6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -219,7 +219,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
 fail_job:
 	panfrost_job_put(job);
 fail_out_sync:
-	drm_syncobj_put(sync_out);
+	if (sync_out)
+		drm_syncobj_put(sync_out);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 0c5d391f0a8f..4501597f30ab 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm)
 		dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
 		return PTR_ERR(parent);
 	}
+
+	spin_lock_init(&priv->tim2_lock);
+
 	/* If the clock divider is broken, use the parent directly */
 	if (priv->variant->broken_clockdivider) {
 		priv->clk = parent;
 		return 0;
 	}
 	parent_name = __clk_get_name(parent);
-
-	spin_lock_init(&priv->tim2_lock);
 	div->init = &init;
 
 	ret = devm_clk_hw_register(drm->dev, div);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index aa898c699101..433df7036f96 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -922,12 +922,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
 	/* get matching reference and feedback divider */
-	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+	*ref_div = min(max(den/post_div, 1u), ref_div_max);
 	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
 	/* limit fb divider to its maximum */
 	if (*fb_div > fb_div_max) {
-		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+		*ref_div = (*ref_div * fb_div_max)/(*fb_div);
 		*fb_div = fb_div_max;
 	}
 }
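The radeon hunk above reverts two DIV_ROUND_CLOSEST() uses back to truncating division when deriving ref_div. The difference only shows up when the remainder is at least half the divisor, as a quick standalone comparison illustrates:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, divisor) (((x) + (divisor) / 2) / (divisor))

    int main(void)
    {
        unsigned den = 95, post_div = 10;

        /* truncating: 95/10 = 9; round-closest: (95+5)/10 = 10 */
        printf("trunc=%u closest=%u\n", den / post_div,
               DIV_ROUND_CLOSEST(den, post_div));
        return 0;
    }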