author     Linus Torvalds <torvalds@linux-foundation.org>   2020-03-20 09:03:54 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-03-20 09:03:54 -0700
commit     69d3e5a5a66bb59c39f36dcb9cf4e9a4239aa8cd (patch)
tree       f0bc3c72eca780903b81cf67edfa26f38b3d6e97
parent     6c90b86a745a446717fdf408c4a8a4631a5e8ee3 (diff)
parent     5366b96b1997745d903c697a32e0ed27b66fd158 (diff)
Merge tag 'drm-fixes-2020-03-20' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
"Hope you are well hiding out above the garage. A few amdgpu changes
but nothing too major. I've had a wisdom tooth out this week so
haven't been too on top of things, but all seems good.
core:
- fix lease warning
i915:
- Track active elements during dequeue
- Fix failure to handle all MCR ranges
- Revert unnecessary workaround
amdgpu:
- Pageflip fix
- VCN clockgating fixes
- GPR debugfs fix for umr
- GPU reset fix
- eDP fix for MBP
- DCN2.x fix
dw-hdmi:
- fix AVI frame colorimetry
komeda:
- fix compiler warning
bochs:
- downgrade a binding failure to a warning"
* tag 'drm-fixes-2020-03-20' of git://anongit.freedesktop.org/drm/drm:
drm/amd/display: Fix pageflip event race condition for DCN.
drm/amdgpu: fix typo for vcn2.5/jpeg2.5 idle check
drm/amdgpu: fix typo for vcn2/jpeg2 idle check
drm/amdgpu: fix typo for vcn1 idle check
drm/lease: fix WARNING in idr_destroy
drm/i915: Handle all MCR ranges
Revert "drm/i915/tgl: Add extra hdc flush workaround"
drm/i915/execlists: Track active elements during dequeue
drm/bochs: downgrade pci_request_region failure from error to warning
drm/amd/display: Add link_rate quirk for Apple 15" MBP 2017
drm/amdgpu: add fbdev suspend/resume on gpu reset
drm/amd/amdgpu: Fix GPR read from debugfs (v2)
drm/amd/display: fix typos for dcn20_funcs and dcn21_funcs struct
drm/komeda: mark PM functions as __maybe_unused
drm/bridge: dw-hdmi: fix AVI frame colorimetry
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c        |  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c         |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c             |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c             |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c              |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c              |  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c              |  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c   | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c  |  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c  |  1
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_drv.c    |  4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_hw.c                   |  6
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c          | 46
-rw-r--r--  drivers/gpu/drm/drm_lease.c                        |  3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c                | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c        | 25
17 files changed, 104 insertions(+), 83 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f24ed9a1a3e5..337d7cdce8e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -781,11 +781,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
         ssize_t result = 0;
         uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
 
-        if (size & 3 || *pos & 3)
+        if (size > 4096 || size & 3 || *pos & 3)
                 return -EINVAL;
 
         /* decode offset */
-        offset = *pos & GENMASK_ULL(11, 0);
+        offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
         se = (*pos & GENMASK_ULL(19, 12)) >> 12;
         sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
         cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -823,7 +823,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
         while (size) {
                 uint32_t value;
 
-                value = data[offset++];
+                value = data[result >> 2];
                 r = put_user(value, (uint32_t *)buf);
                 if (r) {
                         result = r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 39cd545976b7..b8975857d60d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3913,6 +3913,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                         if (r)
                                 goto out;
 
+                        amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
                         /* must succeed. */
                         amdgpu_ras_resume(tmp_adev);
@@ -4086,6 +4088,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                  */
                 amdgpu_unregister_gpu_instance(tmp_adev);
 
+                amdgpu_fbdev_set_suspend(adev, 1);
+
                 /* disable ras on ALL IPs */
                 if (!(in_ras_intr && !use_baco) &&
                       amdgpu_device_ip_need_full_reset(tmp_adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index ff2e6e1ccde7..6173951db7b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
         bool enable = (state == AMD_CG_STATE_GATE);
 
         if (enable) {
-                if (jpeg_v2_0_is_idle(handle))
+                if (!jpeg_v2_0_is_idle(handle))
                         return -EBUSY;
                 jpeg_v2_0_enable_clock_gating(adev);
         } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index c6d046df4b70..c04c2078a7c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
                         continue;
 
                 if (enable) {
-                        if (jpeg_v2_5_is_idle(handle))
+                        if (!jpeg_v2_5_is_idle(handle))
                                 return -EBUSY;
                         jpeg_v2_5_enable_clock_gating(adev, i);
                 } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 71f61afdc655..09b0572b838d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
 
         if (enable) {
                 /* wait for STATUS to clear */
-                if (vcn_v1_0_is_idle(handle))
+                if (!vcn_v1_0_is_idle(handle))
                         return -EBUSY;
                 vcn_v1_0_enable_clock_gating(adev);
         } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index c387c81f8695..b7f17342bbf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1217,7 +1217,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
 
         if (enable) {
                 /* wait for STATUS to clear */
-                if (vcn_v2_0_is_idle(handle))
+                if (!vcn_v2_0_is_idle(handle))
                         return -EBUSY;
                 vcn_v2_0_enable_clock_gating(adev);
         } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 2d64ba1adf99..678253d81154 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1672,7 +1672,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
                 return 0;
 
         if (enable) {
-                if (vcn_v2_5_is_idle(handle))
+                if (!vcn_v2_5_is_idle(handle))
                         return -EBUSY;
                 vcn_v2_5_enable_clock_gating(adev);
         } else {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e997251a8b57..6240259b3a93 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -522,8 +522,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
         acrtc_state = to_dm_crtc_state(acrtc->base.state);
 
-        DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
-                         amdgpu_dm_vrr_active(acrtc_state));
+        DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+                         amdgpu_dm_vrr_active(acrtc_state),
+                         acrtc_state->active_planes);
 
         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
         drm_crtc_handle_vblank(&acrtc->base);
@@ -543,7 +544,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
                                         &acrtc_state->vrr_params.adjust);
         }
 
-        if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+        /*
+         * If there aren't any active_planes then DCH HUBP may be clock-gated.
+         * In that case, pageflip completion interrupts won't fire and pageflip
+         * completion events won't get delivered. Prevent this by sending
+         * pending pageflip events from here if a flip is still pending.
+         *
+         * If any planes are enabled, use dm_pflip_high_irq() instead, to
+         * avoid race conditions between flip programming and completion,
+         * which could cause too early flip completion events.
+         */
+        if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+            acrtc_state->active_planes == 0) {
                 if (acrtc->event) {
                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                         acrtc->event = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cb731c1d30b1..fd9e69634c50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3401,6 +3401,17 @@ static bool retrieve_link_cap(struct dc_link *link)
                         sink_id.ieee_device_id,
                         sizeof(sink_id.ieee_device_id));
 
+        /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
+        {
+                uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
+
+                if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+                    !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
+                            sizeof(str_mbp_2017))) {
+                        link->reported_link_cap.link_rate = 0x0c;
+                }
+        }
+
         core_link_read_dpcd(
                 link,
                 DP_SINK_HW_REVISION_START,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index d51e02fdab4d..5e640f17d3d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -108,7 +108,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
         .enable_power_gating_plane = dcn20_enable_power_gating_plane,
         .dpp_pg_control = dcn20_dpp_pg_control,
         .hubp_pg_control = dcn20_hubp_pg_control,
-        .dsc_pg_control = NULL,
         .update_odm = dcn20_update_odm,
         .dsc_pg_control = dcn20_dsc_pg_control,
         .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 4861aa5c59ae..fddbd59bf4f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -116,7 +116,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
         .enable_power_gating_plane = dcn20_enable_power_gating_plane,
         .dpp_pg_control = dcn20_dpp_pg_control,
         .hubp_pg_control = dcn20_hubp_pg_control,
-        .dsc_pg_control = NULL,
         .update_odm = dcn20_update_odm,
         .dsc_pg_control = dcn20_dsc_pg_control,
         .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index ea5cd1e17304..e7933930a657 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, komeda_of_match);
 
-static int komeda_rt_pm_suspend(struct device *dev)
+static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
 {
         struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
         return komeda_dev_suspend(mdrv->mdev);
 }
 
-static int komeda_rt_pm_resume(struct device *dev)
+static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
 {
         struct komeda_drv *mdrv = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index b615b7dfdd9d..a4fc4e6aee39 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev)
                 size = min(size, mem);
         }
 
-        if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
-                DRM_ERROR("Cannot request framebuffer\n");
-                return -EBUSY;
-        }
+        if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+                DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
 
         bochs->fb_map = ioremap(addr, size);
         if (bochs->fb_map == NULL) {
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 67fca439bbfb..24965e53d351 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
                 frame.colorspace = HDMI_COLORSPACE_RGB;
 
         /* Set up colorimetry */
-        switch (hdmi->hdmi_data.enc_out_encoding) {
-        case V4L2_YCBCR_ENC_601:
-                if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
-                        frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-                else
+        if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+                switch (hdmi->hdmi_data.enc_out_encoding) {
+                case V4L2_YCBCR_ENC_601:
+                        if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+                                frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+                        else
+                                frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+                        frame.extended_colorimetry =
+                                HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+                        break;
+                case V4L2_YCBCR_ENC_709:
+                        if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+                                frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+                        else
+                                frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+                        frame.extended_colorimetry =
+                                HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+                        break;
+                default: /* Carries no data */
                         frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+                        frame.extended_colorimetry =
+                                HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+                        break;
+                }
+        } else {
+                frame.colorimetry = HDMI_COLORIMETRY_NONE;
                 frame.extended_colorimetry =
-                        HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-                break;
-        case V4L2_YCBCR_ENC_709:
-                if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
-                        frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-                else
-                        frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
-                frame.extended_colorimetry =
-                        HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
-                break;
-        default: /* Carries no data */
-                frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-                frame.extended_colorimetry =
-                        HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-                break;
+                        HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
         }
 
         frame.scan_mode = HDMI_SCAN_MODE_NONE;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index b481cafdde28..825abe38201a 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
         }
 
         DRM_DEBUG_LEASE("Creating lease\n");
+        /* lessee will take the ownership of leases */
         lessee = drm_lease_create(lessor, &leases);
 
         if (IS_ERR(lessee)) {
                 ret = PTR_ERR(lessee);
+                idr_destroy(&leases);
                 goto out_leases;
         }
@@ -580,7 +582,6 @@ out_lessee:
 
 out_leases:
         put_unused_fd(fd);
-        idr_destroy(&leases);
 
         DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
         return ret;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 940e7f7df69a..31455eceeb0c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
         spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-        struct i915_request * const *last = READ_ONCE(execlists->active);
-
-        while (*last && i915_request_completed(*last))
-                last++;
-
-        return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
         list_for_each_entry_lockless(p__, \
                                      &(rq__)->sched.waiters_list, \
@@ -1740,11 +1729,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
         (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+                                            const struct i915_request *rq)
 {
-        struct i915_request *rq;
-
-        rq = last_active(&engine->execlists);
         if (!rq)
                 return 0;
@@ -1755,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
         return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+                                const struct i915_request *rq)
 {
         if (!intel_engine_has_preempt_reset(engine))
                 return;
 
         set_timer_ms(&engine->execlists.preempt,
-                     active_preempt_timeout(engine));
+                     active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
@@ -1774,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         struct intel_engine_execlists * const execlists = &engine->execlists;
         struct i915_request **port = execlists->pending;
         struct i915_request ** const last_port = port + execlists->port_mask;
+        struct i915_request * const *active;
         struct i915_request *last;
         struct rb_node *rb;
         bool submit = false;
@@ -1828,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
          * i.e. we will retrigger preemption following the ack in case
          * of trouble.
          */
-        last = last_active(execlists);
+        active = READ_ONCE(execlists->active);
+        while ((last = *active) && i915_request_completed(last))
+                active++;
+
         if (last) {
                 if (need_preempt(engine, last, rb)) {
                         ENGINE_TRACE(engine,
@@ -2110,7 +2102,7 @@ done:
                  * Skip if we ended up with exactly the same set of requests,
                  * e.g. trying to timeslice a pair of ordered contexts
                  */
-                if (!memcmp(execlists->active, execlists->pending,
+                if (!memcmp(active, execlists->pending,
                             (port - execlists->pending + 1) * sizeof(*port))) {
                         do
                                 execlists_schedule_out(fetch_and_zero(port));
@@ -2121,7 +2113,7 @@ done:
                 clear_ports(port + 1, last_port - port);
 
                 execlists_submit_ports(engine);
-                set_preempt_timeout(engine);
+                set_preempt_timeout(engine, *active);
         } else {
 skip_submit:
                 ring_set_paused(engine, 0);
@@ -4008,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
 
                 *cs++ = preparser_disable(false);
                 intel_ring_advance(request, cs);
-
-                /*
-                 * Wa_1604544889:tgl
-                 */
-                if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
-                        flags = 0;
-                        flags |= PIPE_CONTROL_CS_STALL;
-                        flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
-                        flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-                        flags |= PIPE_CONTROL_QW_WRITE;
-
-                        cs = intel_ring_begin(request, 6);
-                        if (IS_ERR(cs))
-                                return PTR_ERR(cs);
-
-                        cs = gen8_emit_pipe_control(cs, flags,
-                                                    LRC_PPHWSP_SCRATCH_ADDR);
-                        intel_ring_advance(request, cs);
-                }
         }
 
         return 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 173a7f2d109f..6c2f8462e0f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1529,15 +1529,34 @@ err_obj:
         return ERR_PTR(err);
 }
 
+static const struct {
+        u32 start;
+        u32 end;
+} mcr_ranges_gen8[] = {
+        { .start = 0x5500, .end = 0x55ff },
+        { .start = 0x7000, .end = 0x7fff },
+        { .start = 0x9400, .end = 0x97ff },
+        { .start = 0xb000, .end = 0xb3ff },
+        { .start = 0xe000, .end = 0xe7ff },
+        {},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
+        int i;
+
+        if (INTEL_GEN(i915) < 8)
+                return false;
+
         /*
-         * Registers in this range are affected by the MCR selector
+         * Registers in these ranges are affected by the MCR selector
          * which only controls CPU initiated MMIO. Routing does not
         * work for CS access so we cannot verify them on this path.
          */
-        if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
-                return true;
+        for (i = 0; mcr_ranges_gen8[i].start; i++)
+                if (offset >= mcr_ranges_gen8[i].start &&
+                    offset <= mcr_ranges_gen8[i].end)
+                        return true;
 
         return false;
 }
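The five vcn/jpeg "typo" fixes above all restore the same idle-check pattern: clock gating may only be enabled once the block reports idle, otherwise the caller backs off with -EBUSY. Below is a minimal standalone sketch of that pattern in plain userspace C (not kernel code; "fake_block" and its helper functions are invented names used purely for illustration).

/*
 * Minimal sketch of the idle-before-gating check restored by the
 * vcn/jpeg fixes above. Plain userspace C; all names are made up.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_block {
        bool busy;          /* stands in for the hardware STATUS register */
        bool clock_gated;
};

static bool fake_block_is_idle(const struct fake_block *blk)
{
        return !blk->busy;
}

static int fake_block_set_clockgating(struct fake_block *blk, bool enable)
{
        if (enable) {
                /* The corrected check: refuse to gate while NOT idle. */
                if (!fake_block_is_idle(blk))
                        return -EBUSY;
                blk->clock_gated = true;
        } else {
                blk->clock_gated = false;
        }
        return 0;
}

int main(void)
{
        struct fake_block blk = { .busy = true, .clock_gated = false };

        /* Busy block: gating is refused with -EBUSY. */
        printf("gate while busy: %d\n", fake_block_set_clockgating(&blk, true));

        /* Idle block: gating succeeds. */
        blk.busy = false;
        printf("gate while idle: %d\n", fake_block_set_clockgating(&blk, true));
        return 0;
}

With the pre-fix condition (if (block_is_idle(...)) return -EBUSY;) the logic is inverted: gating is refused exactly when it is safe and attempted only while the block is still busy, which is what the one-character fixes correct.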