author     Dave Airlie <airlied@redhat.com>    2016-04-22 09:09:11 +1000
committer  Dave Airlie <airlied@redhat.com>    2016-04-22 09:09:11 +1000
commit     762ce4458974534a2407cc924db05840c89105df (patch)
tree       391e0a78f8ee98b3ccd362b0aa70165b33549511
parent     5f44abd041c5f3be76d57579ab254d78e601315b (diff)
parent     31318a922395ec9e78d6e2ddf70779355afc7594 (diff)
Merge tag 'drm-intel-fixes-2016-04-21' of git://anongit.freedesktop.org/drm-intel into drm-fixes
Hi Dave, fixes all around, all but one are cc: stable material, the most
important ones are likely the Skylake hang fixes from Mika.
* tag 'drm-intel-fixes-2016-04-21' of git://anongit.freedesktop.org/drm-intel:
drm/i915: Use fw_domains_put_with_fifo() on HSW
drm/i915: Force ringbuffers to not be at offset 0
drm/i915: Adjust size of PIPE_CONTROL used for gen8 render seqno write
drm/i915/skl: Fix spurious gpu hang with gt3/gt4 revs
drm/i915/skl: Fix rc6 based gpu/system hang
drm/i915/userptr: Hold mmref whilst calling get-user-pages
drm/i915: Fixup the free space logic in ring_prepare
drm/i915/skl+: Use plane size for relative data rate calculation
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h           5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  29
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c         16
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c          42
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  18
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c       6
6 files changed, 75 insertions, 41 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 10480939159c..daba7ebb9699 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-						 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-						  IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+						 IS_SKL_GT3(dev) || \
+						 IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 18ba8139e922..4d30b60defda 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
-		down_read(&mm->mmap_sem);
-		while (pinned < npages) {
-			ret = get_user_pages_remote(work->task, mm,
-					obj->userptr.ptr + pinned * PAGE_SIZE,
-					npages - pinned,
-					!obj->userptr.read_only, 0,
-					pvec + pinned, NULL);
-			if (ret < 0)
-				break;
-
-			pinned += ret;
+		ret = -EFAULT;
+		if (atomic_inc_not_zero(&mm->mm_users)) {
+			down_read(&mm->mmap_sem);
+			while (pinned < npages) {
+				ret = get_user_pages_remote
+					(work->task, mm,
+					 obj->userptr.ptr + pinned * PAGE_SIZE,
+					 npages - pinned,
+					 !obj->userptr.read_only, 0,
+					 pvec + pinned, NULL);
+				if (ret < 0)
+					break;
+
+				pinned += ret;
+			}
+			up_read(&mm->mmap_sem);
+			mmput(mm);
 		}
-		up_read(&mm->mmap_sem);
 	}
 
 	mutex_lock(&dev->struct_mutex);
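The userptr hunk above closes a race with process exit: the worker holds only a pointer to a foreign mm, so it must take its own mm_users reference before walking the address space, and fail cleanly with -EFAULT if the mm is already going away. A minimal sketch of that pattern, reusing the mm/gup calls from the patch (the wrapper name and parameter list here are illustrative, not part of the driver):

#include <linux/mm.h>
#include <linux/sched.h>

static long pin_user_range(struct task_struct *task, struct mm_struct *mm,
			   unsigned long start, long npages,
			   struct page **pvec)
{
	long pinned = 0, ret = -EFAULT;

	/* Fails if mm_users already hit zero, i.e. the owner is exiting. */
	if (!atomic_inc_not_zero(&mm->mm_users))
		return ret;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm,
					    start + pinned * PAGE_SIZE,
					    npages - pinned,
					    1 /* write */, 0 /* force */,
					    pvec + pinned, NULL);
		if (ret < 0)
			break;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);	/* drop the reference taken above */

	return ret < 0 ? ret : pinned;
}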
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce80244..5c6080fd0968 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 		if (unlikely(total_bytes > remain_usable)) {
 			/*
 			 * The base request will fit but the reserved space
-			 * falls off the end. So only need to to wait for the
-			 * reserved size after flushing out the remainder.
+			 * falls off the end. So don't need an immediate wrap
+			 * and only need to effectively wait for the reserved
+			 * size space from the start of ringbuffer.
 			 */
 			wait_bytes = remain_actual + ringbuf->reserved_size;
-			need_wrap = true;
 		} else if (total_bytes > ringbuf->space) {
 			/* No wrapping required, just waiting. */
 			wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	int ret;
 
-	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+	ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
 	if (ret)
 		return ret;
 
+	/* We're using qword write, seqno should be aligned to 8 bytes. */
+	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
 	/* w/a for post sync ops following a GPGPU operation we
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
 	intel_logical_ring_emit(ringbuf,
 				(PIPE_CONTROL_GLOBAL_GTT_IVB |
 				 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	/* We're thrashing one dword of HWS. */
+	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
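The intel_lrc.c hunks fix two things: the ring-prepare path no longer forces a wrap when only the reserved tail spills past the end, and the gen8 seqno write becomes a properly aligned qword write. The new dword budget can be verified by counting the emits (this accounting is inferred from the hunk itself, not separate documentation):

/*
 * gen8_emit_request_render() after the fix:
 *
 *   GFX_OP_PIPE_CONTROL(6) header ............ 1 dword
 *   PIPE_CONTROL flags ....................... 1 dword
 *   hws_seqno_address() (address low) ........ 1 dword
 *   0 (address high) ......................... 1 dword
 *   seqno (low half of the qword write) ...... 1 dword
 *   0 (high half -- thrashes one HWS dword,
 *      hence BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1)) 1 dword
 *   MI_USER_INTERRUPT ........................ 1 dword
 *   MI_NOOP .................................. 1 dword
 *                                              --------
 *                                              8 dwords
 *
 * which is why intel_logical_ring_begin() now asks for
 * 8 + WA_TAIL_DWORDS instead of 6 + WA_TAIL_DWORDS.
 */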
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 347d4df49a9b..8ed3cf34f82d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 			     const struct drm_plane_state *pstate,
 			     int y)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
 	struct drm_framebuffer *fb = pstate->fb;
+	uint32_t width = 0, height = 0;
+
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(pstate->rotation))
+		swap(width, height);
 
 	/* for planar format */
 	if (fb->pixel_format == DRM_FORMAT_NV12) {
 		if (y)  /* y-plane data rate */
-			return intel_crtc->config->pipe_src_w *
-				intel_crtc->config->pipe_src_h *
+			return width * height *
 				drm_format_plane_cpp(fb->pixel_format, 0);
 		else    /* uv-plane data rate */
-			return (intel_crtc->config->pipe_src_w/2) *
-				(intel_crtc->config->pipe_src_h/2) *
+			return (width / 2) * (height / 2) *
 				drm_format_plane_cpp(fb->pixel_format, 1);
 	}
 
 	/* for packed formats */
-	return intel_crtc->config->pipe_src_w *
-		intel_crtc->config->pipe_src_h *
-		drm_format_plane_cpp(fb->pixel_format, 0);
+	return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
 
 /*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		struct drm_framebuffer *fb = plane->state->fb;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (fb == NULL)
+		if (!to_intel_plane_state(plane->state)->visible)
 			continue;
+
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		uint16_t plane_blocks, y_plane_blocks = 0;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (pstate->fb == NULL)
+		if (!to_intel_plane_state(pstate)->visible)
 			continue;
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 {
 	struct drm_plane *plane = &intel_plane->base;
 	struct drm_framebuffer *fb = plane->state->fb;
+	struct intel_plane_state *intel_pstate =
+					to_intel_plane_state(plane->state);
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint32_t method1, method2;
 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t res_blocks, res_lines;
 	uint32_t selected_result;
 	uint8_t cpp;
+	uint32_t width = 0, height = 0;
 
-	if (latency == 0 || !cstate->base.active || !fb)
+	if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
 		return false;
 
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(plane->state->rotation))
+		swap(width, height);
+
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
 				 cpp, latency);
 	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 cstate->pipe_src_w,
-				 cpp, fb->modifier[0],
+				 width,
+				 cpp,
+				 fb->modifier[0],
 				 latency);
 
-	plane_bytes_per_line = cstate->pipe_src_w * cpp;
+	plane_bytes_per_line = width * cpp;
 	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
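To see what the intel_pm.c change buys, here is a worked example of the new data-rate math with an assumed 1920x1080 plane source rectangle (the numbers are illustrative; the src rect is 16.16 fixed point, hence the >> 16):

uint32_t width  = drm_rect_width(&intel_pstate->src) >> 16;	/* 1920 */
uint32_t height = drm_rect_height(&intel_pstate->src) >> 16;	/* 1080 */

if (intel_rotation_90_or_270(pstate->rotation))
	swap(width, height);	/* 90/270 rotation scans the swapped size */

/* Packed XRGB8888, cpp = 4:  1920 * 1080 * 4 = 8294400 bytes        */
/* NV12 y-plane,    cpp = 1:  1920 * 1080 * 1 = 2073600 bytes        */
/* NV12 uv-plane,   cpp = 2:  (1920/2) * (1080/2) * 2 = 1036800 bytes */

The old code used the pipe source size instead, which miscounted whenever the plane did not cover the whole pipe or was rotated.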
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+	/* This is tied to WaForceContextSaveRestoreNonCoherent */
+	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
+	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
 		if (ret)
 			return ret;
 
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 			return -ENOMEM;
 		}
 	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+					    flags | PIN_MAPPABLE);
 		if (ret)
 			return ret;
 
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 		if (unlikely(total_bytes > remain_usable)) {
 			/*
 			 * The base request will fit but the reserved space
-			 * falls off the end. So only need to to wait for the
-			 * reserved size after flushing out the remainder.
+			 * falls off the end. So don't need an immediate wrap
+			 * and only need to effectively wait for the reserved
+			 * size space from the start of ringbuffer.
 			 */
 			wait_bytes = remain_actual + ringbuf->reserved_size;
-			need_wrap = true;
 		} else if (total_bytes > ringbuf->space) {
 			/* No wrapping required, just waiting. */
 			wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 436d8f2b8682..68b6f69aa682 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		if (IS_HASWELL(dev))
+			dev_priv->uncore.funcs.force_wake_put =
+				fw_domains_put_with_fifo;
+		else
+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
 	} else if (IS_IVYBRIDGE(dev)) {
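The intel_uncore.c hunk restores the FIFO-aware forcewake release on Haswell while keeping the plain put on Broadwell. Conceptually, the put path pairs the release with a check that the GT FIFO has free entries, so register writes posted while forcewake was held are not lost. A simplified sketch of that pairing; gt_fifo_free_entries() is a hypothetical stand-in for the driver's real FIFO bookkeeping, not the actual i915 implementation:

static void fw_domains_put_with_fifo_sketch(struct drm_i915_private *dev_priv,
					    enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);

	/* Spin until enough FIFO entries are free to accept new writes. */
	while (gt_fifo_free_entries(dev_priv) < GT_FIFO_NUM_RESERVED_ENTRIES)
		udelay(10);
}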