author     Linus Torvalds <torvalds@linux-foundation.org>   2017-01-05 15:06:02 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-01-05 15:06:02 -0800
commit     ed40875dd4b4c7b5c991db9e06c984180ab0b3ce (patch)
tree       45b93c6b3ff2a46f11560fd5ed6ca8d666cf269b /drivers
parent     c433eb70f37de2514f3ae3d43dd7e4a75493fe48 (diff)
parent     2471eb5fb6e1433e28426ece235e3730348019ec (diff)
Merge tag 'drm-intel-fixes-2017-01-05' of git://anongit.freedesktop.org/git/drm-intel
Pull i915 drm fixes from Jani Nikula:
"Here's a bunch of drm/i915 fixes for v4.10-rc3. It includes GVT-g
fixes.
My new year's resolution is to start using signed tags for pulls. If
that feels like a déjà vu, it's ((new year's) resolution), not (new
(year's resolution))"
[ Taking this directly from Jani because Dave Airlie is only partially
connected right now. - Linus ]
* tag 'drm-intel-fixes-2017-01-05' of git://anongit.freedesktop.org/git/drm-intel:
drm/i915: Prevent timeline updates whilst performing reset
drm/i915: Silence allocation failure during sg_trim()
drm/i915: Don't clflush before release phys object
drm/i915: Fix oops in overlay due to frontbuffer tracking
drm/i915: Fix oopses in the overlay code due to i915_gem_active stuff
drm/i915: Initialize overlay->last_flip properly
drm/i915: Move the min_pixclk[] handling to the end of readout
drm/i915: Force VDD off on the new power seqeuencer before starting to use it
drm/i915/gvt: fix typo in cfg_space range check
drm/i915/gvt: fix an issue in emulating cfg space PCI_COMMAND
drm/i915/gvt/kvmgt: trival: code cleanup
drm/i915/gvt/kvmgt: prevent double-release of vgpu
drm/i915/gvt/kvmgt: check returned slot for gfn
drm/i915/gvt/kvmgt: dereference the pointer within lock
drm/i915/gvt: reset the GGTT entry when vGPU created
drm/i915/gvt: fix an error in opregion handling
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c     |  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c           | 55
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h           |  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h           |  1
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c         | 46
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c      |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h  | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c          | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c     |  9
11 files changed, 190 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index db516382a4d4..711c31c8d8b4 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	u8 changed = old ^ new;
 	int ret;
 
+	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	if (!(changed & PCI_COMMAND_MEMORY))
 		return 0;
 
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 			return ret;
 	}
 
-	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	return 0;
 }
 
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
-	if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
 		return -EINVAL;
 
 	/* First check if it's PCI_COMMAND */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7eaaf1c9ed2b..6c5fdf5b2ce2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	INIT_LIST_HEAD(&gtt->oos_page_list_head);
 	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
+	intel_vgpu_reset_ggtt(vgpu);
+
 	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
 			NULL, 1, 0);
 	if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
+	void *page_addr;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		return -ENODEV;
 	}
 
+	gvt->gtt.scratch_ggtt_page =
+		alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	if (!gvt->gtt.scratch_ggtt_page) {
+		gvt_err("fail to allocate scratch ggtt page\n");
+		return -ENOMEM;
+	}
+
+	page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+	gvt->gtt.scratch_ggtt_mfn =
+		intel_gvt_hypervisor_virt_to_mfn(page_addr);
+	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("fail to translate scratch ggtt page\n");
+		__free_page(gvt->gtt.scratch_ggtt_page);
+		return -EFAULT;
+	}
+
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
 		if (ret) {
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	__free_page(gvt->gtt.scratch_ggtt_page);
+
 	if (enable_out_of_sync)
 		clean_spt_oos(gvt);
 }
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	u32 index;
+	u32 offset;
+	u32 num_entries;
+	struct intel_gvt_gtt_entry e;
+
+	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+	e.type = GTT_TYPE_GGTT_PTE;
+	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+	e.val64 |= _PAGE_PRESENT;
+
+	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d250013bc37b..b315ab3593ec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
 	struct list_head mm_lru_list_head;
+
+	struct page *scratch_ggtt_page;
+	unsigned long scratch_ggtt_mfn;
 };
 
 enum {
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index ad0e9364ee70..0af17016f33f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -175,6 +175,7 @@ struct intel_vgpu {
 		struct notifier_block group_notifier;
 		struct kvm *kvm;
 		struct work_struct release_work;
+		atomic_t released;
 	} vdev;
 #endif
 };
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 4dd6722a7339..934963970288 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -114,12 +114,15 @@ out:
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
+	kvm_pfn_t pfn;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
+
 	entry = __gvt_cache_find(vgpu, gfn);
-	mutex_unlock(&vgpu->vdev.cache_lock);
+	pfn = (entry == NULL) ? 0 : entry->pfn;
 
-	return entry == NULL ? 0 : entry->pfn;
+	mutex_unlock(&vgpu->vdev.cache_lock);
+	return pfn;
 }
 
 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -497,7 +500,16 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 		goto undo_iommu;
 	}
 
-	return kvmgt_guest_init(mdev);
+	ret = kvmgt_guest_init(mdev);
+	if (ret)
+		goto undo_group;
+
+	atomic_set(&vgpu->vdev.released, 0);
+	return ret;
+
+undo_group:
+	vfio_unregister_notifier(&mdev->dev, VFIO_GROUP_NOTIFY,
+					&vgpu->vdev.group_notifier);
 
 undo_iommu:
 	vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
@@ -509,17 +521,26 @@ out:
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
 	struct kvmgt_guest_info *info;
+	int ret;
 
 	if (!handle_valid(vgpu->handle))
 		return;
 
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+		return;
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
 					&vgpu->vdev.group_notifier);
+	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
 	info = (struct kvmgt_guest_info *)vgpu->handle;
 	kvmgt_guest_exit(info);
+
+	vgpu->vdev.kvm = NULL;
 	vgpu->handle = 0;
 }
 
@@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
 {
 	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
 					       vdev.release_work);
+
 	__intel_vgpu_release(vgpu);
 }
 
@@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-	struct intel_vgpu *vgpu;
-
 	if (!info) {
 		gvt_err("kvmgt_guest_info invalid\n");
 		return false;
 	}
 
-	vgpu = info->vgpu;
-
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 	kvmgt_protect_table_destroy(info);
-	gvt_cache_destroy(vgpu);
+	gvt_cache_destroy(info->vgpu);
 	vfree(info);
 
 	return true;
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d2a0fbc896c3..81cd921770c6 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 	int i, ret;
 
 	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
 			+ i * PAGE_SIZE);
 		if (mfn == INTEL_GVT_INVALID_ADDR) {
 			gvt_err("fail to get MFN from VA\n");
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a31b7a891ec..1e505d30b71e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -244,14 +244,16 @@ err_phys:
 
 static void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
-				struct sg_table *pages)
+				struct sg_table *pages,
+				bool needs_clflush)
 {
 	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
 	if (obj->mm.madv == I915_MADV_DONTNEED)
 		obj->mm.dirty = false;
 
-	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+	if (needs_clflush &&
+	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
 	    !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
 		drm_clflush_sg(pages);
 
@@ -263,7 +265,7 @@ static void
 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
-	__i915_gem_object_release_shmem(obj, pages);
+	__i915_gem_object_release_shmem(obj, pages, false);
 
 	if (obj->mm.dirty) {
 		struct address_space *mapping = obj->base.filp->f_mapping;
@@ -2231,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 	struct sgt_iter sgt_iter;
 	struct page *page;
 
-	__i915_gem_object_release_shmem(obj, pages);
+	__i915_gem_object_release_shmem(obj, pages, true);
 
 	i915_gem_gtt_finish_pages(obj, pages);
 
@@ -2322,7 +2324,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
 	if (orig_st->nents == orig_st->orig_nents)
 		return;
 
-	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
 		return;
 
 	new_sg = new_st.sgl;
@@ -2728,6 +2730,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 	struct drm_i915_gem_request *request;
 	struct i915_gem_context *incomplete_ctx;
 	struct intel_timeline *timeline;
+	unsigned long flags;
 	bool ring_hung;
 
 	if (engine->irq_seqno_barrier)
@@ -2763,13 +2766,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 	if (i915_gem_context_is_default(incomplete_ctx))
 		return;
 
+	timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+
+	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock(&timeline->lock);
+
 	list_for_each_entry_continue(request, &engine->timeline->requests, link)
 		if (request->ctx == incomplete_ctx)
 			reset_request(request);
 
-	timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
 	list_for_each_entry(request, &timeline->requests, link)
 		reset_request(request);
+
+	spin_unlock(&timeline->lock);
+	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e2b077df2da0..d229f47d1028 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
 	rcu_assign_pointer(active->request, request);
 }
 
+/**
+ * i915_gem_active_set_retire_fn - updates the retirement callback
+ * @active - the active tracker
+ * @fn - the routine called when the request is retired
+ * @mutex - struct_mutex used to guard retirements
+ *
+ * i915_gem_active_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_gem_active_set_retire_fn(struct i915_gem_active *active,
+			      i915_gem_retire_fn fn,
+			      struct mutex *mutex)
+{
+	lockdep_assert_held(mutex);
+	active->retire = fn ?: i915_gem_retire_noop;
+}
+
 static inline struct drm_i915_gem_request *
 __i915_gem_active_peek(const struct i915_gem_active *active)
 {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6daad8613760..3dc8724df400 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -16791,7 +16791,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
 	for_each_intel_crtc(dev, crtc) {
 		struct intel_crtc_state *crtc_state = crtc->config;
-		int pixclk = 0;
 
 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
 		memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16803,23 +16802,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		crtc->base.enabled = crtc_state->base.enable;
 		crtc->active = crtc_state->base.active;
 
-		if (crtc_state->base.active) {
+		if (crtc_state->base.active)
 			dev_priv->active_crtcs |= 1 << crtc->pipe;
 
-			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-				pixclk = ilk_pipe_pixel_rate(crtc_state);
-			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
-			else
-				WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
-			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
-				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
-		}
-
-		dev_priv->min_pixclk[crtc->pipe] = pixclk;
-
 		readout_plane_state(crtc);
 
 		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@@ -16892,6 +16877,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 	}
 
 	for_each_intel_crtc(dev, crtc) {
+		int pixclk = 0;
+
 		crtc->base.hwmode = crtc->config->base.adjusted_mode;
 
 		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16919,10 +16906,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			 */
 			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
 
+			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+				pixclk = ilk_pipe_pixel_rate(crtc->config);
+			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+				pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+			else
+				WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+			if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+
 			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
 			update_scanline_offset(crtc);
 		}
 
+		dev_priv->min_pixclk[crtc->pipe] = pixclk;
+
 		intel_pipe_config_sanity_check(dev_priv, crtc->config);
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d9bc19be855e..0b8e8eb85c19 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 				    struct intel_dp *intel_dp);
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-					      struct intel_dp *intel_dp);
+					      struct intel_dp *intel_dp,
+					      bool force_disable_vdd);
 static void
 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
 
@@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 
 	/* init power sequencer on this pipe and port */
 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
 
 	/*
 	 * Even vdd force doesn't work until we've made
@@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 	 * Only the HW needs to be reprogrammed, the SW state is fixed and
 	 * has been setup during connector init.
 	 */
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 
 	return 0;
 }
@@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 		      port_name(port), pipe_name(intel_dp->pps_pipe));
 
 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 }
 
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
 
 	/* init power sequencer on this pipe and port */
 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
 }
 
 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-					      struct intel_dp *intel_dp)
+					      struct intel_dp *intel_dp,
+					      bool force_disable_vdd)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
 	intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
+	/*
+	 * On some VLV machines the BIOS can leave the VDD
+	 * enabled even on power seqeuencers which aren't
+	 * hooked up to any port. This would mess up the
+	 * power domain tracking the first time we pick
+	 * one of these power sequencers for use since
+	 * edp_panel_vdd_on() would notice that the VDD was
+	 * already on and therefore wouldn't grab the power
+	 * domain reference. Disable VDD first to avoid this.
+	 * This also avoids spuriously turning the VDD on as
+	 * soon as the new power seqeuencer gets initialized.
+	 */
+	if (force_disable_vdd) {
+		u32 pp = ironlake_get_pp_control(intel_dp);
+
+		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+
+		if (pp & EDP_FORCE_VDD)
+			DRM_DEBUG_KMS("VDD already on, disabling first\n");
+
+		pp &= ~EDP_FORCE_VDD;
+
+		I915_WRITE(regs.pp_ctrl, pp);
+	}
+
 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
 		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
 	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
 		vlv_initial_power_sequencer_setup(intel_dp);
 	} else {
 		intel_dp_init_panel_power_sequencer(dev, intel_dp);
-		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index fd0e4dac7cc1..e589e17876dc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
 {
 	GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
 					&overlay->i915->drm.struct_mutex));
-	overlay->last_flip.retire = retire;
+	i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
+				      &overlay->i915->drm.struct_mutex);
 	i915_gem_active_set(&overlay->last_flip, req);
 	i915_add_request(req);
 }
@@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret)
 		goto out_unpin;
 
-	i915_gem_track_fb(overlay->vma->obj, new_bo,
-			  INTEL_FRONTBUFFER_OVERLAY(pipe));
+	i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+			  vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	overlay->old_vma = overlay->vma;
 	overlay->vma = vma;
@@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	overlay->contrast = 75;
 	overlay->saturation = 146;
 
+	init_request_active(&overlay->last_flip, NULL);
+
 	regs = intel_overlay_map_regs(overlay);
 	if (!regs)
 		goto out_unpin_bo;