author     Dave Airlie <airlied@redhat.com>    2020-04-10 06:42:21 +1000
committer  Dave Airlie <airlied@redhat.com>    2020-04-10 06:42:24 +1000
commit     1287c880ba45924ab41166cdf02766b2e3ac9d56 (patch)
tree       9b6bf83b6f6a72b76c083e793ea79b319ca71aea /drivers
parent     c445c1668dca9cb908f75e000ab20351bce7c416 (diff)
parent     1aaea8476d9f014667d2cb24819f9bcaf3ebb7a4 (diff)
Merge tag 'drm-intel-next-fixes-2020-04-08' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
- Flush all the reloc_gpu batch (Chris)
- Ignore readonly failures when updating relocs (Chris)
- Fill all the unused space in the GGTT (Chris)
- Return the right vswing table (Jose); see the sketch after the commit tags below
- Don't enable DDI IO power on a TypeC port in TBT mode for ICL+ (Imre)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200408215952.GA1623934@intel.com
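
For the vswing fix in intel_ddi.c (see the hunks in the diff below), the EHL/TGL combo-PHY helpers now treat every output that is neither HDMI nor eDP as DP-like, so link rates above 270000 kHz (HBR) pick the HBR2/HBR3 translation table. The following is a simplified, standalone model of that selection only; the enum and table names are illustrative stand-ins, not the i915 definitions:

#include <stdio.h>

/* Illustrative stand-ins; the driver uses INTEL_OUTPUT_* and real table arrays. */
enum output_type { OUTPUT_HDMI, OUTPUT_EDP, OUTPUT_DP, OUTPUT_DP_MST };
enum buf_trans_table { TABLE_DEFAULT, TABLE_DP_HBR2_HBR3 };

/*
 * Model of the corrected predicate: anything that is neither HDMI nor eDP
 * is treated as a DP-style output, and link rates above 270000 kHz (HBR)
 * select the HBR2/HBR3 vswing table.
 */
static enum buf_trans_table pick_combo_buf_trans(enum output_type type, int rate_khz)
{
        if (type != OUTPUT_HDMI && type != OUTPUT_EDP && rate_khz > 270000)
                return TABLE_DP_HBR2_HBR3;

        return TABLE_DEFAULT;
}

int main(void)
{
        /* A non-eDP DP-class output at HBR2 now selects the HBR2/HBR3 table; eDP never does. */
        printf("%d\n", pick_combo_buf_trans(OUTPUT_DP_MST, 540000)); /* 1 */
        printf("%d\n", pick_combo_buf_trans(OUTPUT_EDP, 540000));    /* 0 */
        return 0;
}

The old check keyed only on INTEL_OUTPUT_DP, so DP-class outputs reported under a different type fell through to the default table even at HBR2/HBR3 rates.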
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/drm/i915/display/intel_ddi.c        | 11
-rw-r--r--   drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c  | 14
-rw-r--r--   drivers/gpu/drm/i915/gt/intel_ggtt.c            | 37
3 files changed, 42 insertions, 20 deletions
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 73d0f4648c06..2c617c98db3a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -947,7 +947,8 @@ static const struct cnl_ddi_buf_trans *
 ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
                         int *n_entries)
 {
-        if (type == INTEL_OUTPUT_DP && rate > 270000) {
+        if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP &&
+            rate > 270000) {
                 *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
                 return ehl_combo_phy_ddi_translations_hbr2_hbr3;
         }
@@ -959,7 +960,7 @@ static const struct cnl_ddi_buf_trans *
 tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
                         int *n_entries)
 {
-        if (type != INTEL_OUTPUT_DP) {
+        if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
                 return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
         } else if (rate > 270000) {
                 *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
@@ -1869,7 +1870,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
                 return;
 
         dig_port = enc_to_dig_port(encoder);
-        intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+
+        if (!intel_phy_is_tc(dev_priv, phy) ||
+            dig_port->tc_mode != TC_PORT_TBT_ALT)
+                intel_display_power_get(dev_priv,
+                                        dig_port->ddi_io_power_domain);
 
         /*
          * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 36d069504836..b7440f06c5e2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -896,11 +896,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
 
 static void reloc_gpu_flush(struct reloc_cache *cache)
 {
-        GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
+        struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+
+        GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
         cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
 
-        __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
-        i915_gem_object_unpin_map(cache->rq->batch->obj);
+        __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+        i915_gem_object_unpin_map(obj);
 
         intel_gt_chipset_flush(cache->rq->engine->gt);
 
@@ -1477,10 +1479,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
                                  * can read from this userspace address.
                                  */
                                 offset = gen8_canonical_addr(offset & ~UPDATE);
-                                if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
-                                        remain = -EFAULT;
-                                        goto out;
-                                }
+                                __put_user(offset,
+                                           &urelocs[r - stack].presumed_offset);
                         }
                 } while (r++, --count);
                 urelocs += ARRAY_SIZE(stack);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index aed498a0d032..4c5a209cb669 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -191,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                      enum i915_cache_level level,
                                      u32 flags)
 {
-        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-        struct sgt_iter sgt_iter;
-        gen8_pte_t __iomem *gtt_entries;
         const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+        gen8_pte_t __iomem *gte;
+        gen8_pte_t __iomem *end;
+        struct sgt_iter iter;
         dma_addr_t addr;
 
         /*
@@ -202,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
          * not to allow the user to override access to a read only page.
          */
 
-        gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
-        gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
-        for_each_sgt_daddr(addr, sgt_iter, vma->pages)
-                gen8_set_pte(gtt_entries++, pte_encode | addr);
+        gte = (gen8_pte_t __iomem *)ggtt->gsm;
+        gte += vma->node.start / I915_GTT_PAGE_SIZE;
+        end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+        for_each_sgt_daddr(addr, iter, vma->pages)
+                gen8_set_pte(gte++, pte_encode | addr);
+        GEM_BUG_ON(gte > end);
+
+        /* Fill the allocated but "unused" space beyond the end of the buffer */
+        while (gte < end)
+                gen8_set_pte(gte++, vm->scratch[0].encode);
 
         /*
          * We want to flush the TLBs only after we're certain all the PTE
@@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                      u32 flags)
 {
         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-        gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
-        unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+        gen6_pte_t __iomem *gte;
+        gen6_pte_t __iomem *end;
         struct sgt_iter iter;
         dma_addr_t addr;
 
+        gte = (gen6_pte_t __iomem *)ggtt->gsm;
+        gte += vma->node.start / I915_GTT_PAGE_SIZE;
+        end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
         for_each_sgt_daddr(addr, iter, vma->pages)
-                iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+                iowrite32(vm->pte_encode(addr, level, flags), gte++);
+        GEM_BUG_ON(gte > end);
+
+        /* Fill the allocated but "unused" space beyond the end of the buffer */
+        while (gte < end)
+                iowrite32(vm->scratch[0].encode, gte++);
 
         /*
          * We want to flush the TLBs only after we're certain all the PTE
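
Both GGTT hunks above follow the same pattern: compute the node's PTE range up front (gte to end), write one PTE per backing page, assert the writes stayed in range, then pad the remaining slots with the scratch-page PTE so the allocated but unused tail never points at stale entries. Below is a minimal standalone sketch of that pattern only, with plain memory writes and made-up names standing in for the driver's __iomem accessors, scatterlist iteration and scratch plumbing:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t pte_t; /* stand-in for gen8_pte_t / gen6_pte_t */

/*
 * Write a PTE for every backing page of an object, then fill the rest of
 * the node's range with the scratch PTE. Mirrors the shape of
 * gen8_ggtt_insert_entries() after this change, minus the __iomem and
 * scatterlist details.
 */
void insert_entries(pte_t *gsm, size_t node_start_pages,
                    size_t node_size_pages,
                    const pte_t *page_ptes, size_t num_pages,
                    pte_t scratch_pte)
{
        pte_t *gte = gsm + node_start_pages;
        pte_t *end = gte + node_size_pages;
        size_t i;

        for (i = 0; i < num_pages; i++)
                *gte++ = page_ptes[i];
        assert(gte <= end); /* analogue of GEM_BUG_ON(gte > end) */

        /* Fill the allocated but unused space beyond the end of the buffer. */
        while (gte < end)
                *gte++ = scratch_pte;
}

Pointing the tail at scratch, rather than leaving whatever was previously mapped there, means any prefetch or overfetch past the end of the object lands on a harmless page.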