author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-21 14:16:42 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-21 14:16:42 -0700
commit | 24a1635a41bccb5cc426eaef8b88c7e0961ef6bb (patch) |
tree | eefc2f6d4a2bacd056d119d9331447e227bf5652 /drivers |
parent | f79ec886f994918de057dd224fa2dfdc162bb3c3 (diff) |
parent | 5896ec77d70d33dd38a455b0aa5f3154aeecea09 (diff) |
Merge tag 'drm-fixes-for-v4.13-rc2' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
"A bunch of fixes for rc2: two imx regressions, vc4 fix, dma-buf fix,
some displayport mst fixes, and an amdkfd fix.
Nothing too crazy, I assume we just haven't seen much rc1 testing yet"
* tag 'drm-fixes-for-v4.13-rc2' of git://people.freedesktop.org/~airlied/linux:
drm/mst: Avoid processing partially received up/down message transactions
drm/mst: Avoid dereferencing a NULL mstb in drm_dp_mst_handle_up_req()
drm/mst: Fix error handling during MST sideband message reception
drm/imx: parallel-display: Accept drm_of_find_panel_or_bridge failure
drm/imx: fix typo in ipu_plane_formats[]
drm/vc4: Fix VBLANK handling in crtc->enable() path
dma-buf/fence: Avoid use of uninitialised timestamp
drm/amdgpu: Remove unused field kgd2kfd_shared_resources.num_mec
drm/radeon: Remove initialization of shared_resources.num_mec
drm/amdkfd: Remove unused references to shared_resources.num_mec
drm/amdgpu: Fix KFD oversubscription by tracking queues correctly
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/dma-buf/dma-fence.c | 17
-rw-r--r-- | drivers/dma-buf/sync_debug.c | 2
-rw-r--r-- | drivers/dma-buf/sync_file.c | 8
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3
-rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4
-rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7
-rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r-- | drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 3
-rw-r--r-- | drivers/gpu/drm/drm_dp_mst_topology.c | 41
-rw-r--r-- | drivers/gpu/drm/imx/ipuv3-plane.c | 2
-rw-r--r-- | drivers/gpu/drm/imx/parallel-display.c | 2
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_kfd.c | 1
-rw-r--r-- | drivers/gpu/drm/vc4/vc4_crtc.c | 66
13 files changed, 93 insertions, 64 deletions
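
The dma-fence hunks below no longer pre-write fence->timestamp before the signalling race is decided: the thread that wins DMA_FENCE_FLAG_SIGNALED_BIT records the timestamp and only then sets DMA_FENCE_FLAG_TIMESTAMP_BIT (the flag itself is introduced outside this drivers-only diffstat), and readers such as sync_fill_fence_info() poll that bit before trusting the value. Below is a minimal userspace model of that ordering, using C11 atomics and illustrative names (mock_fence, FENCE_*_BIT) rather than the kernel's dma_fence API:

```c
/*
 * Minimal userspace model of the ordering the dma-fence hunks below rely on.
 * Names and types are illustrative; this is not the kernel's dma_fence API.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define FENCE_SIGNALED_BIT  (1u << 0)   /* models DMA_FENCE_FLAG_SIGNALED_BIT  */
#define FENCE_TIMESTAMP_BIT (1u << 1)   /* models DMA_FENCE_FLAG_TIMESTAMP_BIT */

struct mock_fence {
        atomic_uint flags;              /* models the fence->flags bit ops     */
        long long timestamp_ns;         /* models fence->timestamp (ktime_t)   */
};

/*
 * Signal path: win the "signaled" race first, then record the timestamp,
 * then publish it by setting the timestamp bit with release ordering.
 */
static int mock_fence_signal(struct mock_fence *f)
{
        struct timespec ts;

        if (atomic_fetch_or(&f->flags, FENCE_SIGNALED_BIT) & FENCE_SIGNALED_BIT)
                return -1;              /* already signaled */

        clock_gettime(CLOCK_MONOTONIC, &ts);
        f->timestamp_ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;
        atomic_fetch_or_explicit(&f->flags, FENCE_TIMESTAMP_BIT,
                                 memory_order_release);
        return 0;
}

/*
 * Reader path: if the fence is signaled but the timestamp is not yet
 * published, wait for it; otherwise report 0, as sync_fill_fence_info()
 * does in the hunk below.
 */
static long long mock_fence_timestamp(struct mock_fence *f)
{
        unsigned int flags;

        do {
                flags = atomic_load_explicit(&f->flags, memory_order_acquire);
        } while ((flags & (FENCE_SIGNALED_BIT | FENCE_TIMESTAMP_BIT)) ==
                 FENCE_SIGNALED_BIT);   /* the kernel spins with cpu_relax() */

        return (flags & FENCE_TIMESTAMP_BIT) ? f->timestamp_ns : 0;
}

int main(void)
{
        struct mock_fence f;

        atomic_init(&f.flags, 0);
        f.timestamp_ns = 0;

        mock_fence_signal(&f);
        printf("timestamp_ns = %lld\n", mock_fence_timestamp(&f));
        return 0;
}
```

The release/acquire pairing here plays the role that the kernel's bit operations and cpu_relax() loop play in the patch: a reader that observes the timestamp bit is guaranteed to observe the timestamp written before it.
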
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 57da14c15987..56e0a0e1b600 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;
 
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
-	} else
+	} else {
+		fence->timestamp = ktime_get();
+		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 		trace_dma_fence_signaled(fence);
+	}
 
 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
 	if (!fence)
 		return -EINVAL;
 
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
 
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 	trace_dma_fence_signaled(fence);
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 82a6e7f6d37f..59a3b2f8ee91 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
 		   show ? "_" : "",
 		   sync_status_str(status));
 
-	if (status) {
+	if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
 		struct timespec64 ts64 =
 			ktime_to_timespec64(fence->timestamp);
 
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 545e2c5c4815..d7e219d2669d 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -391,7 +391,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
 		sizeof(info->driver_name));
 
 	info->status = dma_fence_get_status(fence);
-	info->timestamp_ns = ktime_to_ns(fence->timestamp);
+	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+		cpu_relax();
+	info->timestamp_ns =
+		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+		ktime_to_ns(fence->timestamp) :
+		ktime_set(0, 0);
 }
 
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5f8ada1d872b..37971d9402e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 	if (adev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = adev->gfx.mec.num_mec,
 			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
 			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
 		};
@@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
 		 * nbits is not compile time constant
 		 */
-		last_valid_bit = adev->gfx.mec.num_mec
+		last_valid_bit = 1 /* only first MEC can have compute queues */
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
 		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 88187bfc5ea3..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
 	kfd->shared_resources = *gpu_resources;
 
-	/* We only use the first MEC */
-	if (kfd->shared_resources.num_mec > 1)
-		kfd->shared_resources.num_mec = 1;
-
 	/* calculate max size of mqds needed for queues */
 	size = max_num_of_queues_per_device *
 			kfd->device_info->mqd_size_aligned;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 955aa304ff48..602769ced3bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
 	return false;
 }
 
-unsigned int get_mec_num(struct device_queue_manager *dqm)
-{
-	BUG_ON(!dqm || !dqm->dev);
-
-	return dqm->dev->shared_resources.num_mec;
-}
-
 unsigned int get_queues_num(struct device_queue_manager *dqm)
 {
 	BUG_ON(!dqm || !dqm->dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 66b9615bc3c1..faf820a06400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
-unsigned int get_mec_num(struct device_queue_manager *dqm);
 unsigned int get_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 91ef1484b3bb..36f376677a53 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources {
 	/* Bit n == 1 means VMID n is available for KFD. */
 	unsigned int compute_vmid_bitmap;
 
-	/* number of mec available from the hardware */
-	uint32_t num_mec;
-
 	/* number of pipes per mec */
 	uint32_t num_pipe_per_mec;
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bfd237c15e76..ae5f06895562 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
 			return false;
 		}
 
+		/*
+		 * ignore out-of-order messages or messages that are part of a
+		 * failed transaction
+		 */
+		if (!recv_hdr.somt && !msg->have_somt)
+			return false;
+
 		/* get length contained in this portion */
 		msg->curchunk_len = recv_hdr.msg_len;
 		msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
 	int len;
 	u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 			       replyblock, len);
 	if (ret != len) {
 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
-		return;
+		return false;
 	}
 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
 	if (!ret) {
 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
-		return;
+		return false;
 	}
 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
 				    replyblock, len);
 		if (ret != len) {
-			DRM_DEBUG_KMS("failed to read a chunk\n");
+			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+				      len, ret);
+			return false;
 		}
+
 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
-		if (ret == false)
+		if (!ret) {
 			DRM_DEBUG_KMS("failed to build sideband msg\n");
+			return false;
+		}
+
 		curreply += len;
 		replylen -= len;
 	}
+	return true;
 }
 
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
 
-	drm_dp_get_one_sb_msg(mgr, false);
+	if (!drm_dp_get_one_sb_msg(mgr, false)) {
+		memset(&mgr->down_rep_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->down_rep_recv.have_eomt) {
 		struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, true);
+
+	if (!drm_dp_get_one_sb_msg(mgr, true)) {
+		memset(&mgr->up_req_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
 
-		drm_dp_put_mst_branch_device(mstb);
+		if (mstb)
+			drm_dp_put_mst_branch_device(mstb);
+
 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 	}
 	return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 49546222c6d3..6276bb834b4f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = {
 	DRM_FORMAT_RGBA8888,
 	DRM_FORMAT_RGBX8888,
 	DRM_FORMAT_BGRA8888,
-	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_BGRX8888,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_VYUY,
 	DRM_FORMAT_YUYV,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 636031a30e17..8aca20209cb8 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	/* port@1 is the output port */
 	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
 					  &imxpd->bridge);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 
 	imxpd->dev = dev;
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 699fe7f9b8bf..a2ab6dcdf4a2 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
 	if (rdev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = 1,
 			.num_pipe_per_mec = 4,
 			.num_queue_per_pipe = 8
 		};
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 403bbd5f99a9..a12cc7ea99b6 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
 		     SCALER_DISPSTATX_EMPTY);
 }
 
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+	if (crtc->state->event) {
+		unsigned long flags;
+
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		vc4_crtc->event = crtc->state->event;
+		crtc->state->event = NULL;
+
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	} else {
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+	}
+}
+
 static void vc4_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
 
 	require_hvs_enabled(dev);
 
+	/* Enable vblank irq handling before crtc is started otherwise
+	 * drm_crtc_get_vblank() fails in vc4_crtc_update_dlist().
+	 */
+	drm_crtc_vblank_on(crtc);
+	vc4_crtc_update_dlist(crtc);
+
 	/* Turn on the scaler, which will wait for vstart to start
 	 * compositing.
 	 */
@@ -541,9 +575,6 @@
 
 	/* Turn on the pixel valve, which will emit the vstart signal. */
 	CRTC_WRITE(PV_V_CONTROL, CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
-	/* Enable vblank irq handling after crtc is started. */
-	drm_crtc_vblank_on(crtc);
 }
 
 static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
 	struct drm_plane *plane;
 	bool debug_dump_regs = false;
@@ -620,25 +650,15 @@
 
 	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-	if (crtc->state->event) {
-		unsigned long flags;
-
-		crtc->state->event->pipe = drm_crtc_index(crtc);
-
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
-		spin_lock_irqsave(&dev->event_lock, flags);
-		vc4_crtc->event = crtc->state->event;
-		crtc->state->event = NULL;
-
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  vc4_state->mm.start);
-
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-	} else {
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  vc4_state->mm.start);
-	}
+	/* Only update DISPLIST if the CRTC was already running and is not
+	 * being disabled.
+	 * vc4_crtc_enable() takes care of updating the dlist just after
+	 * re-enabling VBLANK interrupts and before enabling the engine.
+	 * If the CRTC is being disabled, there's no point in updating this
+	 * information.
+	 */
+	if (crtc->state->active && old_state->active)
+		vc4_crtc_update_dlist(crtc);
 
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
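
The drm_dp_mst_topology.c hunks above converge on one shape: drm_dp_get_one_sb_msg() now reports failure, both callers discard the partially assembled drm_dp_sideband_msg_rx (memset and early return) instead of acting on it, and chunks that arrive without a start-of-message transaction are rejected. A simplified, self-contained sketch of that shape, using stand-in types and helper names (sideband_rx, receive_one_chunk, handle_reply) rather than the real drm_dp_mst API:

```c
/* Simplified model of the "drop partial transactions on error" pattern
 * from the MST hunks above; not the kernel's drm_dp_mst implementation.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct sideband_rx {
        bool have_somt;                 /* saw start-of-message-transaction */
        bool have_eomt;                 /* saw end-of-message-transaction   */
        unsigned char msg[256];
        size_t len;
};

/* Low-level receive step: returns false on any problem so the caller
 * knows the accumulated state may be inconsistent.
 */
static bool receive_one_chunk(struct sideband_rx *rx,
                              const unsigned char *chunk, size_t len,
                              bool somt, bool eomt)
{
        if (!somt && !rx->have_somt)
                return false;           /* chunk with no transaction open */
        if (len > sizeof(rx->msg) - rx->len)
                return false;           /* reply longer than the buffer   */

        if (somt)
                rx->have_somt = true;
        memcpy(rx->msg + rx->len, chunk, len);
        rx->len += len;
        if (eomt)
                rx->have_eomt = true;
        return true;
}

/* Caller side: on failure, wipe the receive state and return early,
 * mirroring the memset(&mgr->down_rep_recv, ...) added in the patch.
 */
static int handle_reply(struct sideband_rx *rx,
                        const unsigned char *chunk, size_t len,
                        bool somt, bool eomt)
{
        if (!receive_one_chunk(rx, chunk, len, somt, eomt)) {
                memset(rx, 0, sizeof(*rx));     /* drop the partial transaction */
                return 0;
        }
        if (rx->have_eomt) {
                printf("complete message, %zu bytes\n", rx->len);
                memset(rx, 0, sizeof(*rx));     /* ready for the next one */
        }
        return 0;
}

int main(void)
{
        struct sideband_rx rx = { 0 };
        const unsigned char chunk[] = { 0x01, 0x02, 0x03 };

        /* A chunk with no start-of-message marker is rejected and the
         * receive state stays clean.
         */
        handle_reply(&rx, chunk, sizeof(chunk), false, false);

        /* A well-formed single-chunk transaction is processed normally. */
        handle_reply(&rx, chunk, sizeof(chunk), true, true);
        return 0;
}
```

The point of the pattern is that a failed or out-of-order chunk can never leave half a transaction sitting in the receive buffer to be misinterpreted when the next interrupt arrives.
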