| Field | Value | Date |
|---|---|---|
| author | Dave Airlie <airlied@redhat.com> | 2019-10-30 09:01:28 +1000 |
| committer | Dave Airlie <airlied@redhat.com> | 2019-10-30 09:51:03 +1000 |
| commit | 57c2af791b6c8087b6d8b56046838427d2ec0d73 (patch) | |
| tree | d6ce203aac6f10fd9ee9aeb14c9f07fe59ae82c4 /drivers/gpu/drm | |
| parent | 8c84b43f17cb0fa6543c20652aa2c6f0356bc686 (diff) | |
| parent | 12a280c7286857119cf0d88c487f695e3a1c0912 (diff) | |
Merge tag 'topic/mst-suspend-resume-reprobe-2019-10-29-2' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
UAPI Changes:
Cross-subsystem Changes:
Core Changes:
* Handle UP requests asynchronously in the DP MST helpers, fixing
hotplug notifications and allowing us to implement suspend/resume
reprobing
* Add basic suspend/resume reprobing to the DP MST helpers (see the
usage sketch after this list)
* Improve locking for link address reprobing and connection status
request handling in the DP MST helpers
* Miscellaneous refactoring in the DP MST helpers
* Add a Kconfig option to the DP MST helpers to enable tracking of
gets/puts for topology references for debugging purposes
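
As a rough illustration of the new driver-facing contract, here is a minimal, hypothetical resume-path sketch: `my_mst_resume()` and its `runtime` flag are invented for the example, but the `drm_dp_mst_topology_mgr_resume(mgr, sync)` call and the failure handling mirror what the amdgpu, i915 and nouveau patches in this series do.

```c
#include <drm/drm_device.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_probe_helper.h>

/*
 * Hypothetical driver resume hook. Pass sync=true only on system resume,
 * where drm_atomic_helper_resume() will be called afterwards; runtime
 * resume paths must pass false so the synchronous topology reprobe can't
 * deadlock against the runtime PM code.
 */
static void my_mst_resume(struct drm_device *dev,
			  struct drm_dp_mst_topology_mgr *mgr,
			  bool runtime)
{
	int ret;

	ret = drm_dp_mst_topology_mgr_resume(mgr, !runtime);
	if (ret < 0) {
		/*
		 * The topology disappeared while we were suspended (e.g. we
		 * were undocked); drop MST mode and let a fresh hotplug
		 * probe pick up whatever is connected now.
		 */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
		drm_kms_helper_hotplug_event(dev);
	}
}
```
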
Driver Changes:
* nouveau: Resume hotplug interrupts earlier, so that sideband
messages may be transmitted during resume and thus allow
suspend/resume reprobing for DP MST to work
* nouveau: Avoid grabbing runtime PM references when handling short DP
pulses, so that handling sideband messages in resume codepaths with the
DP MST helpers doesn't deadlock us
* i915, nouveau, amdgpu, radeon: Use detect_ctx for probing MST
connectors, so that we can grab the topology manager's atomic lock
(a sketch of the converted callback follows below)
Note: there are some amdgpu patches that I didn't realize were pushed
upstream already when creating this topic branch. If they fail to
apply, you can just ignore and skip them.
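
For reference, a hedged sketch of what the detect_ctx conversion looks like on the driver side; the `my_mst_connector` wrapper and helper names are invented for the example, while the `drm_dp_mst_detect_port()` call taking an acquire context matches the conversions in the diff below.

```c
#include <linux/kernel.h>

#include <drm/drm_connector.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_modeset_lock.h>

/* Illustrative connector wrapper; real drivers keep mgr/port elsewhere. */
struct my_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
};

#define to_my_mst_connector(c) \
	container_of(c, struct my_mst_connector, base)

static int my_mst_detect_ctx(struct drm_connector *connector,
			     struct drm_modeset_acquire_ctx *ctx,
			     bool force)
{
	struct my_mst_connector *conn = to_my_mst_connector(connector);

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	/*
	 * May return -EDEADLK when the topology manager's base.lock needs
	 * to be backed off; the probe helpers retry for us, which is why
	 * .detect_ctx is used instead of the old .detect hook.
	 */
	return drm_dp_mst_detect_port(connector, ctx, conn->mst_mgr,
				      conn->mst_port);
}

static const struct drm_connector_helper_funcs my_mst_helper_funcs = {
	.detect_ctx = my_mst_detect_ctx,
	/* .get_modes, .mode_valid, .atomic_best_encoder, etc. */
};
```
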
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lyude Paul <lyude@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/a74c6446bc960190d195a751cb6d8a00a98f3974.camel@redhat.com
Diffstat (limited to 'drivers/gpu/drm')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/Kconfig | 14 |
| -rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 28 |
| -rw-r--r-- | drivers/gpu/drm/drm_dp_mst_topology.c | 1176 |
| -rw-r--r-- | drivers/gpu/drm/i915/display/intel_dp.c | 3 |
| -rw-r--r-- | drivers/gpu/drm/i915/display/intel_dp_mst.c | 28 |
| -rw-r--r-- | drivers/gpu/drm/nouveau/dispnv50/disp.c | 38 |
| -rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_connector.c | 33 |
| -rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_display.c | 19 |
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_dp_mst.c | 24 |
10 files changed, 988 insertions, 379 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index dba57ca0c9d9..617d9c3a86c3 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -93,6 +93,20 @@ config DRM_KMS_FB_HELPER help FBDEV helpers for KMS drivers. +config DRM_DEBUG_DP_MST_TOPOLOGY_REFS + bool "Enable refcount backtrace history in the DP MST helpers" + select STACKDEPOT + depends on DRM_KMS_HELPER + depends on DEBUG_KERNEL + depends on EXPERT + help + Enables debug tracing for topology refs in DRM's DP MST helpers. A + history of each topology reference/dereference will be printed to the + kernel log once a port or branch device's topology refcount reaches 0. + + This has the potential to use a lot of memory and print some very + large kernel messages. If in doubt, say "N". + config DRM_FBDEV_EMULATION bool "Enable legacy fbdev support for your modesetting driver" depends on DRM diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 23bdcbcb477a..b3d4403b7d9f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1028,7 +1028,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) if (suspend) { drm_dp_mst_topology_mgr_suspend(mgr); } else { - ret = drm_dp_mst_topology_mgr_resume(mgr); + ret = drm_dp_mst_topology_mgr_resume(mgr, true); if (ret < 0) { drm_dp_mst_topology_mgr_set_mst(mgr, false); need_hotplug = true; @@ -1246,7 +1246,7 @@ static int dm_resume(void *handle) */ amdgpu_dm_irq_resume_early(adev); - /* On resume we need to rewrite the MSTM control bits to enable MST*/ + /* On resume we need to rewrite the MSTM control bits to enable MST*/ s3_handle_mst(ddev, false); /* Do detection*/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index d9113ce0be09..3c1f8258291c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -123,21 +123,6 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, return result; } -static enum drm_connector_status -dm_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_dm_connector *master = aconnector->mst_port; - - enum drm_connector_status status = - drm_dp_mst_detect_port( - connector, - &master->mst_mgr, - aconnector->port); - - return status; -} - static void dm_dp_mst_connector_destroy(struct drm_connector *connector) { @@ -175,7 +160,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) } static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { - .detect = dm_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = dm_dp_mst_connector_destroy, .reset = amdgpu_dm_connector_funcs_reset, @@ -250,10 +234,22 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector, return &to_amdgpu_dm_connector(connector)->mst_encoder->base; } +static int +dm_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *master = aconnector->mst_port; + + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + aconnector->port); +} + static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { .get_modes = 
dm_dp_mst_get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, .atomic_best_encoder = dm_mst_atomic_best_encoder, + .detect_ctx = dm_dp_mst_detect, }; static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index b854a422a523..85bef73a6763 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -28,6 +28,13 @@ #include <linux/sched.h> #include <linux/seq_file.h> +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) +#include <linux/stackdepot.h> +#include <linux/sort.h> +#include <linux/timekeeping.h> +#include <linux/math64.h> +#endif + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_mst_helper.h> @@ -45,6 +52,12 @@ * protocol. The helpers contain a topology manager and bandwidth manager. * The helpers encapsulate the sending and received of sideband msgs. */ +struct drm_dp_pending_up_req { + struct drm_dp_sideband_msg_hdr hdr; + struct drm_dp_sideband_msg_req_body msg; + struct list_head next; +}; + static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, char *buf); @@ -61,8 +74,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -1393,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) } EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); -static void drm_dp_destroy_mst_branch_device(struct kref *kref) +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + +#define STACK_DEPTH 8 + +static noinline void +__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_ref_history *history, + enum drm_dp_mst_topology_ref_type type) { - struct drm_dp_mst_branch *mstb = - container_of(kref, struct drm_dp_mst_branch, topology_kref); - struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; - struct drm_dp_mst_port *port, *tmp; - bool wake_tx = false; + struct drm_dp_mst_topology_ref_entry *entry = NULL; + depot_stack_handle_t backtrace; + ulong stack_entries[STACK_DEPTH]; + uint n; + int i; - mutex_lock(&mgr->lock); - list_for_each_entry_safe(port, tmp, &mstb->ports, next) { - list_del(&port->next); - drm_dp_mst_topology_put_port(port); + n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1); + backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL); + if (!backtrace) + return; + + /* Try to find an existing entry for this backtrace */ + for (i = 0; i < history->len; i++) { + if (history->entries[i].backtrace == backtrace) { + entry = &history->entries[i]; + break; + } } - mutex_unlock(&mgr->lock); - /* drop any tx slots msg */ - mutex_lock(&mstb->mgr->qlock); - if (mstb->tx_slots[0]) { - mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[0] = NULL; - wake_tx = true; + /* Otherwise add one */ + if (!entry) { + struct drm_dp_mst_topology_ref_entry *new; + int new_len = history->len + 1; + + new = krealloc(history->entries, sizeof(*new) * new_len, + GFP_KERNEL); + if (!new) + return; + + entry = &new[history->len]; + history->len = new_len; + history->entries = new; + + entry->backtrace = 
backtrace; + entry->type = type; + entry->count = 0; } - if (mstb->tx_slots[1]) { - mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[1] = NULL; - wake_tx = true; + entry->count++; + entry->ts_nsec = ktime_get_ns(); +} + +static int +topology_ref_history_cmp(const void *a, const void *b) +{ + const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b; + + if (entry_a->ts_nsec > entry_b->ts_nsec) + return 1; + else if (entry_a->ts_nsec < entry_b->ts_nsec) + return -1; + else + return 0; +} + +static inline const char * +topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type) +{ + if (type == DRM_DP_MST_TOPOLOGY_REF_GET) + return "get"; + else + return "put"; +} + +static void +__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, + void *ptr, const char *type_str) +{ + struct drm_printer p = drm_debug_printer(DBG_PREFIX); + char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + int i; + + if (!buf) + return; + + if (!history->len) + goto out; + + /* First, sort the list so that it goes from oldest to newest + * reference entry + */ + sort(history->entries, history->len, sizeof(*history->entries), + topology_ref_history_cmp, NULL); + + drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n", + type_str, ptr); + + for (i = 0; i < history->len; i++) { + const struct drm_dp_mst_topology_ref_entry *entry = + &history->entries[i]; + ulong *entries; + uint nr_entries; + u64 ts_nsec = entry->ts_nsec; + u64 rem_nsec = do_div(ts_nsec, 1000000000); + + nr_entries = stack_depot_fetch(entry->backtrace, &entries); + stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4); + + drm_printf(&p, " %d %ss (last at %5llu.%06llu):\n%s", + entry->count, + topology_ref_type_to_str(entry->type), + ts_nsec, rem_nsec / 1000, buf); } - mutex_unlock(&mstb->mgr->qlock); - if (wake_tx) - wake_up_all(&mstb->mgr->tx_waitq); + /* Now free the history, since this is the only time we expose it */ + kfree(history->entries); +out: + kfree(buf); +} - drm_dp_mst_put_mstb_malloc(mstb); +static __always_inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) +{ + __dump_topology_ref_history(&mstb->topology_ref_history, mstb, + "MSTB"); +} + +static __always_inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) +{ + __dump_topology_ref_history(&port->topology_ref_history, port, + "Port"); +} + +static __always_inline void +save_mstb_topology_ref(struct drm_dp_mst_branch *mstb, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type); +} + +static __always_inline void +save_port_topology_ref(struct drm_dp_mst_port *port, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(port->mgr, &port->topology_ref_history, type); +} + +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->topology_ref_history_lock); +} + +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_unlock(&mgr->topology_ref_history_lock); +} +#else +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {} +static inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {} +#define save_mstb_topology_ref(mstb, type) +#define 
save_port_topology_ref(port, type) +#endif + +static void drm_dp_destroy_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = + container_of(kref, struct drm_dp_mst_branch, topology_kref); + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + + drm_dp_mst_dump_mstb_topology_history(mstb); + + INIT_LIST_HEAD(&mstb->destroy_next); + + /* + * This can get called under mgr->mutex, so we need to perform the + * actual destruction of the mstb in another worker + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1453,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) { - int ret = kref_get_unless_zero(&mstb->topology_kref); + int ret; - if (ret) - DRM_DEBUG("mstb %p (%d)\n", mstb, - kref_read(&mstb->topology_kref)); + topology_ref_history_lock(mstb->mgr); + ret = kref_get_unless_zero(&mstb->topology_kref); + if (ret) { + DRM_DEBUG("mstb %p (%d)\n", + mstb, kref_read(&mstb->topology_kref)); + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); + } + + topology_ref_history_unlock(mstb->mgr); return ret; } @@ -1478,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) */ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); WARN_ON(kref_read(&mstb->topology_kref) == 0); kref_get(&mstb->topology_kref); DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); + + topology_ref_history_unlock(mstb->mgr); } /** @@ -1498,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); - kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); -} + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT); -static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) -{ - struct drm_dp_mst_branch *mstb; - - switch (old_pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* remove i2c over sideband */ - drm_dp_mst_unregister_i2c_bus(&port->aux); - break; - case DP_PEER_DEVICE_MST_BRANCHING: - mstb = port->mstb; - port->mstb = NULL; - drm_dp_mst_topology_put_mstb(mstb); - break; - } + topology_ref_history_unlock(mstb->mgr); + kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); } static void drm_dp_destroy_port(struct kref *kref) @@ -1527,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref) container_of(kref, struct drm_dp_mst_port, topology_kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; - if (!port->input) { - kfree(port->cached_edid); + drm_dp_mst_dump_port_topology_history(port); - /* - * The only time we don't have a connector - * on an output port is if the connector init - * fails. 
- */ - if (port->connector) { - /* we can't destroy the connector here, as - * we might be holding the mode_config.mutex - * from an EDID retrieval */ - - mutex_lock(&mgr->destroy_connector_lock); - list_add(&port->next, &mgr->destroy_connector_list); - mutex_unlock(&mgr->destroy_connector_lock); - schedule_work(&mgr->destroy_connector_work); - return; - } - /* no need to clean up vcpi - * as if we have no connector we never setup a vcpi */ - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + /* There's nothing that needs locking to destroy an input port yet */ + if (port->input) { + drm_dp_mst_put_port_malloc(port); + return; } - drm_dp_mst_put_port_malloc(port); + + kfree(port->cached_edid); + + /* + * we can't destroy the connector here, as we might be holding the + * mode_config.mutex from an EDID retrieval + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&port->next, &mgr->destroy_port_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1579,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) { - int ret = kref_get_unless_zero(&port->topology_kref); + int ret; - if (ret) - DRM_DEBUG("port %p (%d)\n", port, - kref_read(&port->topology_kref)); + topology_ref_history_lock(port->mgr); + ret = kref_get_unless_zero(&port->topology_kref); + if (ret) { + DRM_DEBUG("port %p (%d)\n", + port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + } + topology_ref_history_unlock(port->mgr); return ret; } @@ -1603,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + WARN_ON(kref_read(&port->topology_kref) == 0); kref_get(&port->topology_kref); DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + + topology_ref_history_unlock(port->mgr); } /** @@ -1621,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); + + topology_ref_history_unlock(port->mgr); kref_put(&port->topology_kref, drm_dp_destroy_port); } @@ -1739,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -/* - * return sends link address for new mstb - */ -static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) +static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt) { - int ret; - u8 rad[6], lct; - bool send_link = false; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + struct drm_dp_mst_branch *mstb; + u8 rad[8], lct; + int ret = 0; + + if (port->pdt == new_pdt) + return 0; + + /* Teardown the old pdt, if there is one */ + switch (port->pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* + * If the new PDT would also have an i2c bus, don't bother + * with reregistering it + */ + if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + new_pdt == DP_PEER_DEVICE_SST_SINK) { + port->pdt = new_pdt; + return 0; + } + + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + break; + case 
DP_PEER_DEVICE_MST_BRANCHING: + mutex_lock(&mgr->lock); + drm_dp_mst_topology_put_mstb(port->mstb); + port->mstb = NULL; + mutex_unlock(&mgr->lock); + break; + } + + port->pdt = new_pdt; switch (port->pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_SST_SINK: /* add i2c over sideband */ ret = drm_dp_mst_register_i2c_bus(&port->aux); break; + case DP_PEER_DEVICE_MST_BRANCHING: lct = drm_dp_calculate_rad(port, rad); + mstb = drm_dp_add_mst_branch_device(lct, rad); + if (!mstb) { + ret = -ENOMEM; + DRM_ERROR("Failed to create MSTB for port %p", port); + goto out; + } - port->mstb = drm_dp_add_mst_branch_device(lct, rad); - if (port->mstb) { - port->mstb->mgr = port->mgr; - port->mstb->port_parent = port; - /* - * Make sure this port's memory allocation stays - * around until its child MSTB releases it - */ - drm_dp_mst_get_port_malloc(port); + mutex_lock(&mgr->lock); + port->mstb = mstb; + mstb->mgr = port->mgr; + mstb->port_parent = port; - send_link = true; - } + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + mutex_unlock(&mgr->lock); + + /* And make sure we send a link address for this */ + ret = 1; break; } - return send_link; + +out: + if (ret < 0) + port->pdt = DP_PEER_DEVICE_NONE; + return ret; } /** @@ -1903,44 +2118,130 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); static void +drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + char proppath[255]; + int ret; + + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = mgr->cbs->add_connector(mgr, port, proppath); + if (!port->connector) { + ret = -ENOMEM; + goto error; + } + + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + port->pdt == DP_PEER_DEVICE_SST_SINK) && + port->port_num >= DP_MST_LOGICAL_PORT_0) { + port->cached_edid = drm_get_edid(port->connector, + &port->aux.ddc); + drm_connector_set_tile_property(port->connector); + } + + mgr->cbs->register_connector(port->connector); + return; + +error: + DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret); +} + +/* + * Drop a topology reference, and unlink the port from the in-memory topology + * layout + */ +static void +drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + mutex_lock(&mgr->lock); + list_del(&port->next); + mutex_unlock(&mgr->lock); + drm_dp_mst_topology_put_port(port); +} + +static struct drm_dp_mst_port * +drm_dp_mst_add_port(struct drm_device *dev, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, u8 port_number) +{ + struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL); + + if (!port) + return NULL; + + kref_init(&port->topology_kref); + kref_init(&port->malloc_kref); + port->parent = mstb; + port->port_num = port_number; + port->mgr = mgr; + port->aux.name = "DPMST"; + port->aux.dev = dev->dev; + port->aux.is_remote = true; + + /* + * Make sure the memory allocation for our parent branch stays + * around until our own memory allocation is released + */ + drm_dp_mst_get_mstb_malloc(mstb); + + return port; +} + +static int drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, struct drm_device *dev, struct drm_dp_link_addr_reply_port *port_msg) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port 
*port; - bool ret; - bool created = false; - int old_pdt = 0; - int old_ddps = 0; + int old_ddps = 0, ret; + u8 new_pdt = DP_PEER_DEVICE_NONE; + bool created = false, send_link_addr = false, changed = false; port = drm_dp_get_port(mstb, port_msg->port_number); if (!port) { - port = kzalloc(sizeof(*port), GFP_KERNEL); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); if (!port) - return; - kref_init(&port->topology_kref); - kref_init(&port->malloc_kref); - port->parent = mstb; - port->port_num = port_msg->port_number; - port->mgr = mstb->mgr; - port->aux.name = "DPMST"; - port->aux.dev = dev->dev; - port->aux.is_remote = true; - - /* - * Make sure the memory allocation for our parent branch stays - * around until our own memory allocation is released + return -ENOMEM; + created = true; + changed = true; + } else if (!port->input && port_msg->input_port && port->connector) { + /* Since port->connector can't be changed here, we create a + * new port if input_port changes from 0 to 1 */ - drm_dp_mst_get_mstb_malloc(mstb); - + drm_dp_mst_topology_unlink_port(mgr, port); + drm_dp_mst_topology_put_port(port); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); + if (!port) + return -ENOMEM; + changed = true; created = true; - } else { - old_pdt = port->pdt; + } else if (port->input && !port_msg->input_port) { + changed = true; + } else if (port->connector) { + /* We're updating a port that's exposed to userspace, so do it + * under lock + */ + drm_modeset_lock(&mgr->base.lock, NULL); + old_ddps = port->ddps; + changed = port->ddps != port_msg->ddps || + (port->ddps && + (port->ldps != port_msg->legacy_device_plug_status || + port->dpcd_rev != port_msg->dpcd_revision || + port->mcs != port_msg->mcs || + port->pdt != port_msg->peer_device_type || + port->num_sdp_stream_sinks != + port_msg->num_sdp_stream_sinks)); } - port->pdt = port_msg->peer_device_type; port->input = port_msg->input_port; + if (!port->input) + new_pdt = port_msg->peer_device_type; port->mcs = port_msg->mcs; port->ddps = port_msg->ddps; port->ldps = port_msg->legacy_device_plug_status; @@ -1951,78 +2252,104 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, /* manage mstb port lists with mgr lock - take a reference for this list */ if (created) { - mutex_lock(&mstb->mgr->lock); + mutex_lock(&mgr->lock); drm_dp_mst_topology_get_port(port); list_add(&port->next, &mstb->ports); - mutex_unlock(&mstb->mgr->lock); + mutex_unlock(&mgr->lock); } if (old_ddps != port->ddps) { if (port->ddps) { if (!port->input) { - drm_dp_send_enum_path_resources(mstb->mgr, - mstb, port); + drm_dp_send_enum_path_resources(mgr, mstb, + port); } } else { port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - - ret = drm_dp_port_setup_pdt(port); - if (ret == true) - drm_dp_send_link_address(mstb->mgr, port->mstb); + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + send_link_addr = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT on port %p: %d\n", + port, ret); + goto fail; } - if (created && !port->input) { - char proppath[255]; - - build_mst_prop_path(mstb, port->port_num, proppath, - sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, - port, - proppath); - if (!port->connector) { - /* remove it from the port list */ - mutex_lock(&mstb->mgr->lock); - list_del(&port->next); - mutex_unlock(&mstb->mgr->lock); - /* drop port list reference */ - 
drm_dp_mst_topology_put_port(port); - goto out; - } - if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - port->pdt == DP_PEER_DEVICE_SST_SINK) && - port->port_num >= DP_MST_LOGICAL_PORT_0) { - port->cached_edid = drm_get_edid(port->connector, - &port->aux.ddc); - drm_connector_set_tile_property(port->connector); - } - (*mstb->mgr->cbs->register_connector)(port->connector); + /* + * If this port wasn't just created, then we're reprobing because + * we're coming out of suspend. In this case, always resend the link + * address if there's an MSTB on this port + */ + if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING) + send_link_addr = true; + + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (!port->input) + drm_dp_mst_port_add_connector(mstb, port); + + if (send_link_addr && port->mstb) { + ret = drm_dp_send_link_address(mgr, port->mstb); + if (ret == 1) /* MSTB below us changed */ + changed = true; + else if (ret < 0) + goto fail_put; } -out: /* put reference to this port */ drm_dp_mst_topology_put_port(port); + return changed; + +fail: + drm_dp_mst_topology_unlink_port(mgr, port); + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); +fail_put: + drm_dp_mst_topology_put_port(port); + return ret; } static void drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, struct drm_dp_connection_status_notify *conn_stat) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_pdt; - int old_ddps; - bool dowork = false; + int old_ddps, ret; + u8 new_pdt; + bool dowork = false, create_connector = false; + port = drm_dp_get_port(mstb, conn_stat->port_number); if (!port) return; + if (port->connector) { + if (!port->input && conn_stat->input_port) { + /* + * We can't remove a connector from an already exposed + * port, so just throw the port out and make sure we + * reprobe the link address of it's parent MSTB + */ + drm_dp_mst_topology_unlink_port(mgr, port); + mstb->link_address_sent = false; + dowork = true; + goto out; + } + + /* Locking is only needed if the port's exposed to userspace */ + drm_modeset_lock(&mgr->base.lock, NULL); + } else if (port->input && !conn_stat->input_port) { + create_connector = true; + /* Reprobe link address so we get num_sdp_streams */ + mstb->link_address_sent = false; + dowork = true; + } + old_ddps = port->ddps; - old_pdt = port->pdt; - port->pdt = conn_stat->peer_device_type; + port->input = conn_stat->input_port; port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status; @@ -2034,17 +2361,27 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - if (drm_dp_port_setup_pdt(port)) - dowork = true; + new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; + + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + dowork = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT for port %p: %d\n", + port, ret); + dowork = false; } + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (create_connector) + drm_dp_mst_port_add_connector(mstb, port); + +out: drm_dp_mst_topology_put_port(port); if (dowork) queue_work(system_long_wq, &mstb->mgr->work); - } static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, @@ -2130,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, return mstb; } -static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_mst_port *port; - struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) - drm_dp_send_link_address(mgr, mstb); + int ret; + bool changed = false; + + if (!mstb->link_address_sent) { + ret = drm_dp_send_link_address(mgr, mstb); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; + } list_for_each_entry(port, &mstb->ports, next) { - if (port->input) - continue; + struct drm_dp_mst_branch *mstb_child = NULL; - if (!port->ddps) + if (port->input || !port->ddps) continue; - if (!port->available_pbn) + if (!port->available_pbn) { + drm_modeset_lock(&mgr->base.lock, NULL); drm_dp_send_enum_path_resources(mgr, mstb, port); + drm_modeset_unlock(&mgr->base.lock); + changed = true; + } - if (port->mstb) { + if (port->mstb) mstb_child = drm_dp_mst_topology_get_mstb_validated( mgr, port->mstb); - if (mstb_child) { - drm_dp_check_and_send_link_address(mgr, mstb_child); - drm_dp_mst_topology_put_mstb(mstb_child); - } + + if (mstb_child) { + ret = drm_dp_check_and_send_link_address(mgr, + mstb_child); + drm_dp_mst_topology_put_mstb(mstb_child); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; } } + + return changed; } static void drm_dp_mst_link_probe_work(struct work_struct *work) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_device *dev = mgr->dev; struct drm_dp_mst_branch *mstb; int ret; + mutex_lock(&mgr->probe_lock); + mutex_lock(&mgr->lock); mstb = mgr->mst_primary; if (mstb) { @@ -2173,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) mstb = NULL; } mutex_unlock(&mgr->lock); - if (mstb) { - drm_dp_check_and_send_link_address(mgr, mstb); - drm_dp_mst_topology_put_mstb(mstb); + if (!mstb) { + mutex_unlock(&mgr->probe_lock); + return; } + + ret = drm_dp_check_and_send_link_address(mgr, mstb); + drm_dp_mst_topology_put_mstb(mstb); + + mutex_unlock(&mgr->probe_lock); + if (ret) + drm_kms_helper_hotplug_event(dev); } static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, @@ -2422,16 +2787,18 @@ drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply) } } -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_link_address_ack_reply *reply; - int i, len, ret; + struct drm_dp_mst_port *port, *tmp; + int i, len, ret, port_mask = 0; + bool changed = false; txmsg = 
kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return; + return -ENOMEM; txmsg->dst = mstb; len = build_link_address(txmsg); @@ -2457,16 +2824,39 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, drm_dp_check_mstb_guid(mstb, reply->guid); - for (i = 0; i < reply->nports; i++) - drm_dp_mst_handle_link_address_port(mstb, mgr->dev, - &reply->ports[i]); + for (i = 0; i < reply->nports; i++) { + port_mask |= BIT(reply->ports[i].port_number); + ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, + &reply->ports[i]); + if (ret == 1) + changed = true; + else if (ret < 0) + goto out; + } - drm_kms_helper_hotplug_event(mgr->dev); + /* Prune any ports that are currently a part of mstb in our in-memory + * topology, but were not seen in this link address. Usually this + * means that they were removed while the topology was out of sync, + * e.g. during suspend/resume + */ + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + if (port_mask & BIT(port->port_num)) + continue; + + DRM_DEBUG_KMS("port %d was not in link address, removing\n", + port->port_num); + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + changed = true; + } + mutex_unlock(&mgr->lock); out: if (ret <= 0) mstb->link_address_sent = false; kfree(txmsg); + return ret < 0 ? ret : changed; } static int @@ -3071,6 +3461,23 @@ out_unlock: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); +static void +drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + + /* The link address will need to be re-sent on resume */ + mstb->link_address_sent = false; + + list_for_each_entry(port, &mstb->ports, next) { + /* The PBN for each port will also need to be re-probed */ + port->available_pbn = 0; + + if (port->mstb) + drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); + } +} + /** * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager * @mgr: manager to suspend @@ -3084,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->up_req_work); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + flush_work(&mgr->delayed_destroy_work); + + mutex_lock(&mgr->lock); + if (mgr->mst_state && mgr->mst_primary) + drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); + mutex_unlock(&mgr->lock); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); /** * drm_dp_mst_topology_mgr_resume() - resume the MST manager * @mgr: manager to resume + * @sync: whether or not to perform topology reprobing synchronously * * This will fetch DPCD and see if the device is still there, * if it is, it will rewrite the MSTM control bits, and return. * - * if the device fails this returns -1, and the driver should do + * If the device fails this returns -1, and the driver should do * a full MST reprobe, in case we were undocked. + * + * During system resume (where it is assumed that the driver will be calling + * drm_atomic_helper_resume()) this function should be called beforehand with + * @sync set to true. In contexts like runtime resume where the driver is not + * expected to be calling drm_atomic_helper_resume(), this function should be + * called with @sync set to false in order to avoid deadlocking. + * + * Returns: -1 if the MST topology was removed while we were suspended, 0 + * otherwise. 
*/ -int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, + bool sync) { - int ret = 0; + int ret; + u8 guid[16]; mutex_lock(&mgr->lock); + if (!mgr->mst_primary) + goto out_fail; - if (mgr->mst_primary) { - int sret; - u8 guid[16]; + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, + DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } - sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); - if (sret != DP_RECEIVER_CAP_SIZE) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); + goto out_fail; + } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); - if (ret < 0) { - DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + /* Some hubs forget their guids after they resume */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); + if (ret != 16) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } + drm_dp_check_mstb_guid(mgr->mst_primary, guid); - /* Some hubs forget their guids after they resume */ - sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (sret != 16) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } - drm_dp_check_mstb_guid(mgr->mst_primary, guid); + /* + * For the final step of resuming the topology, we need to bring the + * state of our in-memory topology back into sync with reality. 
So, + * restart the probing process as if we're probing a new hub + */ + queue_work(system_long_wq, &mgr->work); + mutex_unlock(&mgr->lock); - ret = 0; - } else - ret = -1; + if (sync) { + DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n"); + flush_work(&mgr->work); + } -out_unlock: + return 0; + +out_fail: mutex_unlock(&mgr->lock); - return ret; + return -1; } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); @@ -3256,12 +3690,78 @@ clear_down_rep_recv: return 0; } +static inline bool +drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_pending_up_req *up_req) +{ + struct drm_dp_mst_branch *mstb = NULL; + struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; + struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; + bool hotplug = false; + + if (hdr->broadcast) { + const u8 *guid = NULL; + + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) + guid = msg->u.conn_stat.guid; + else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) + guid = msg->u.resource_stat.guid; + + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); + } else { + mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); + } + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", + hdr->lct); + return false; + } + + /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { + drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } + + drm_dp_mst_topology_put_mstb(mstb); + return hotplug; +} + +static void drm_dp_mst_up_req_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + up_req_work); + struct drm_dp_pending_up_req *up_req; + bool send_hotplug = false; + + mutex_lock(&mgr->probe_lock); + while (true) { + mutex_lock(&mgr->up_req_lock); + up_req = list_first_entry_or_null(&mgr->up_req_list, + struct drm_dp_pending_up_req, + next); + if (up_req) + list_del(&up_req->next); + mutex_unlock(&mgr->up_req_lock); + + if (!up_req) + break; + + send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req); + kfree(up_req); + } + mutex_unlock(&mgr->probe_lock); + + if (send_hotplug) + drm_kms_helper_hotplug_event(mgr->dev); +} + static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { - struct drm_dp_sideband_msg_req_body msg; struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr; - struct drm_dp_mst_branch *mstb = NULL; - const u8 *guid; + struct drm_dp_pending_up_req *up_req; bool seqno; if (!drm_dp_get_one_sb_msg(mgr, true)) @@ -3270,56 +3770,53 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) if (!mgr->up_req_recv.have_eomt) return 0; - if (!hdr->broadcast) { - mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto out; - } + up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); + if (!up_req) { + DRM_ERROR("Not enough memory to process MST up req\n"); + return -ENOMEM; } + INIT_LIST_HEAD(&up_req->next); seqno = hdr->seqno; - drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); + drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) - guid = msg.u.conn_stat.guid; - else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) - guid = msg.u.resource_stat.guid; - else + if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && + up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { + DRM_DEBUG_KMS("Received 
unknown up req type, ignoring: %x\n", + up_req->msg.req_type); + kfree(up_req); goto out; - - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, - false); - - if (!mstb) { - mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto out; - } } - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { - drm_dp_mst_handle_conn_stat(mstb, &msg.u.conn_stat); + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + seqno, false); + + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + const struct drm_dp_connection_status_notify *conn_stat = + &up_req->msg.u.conn_stat; DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", - msg.u.conn_stat.port_number, - msg.u.conn_stat.legacy_device_plug_status, - msg.u.conn_stat.displayport_device_plug_status, - msg.u.conn_stat.message_capability_status, - msg.u.conn_stat.input_port, - msg.u.conn_stat.peer_device_type); + conn_stat->port_number, + conn_stat->legacy_device_plug_status, + conn_stat->displayport_device_plug_status, + conn_stat->message_capability_status, + conn_stat->input_port, + conn_stat->peer_device_type); + } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + const struct drm_dp_resource_status_notify *res_stat = + &up_req->msg.u.resource_stat; - drm_kms_helper_hotplug_event(mgr->dev); - } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", - msg.u.resource_stat.port_number, - msg.u.resource_stat.available_pbn); + res_stat->port_number, + res_stat->available_pbn); } - drm_dp_mst_topology_put_mstb(mstb); + up_req->hdr = *hdr; + mutex_lock(&mgr->up_req_lock); + list_add_tail(&up_req->next, &mgr->up_req_list); + mutex_unlock(&mgr->up_req_lock); + queue_work(system_long_wq, &mgr->up_req_work); + out: memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; @@ -3366,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq); /** * drm_dp_mst_detect_port() - get connection status for an MST port * @connector: DRM connector for this port + * @ctx: The acquisition context to use for grabbing locks * @mgr: manager for this port - * @port: unverified pointer to a port + * @port: pointer to a port * - * This returns the current connection state for a port. It validates the - * port pointer still exists so the caller doesn't require a reference + * This returns the current connection state for a port. 
*/ -enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, - struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +int +drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) { - enum drm_connector_status status = connector_status_disconnected; + int ret; /* we need to search for the port in the mgr in case it's gone */ port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return connector_status_disconnected; + ret = drm_modeset_lock(&mgr->base.lock, ctx); + if (ret) + goto out; + + ret = connector_status_disconnected; + if (!port->ddps) goto out; @@ -3391,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_SST_SINK: - status = connector_status_connected; + ret = connector_status_connected; /* for logical ports - cache the EDID */ if (port->port_num >= 8 && !port->cached_edid) { port->cached_edid = drm_get_edid(connector, &port->aux.ddc); @@ -3399,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) - status = connector_status_connected; + ret = connector_status_connected; break; } out: drm_dp_mst_topology_put_port(port); - return status; + return ret; } EXPORT_SYMBOL(drm_dp_mst_detect_port); @@ -3994,34 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work) mutex_unlock(&mgr->qlock); } -static void drm_dp_destroy_connector_work(struct work_struct *work) +static inline void +drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); - struct drm_dp_mst_port *port; - bool send_hotplug = false; + if (port->connector) + port->mgr->cbs->destroy_connector(port->mgr, port->connector); + + drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE); + drm_dp_mst_put_port_malloc(port); +} + +static inline void +drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + struct drm_dp_mst_port *port, *tmp; + bool wake_tx = false; + + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + } + mutex_unlock(&mgr->lock); + + /* drop any tx slots msg */ + mutex_lock(&mstb->mgr->qlock); + if (mstb->tx_slots[0]) { + mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[0] = NULL; + wake_tx = true; + } + if (mstb->tx_slots[1]) { + mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[1] = NULL; + wake_tx = true; + } + mutex_unlock(&mstb->mgr->qlock); + + if (wake_tx) + wake_up_all(&mstb->mgr->tx_waitq); + + drm_dp_mst_put_mstb_malloc(mstb); +} + +static void drm_dp_delayed_destroy_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + delayed_destroy_work); + bool send_hotplug = false, go_again; + /* * Not a regular list traverse as we have to drop the destroy - * connector lock before destroying the connector, to avoid AB->BA + * connector lock before destroying the mstb/port, to avoid AB->BA * ordering between this lock and the config mutex. 
*/ - for (;;) { - mutex_lock(&mgr->destroy_connector_lock); - port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); - if (!port) { - mutex_unlock(&mgr->destroy_connector_lock); - break; + do { + go_again = false; + + for (;;) { + struct drm_dp_mst_branch *mstb; + + mutex_lock(&mgr->delayed_destroy_lock); + mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, + struct drm_dp_mst_branch, + destroy_next); + if (mstb) + list_del(&mstb->destroy_next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!mstb) + break; + + drm_dp_delayed_destroy_mstb(mstb); + go_again = true; } - list_del(&port->next); - mutex_unlock(&mgr->destroy_connector_lock); - mgr->cbs->destroy_connector(mgr, port->connector); + for (;;) { + struct drm_dp_mst_port *port; - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + mutex_lock(&mgr->delayed_destroy_lock); + port = list_first_entry_or_null(&mgr->destroy_port_list, + struct drm_dp_mst_port, + next); + if (port) + list_del(&port->next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!port) + break; + + drm_dp_delayed_destroy_port(port); + send_hotplug = true; + go_again = true; + } + } while (go_again); - drm_dp_mst_put_port_malloc(port); - send_hotplug = true; - } if (send_hotplug) drm_kms_helper_hotplug_event(mgr->dev); } @@ -4208,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mutex_init(&mgr->lock); mutex_init(&mgr->qlock); mutex_init(&mgr->payload_lock); - mutex_init(&mgr->destroy_connector_lock); + mutex_init(&mgr->delayed_destroy_lock); + mutex_init(&mgr->up_req_lock); + mutex_init(&mgr->probe_lock); +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + mutex_init(&mgr->topology_ref_history_lock); +#endif INIT_LIST_HEAD(&mgr->tx_msg_downq); - INIT_LIST_HEAD(&mgr->destroy_connector_list); + INIT_LIST_HEAD(&mgr->destroy_port_list); + INIT_LIST_HEAD(&mgr->destroy_branch_device_list); + INIT_LIST_HEAD(&mgr->up_req_list); INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); INIT_WORK(&mgr->tx_work, drm_dp_tx_work); - INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); + INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); + INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); init_waitqueue_head(&mgr->tx_waitq); mgr->dev = dev; mgr->aux = aux; @@ -4254,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { drm_dp_mst_topology_mgr_set_mst(mgr, false); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + cancel_work_sync(&mgr->delayed_destroy_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); mgr->payloads = NULL; @@ -4266,10 +4849,15 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) drm_atomic_private_obj_fini(&mgr->base); mgr->funcs = NULL; - mutex_destroy(&mgr->destroy_connector_lock); + mutex_destroy(&mgr->delayed_destroy_lock); mutex_destroy(&mgr->payload_lock); mutex_destroy(&mgr->qlock); mutex_destroy(&mgr->lock); + mutex_destroy(&mgr->up_req_lock); + mutex_destroy(&mgr->probe_lock); +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + mutex_destroy(&mgr->topology_ref_history_lock); +#endif } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 5eeafa45831a..403b593a3eb4 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -7625,7 +7625,8 @@ void intel_dp_mst_resume(struct 
drm_i915_private *dev_priv) if (!intel_dp->can_mst) continue; - ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); + ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, + true); if (ret) { intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index bbcab27644dc..a9962846a503 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -391,20 +391,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) return ret; } -static enum drm_connector_status -intel_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_dp *intel_dp = intel_connector->mst_port; - - if (drm_connector_is_unregistered(connector)) - return connector_status_disconnected; - return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, - intel_connector->port); -} - static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { - .detect = intel_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, @@ -465,11 +452,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c return &intel_dp->mst_encoders[crtc->pipe]->base.base; } +static int +intel_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + + if (drm_connector_is_unregistered(connector)) + return connector_status_disconnected; + + return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr, + intel_connector->port); +} + static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, .atomic_best_encoder = intel_mst_atomic_best_encoder, .atomic_check = intel_dp_mst_atomic_check, + .detect_ctx = intel_dp_mst_detect, }; static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index a13924ae1992..549486f1d937 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector, return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port); } -static const struct drm_connector_helper_funcs -nv50_mstc_help = { - .get_modes = nv50_mstc_get_modes, - .mode_valid = nv50_mstc_mode_valid, - .best_encoder = nv50_mstc_best_encoder, - .atomic_best_encoder = nv50_mstc_atomic_best_encoder, - .atomic_check = nv50_mstc_atomic_check, -}; - -static enum drm_connector_status -nv50_mstc_detect(struct drm_connector *connector, bool force) +static int +nv50_mstc_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) { struct nv50_mstc *mstc = nv50_mstc(connector); - enum drm_connector_status conn_status; int ret; if (drm_connector_is_unregistered(connector)) @@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) if (ret < 0 && ret != -EACCES) return connector_status_disconnected; - conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, 
- mstc->port); + ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr, + mstc->port); pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_put_autosuspend(connector->dev->dev); - return conn_status; + return ret; } +static const struct drm_connector_helper_funcs +nv50_mstc_help = { + .get_modes = nv50_mstc_get_modes, + .mode_valid = nv50_mstc_mode_valid, + .best_encoder = nv50_mstc_best_encoder, + .atomic_best_encoder = nv50_mstc_atomic_best_encoder, + .atomic_check = nv50_mstc_atomic_check, + .detect_ctx = nv50_mstc_detect, +}; + static void nv50_mstc_destroy(struct drm_connector *connector) { @@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector) static const struct drm_connector_funcs nv50_mstc = { .reset = nouveau_conn_reset, - .detect = nv50_mstc_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = nv50_mstc_destroy, .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state, @@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm) } static void -nv50_mstm_init(struct nv50_mstm *mstm) +nv50_mstm_init(struct nv50_mstm *mstm, bool runtime) { int ret; if (!mstm || !mstm->mgr.mst_state) return; - ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime); if (ret == -1) { drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); drm_kms_helper_hotplug_event(mstm->mgr.dev); @@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime) if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - nv50_mstm_init(nv_encoder->dp.mstm); + nv50_mstm_init(nv_encoder->dp.mstm, runtime); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 3a5db17bc5c7..5b413588b823 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1130,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) const char *name = connector->name; struct nouveau_encoder *nv_encoder; int ret; + bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); + + if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { + NV_DEBUG(drm, "service %s\n", name); + drm_dp_cec_irq(&nv_connector->aux); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) + nv50_mstm_service(nv_encoder->dp.mstm); + + return NVIF_NOTIFY_KEEP; + } ret = pm_runtime_get(drm->dev->dev); if (ret == 0) { @@ -1150,25 +1160,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) return NVIF_NOTIFY_DROP; } - if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { - NV_DEBUG(drm, "service %s\n", name); - drm_dp_cec_irq(&nv_connector->aux); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) - nv50_mstm_service(nv_encoder->dp.mstm); - } else { - bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); - + if (!plugged) + drm_dp_cec_unset_edid(&nv_connector->aux); + NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { if (!plugged) - drm_dp_cec_unset_edid(&nv_connector->aux); - NV_DEBUG(drm, "%splugged %s\n", plugged ? 
"" : "un", name); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { - if (!plugged) - nv50_mstm_remove(nv_encoder->dp.mstm); - } - - drm_helper_hpd_irq_event(connector->dev); + nv50_mstm_remove(nv_encoder->dp.mstm); } + drm_helper_hpd_irq_event(connector->dev); + pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_autosuspend(drm->dev->dev); return NVIF_NOTIFY_KEEP; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6f038511a03a..53f9bceaf17a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) struct drm_connector_list_iter conn_iter; int ret; + /* + * Enable hotplug interrupts (done as early as possible, since we need + * them for MST) + */ + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + struct nouveau_connector *conn = nouveau_connector(connector); + nvif_notify_get(&conn->hpd); + } + drm_connector_list_iter_end(&conn_iter); + ret = disp->init(dev, resume, runtime); if (ret) return ret; @@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) */ drm_kms_helper_poll_enable(dev); - /* enable hotplug interrupts */ - drm_connector_list_iter_begin(dev, &conn_iter); - nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { - struct nouveau_connector *conn = nouveau_connector(connector); - nvif_notify_get(&conn->hpd); - } - drm_connector_list_iter_end(&conn_iter); - return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 2994f07fbad9..ee28f5b3785e 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) return &radeon_connector->mst_encoder->base; } +static int +radeon_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct radeon_connector *radeon_connector = + to_radeon_connector(connector); + struct radeon_connector *master = radeon_connector->mst_port; + + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + radeon_connector->port); +} + static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = { .get_modes = radeon_dp_mst_get_modes, .mode_valid = radeon_dp_mst_mode_valid, .best_encoder = radeon_mst_best_encoder, + .detect_ctx = radeon_dp_mst_detect, }; -static enum drm_connector_status -radeon_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector *master = radeon_connector->mst_port; - - return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port); -} - static void radeon_dp_mst_connector_destroy(struct drm_connector *connector) { @@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector) static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { .dpms = drm_helper_connector_dpms, - .detect = radeon_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_dp_mst_connector_destroy, }; |