drm/dp/mst: close deadlock in connector destruction.

I've only seen this once, and I failed to capture the
lockdep backtrace, but I did some investigating.

When we call into the MST layer from EDID probing, we hold
the mode_config mutex. If the MST hub goes away during that
EDID probe, we can hit a deadlock: the connector destruction
function in the driver tries to retake the mode_config mutex.

This offloads connector destruction to a workqueue and avoids
the subsequent lock-ordering issue.

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: stable@vger.kernel.org
Signed-off-by: Dave Airlie <airlied@redhat.com>
Dave Airlie 2015-06-15 10:34:28 +10:00
Parent 8b72ce158c
Commit 6b8eeca65b
3 changed files with 44 additions and 2 deletions
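The patch boils down to a standard deferred-destruction scheme: the teardown path only queues the connector on a list under a small dedicated lock and schedules a work item, and that work item later drains the list and calls the driver's destroy_connector callback with no locks held. Below is a minimal, self-contained sketch of that pattern; the names (deferred_obj, pending_destroy, deferred_destroy_work_fn, deferred_destroy) are illustrative only and do not appear in the driver.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_obj {
	struct list_head destroy_list;
	/* ... driver state ... */
};

static LIST_HEAD(pending_destroy);
static DEFINE_MUTEX(pending_destroy_lock);
static void deferred_destroy_work_fn(struct work_struct *work);
static DECLARE_WORK(pending_destroy_work, deferred_destroy_work_fn);

/* Teardown path: may already hold mode_config.mutex, so only queue. */
static void deferred_destroy(struct deferred_obj *obj)
{
	mutex_lock(&pending_destroy_lock);
	list_add(&obj->destroy_list, &pending_destroy);
	mutex_unlock(&pending_destroy_lock);
	schedule_work(&pending_destroy_work);
}

/* Work item: runs with no locks held, so the real teardown is free to
 * take mode_config.mutex without creating an AB->BA cycle. */
static void deferred_destroy_work_fn(struct work_struct *work)
{
	struct deferred_obj *obj;

	for (;;) {
		mutex_lock(&pending_destroy_lock);
		obj = list_first_entry_or_null(&pending_destroy,
					       struct deferred_obj, destroy_list);
		if (!obj) {
			mutex_unlock(&pending_destroy_lock);
			break;
		}
		list_del(&obj->destroy_list);
		/* drop the list lock before the heavyweight teardown */
		mutex_unlock(&pending_destroy_lock);

		kfree(obj);	/* stands in for cbs->destroy_connector() */
	}
}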

drivers/gpu/drm/drm_dp_mst_topology.c

@@ -867,8 +867,16 @@ static void drm_dp_destroy_port(struct kref *kref)
 		port->vcpi.num_slots = 0;
 
 		kfree(port->cached_edid);
-		if (port->connector)
-			(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
+
+		/* we can't destroy the connector here, as
+		   we might be holding the mode_config.mutex
+		   from an EDID retrieval */
+		if (port->connector) {
+			mutex_lock(&mgr->destroy_connector_lock);
+			list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+			mutex_unlock(&mgr->destroy_connector_lock);
+			schedule_work(&mgr->destroy_connector_work);
+		}
 		drm_dp_port_teardown_pdt(port, port->pdt);
 
 		if (!port->input && port->vcpi.vcpi > 0)
@@ -2649,6 +2657,30 @@ static void drm_dp_tx_work(struct work_struct *work)
 	mutex_unlock(&mgr->qlock);
 }
 
+static void drm_dp_destroy_connector_work(struct work_struct *work)
+{
+	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
+	struct drm_connector *connector;
+
+	/*
+	 * Not a regular list traverse as we have to drop the destroy
+	 * connector lock before destroying the connector, to avoid AB->BA
+	 * ordering between this lock and the config mutex.
+	 */
+	for (;;) {
+		mutex_lock(&mgr->destroy_connector_lock);
+		connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
+		if (!connector) {
+			mutex_unlock(&mgr->destroy_connector_lock);
+			break;
+		}
+		list_del(&connector->destroy_list);
+		mutex_unlock(&mgr->destroy_connector_lock);
+
+		mgr->cbs->destroy_connector(mgr, connector);
+	}
+}
+
 /**
  * drm_dp_mst_topology_mgr_init - initialise a topology manager
  * @mgr: manager struct to initialise
@@ -2668,10 +2700,13 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	mutex_init(&mgr->lock);
 	mutex_init(&mgr->qlock);
 	mutex_init(&mgr->payload_lock);
+	mutex_init(&mgr->destroy_connector_lock);
 	INIT_LIST_HEAD(&mgr->tx_msg_upq);
 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
+	INIT_LIST_HEAD(&mgr->destroy_connector_list);
 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
+	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
 	init_waitqueue_head(&mgr->tx_waitq);
 	mgr->dev = dev;
 	mgr->aux = aux;
@@ -2696,6 +2731,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
  */
 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
+	flush_work(&mgr->destroy_connector_work);
 	mutex_lock(&mgr->payload_lock);
 	kfree(mgr->payloads);
 	mgr->payloads = NULL;

include/drm/drm_crtc.h

@@ -743,6 +743,8 @@ struct drm_connector {
 	uint8_t num_h_tile, num_v_tile;
 	uint8_t tile_h_loc, tile_v_loc;
 	uint16_t tile_h_size, tile_v_size;
+
+	struct list_head destroy_list;
 };
 
 /**

include/drm/drm_dp_mst_helper.h

@@ -463,6 +463,10 @@ struct drm_dp_mst_topology_mgr {
 	struct work_struct work;
 
 	struct work_struct tx_work;
+
+	struct list_head destroy_connector_list;
+	struct mutex destroy_connector_lock;
+	struct work_struct destroy_connector_work;
 };
 
 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
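For context, here is a hypothetical implementation of the driver-side cbs->destroy_connector hook (not taken from i915 or radeon, shown only to illustrate why the deferral matters): because it now runs from destroy_connector_work rather than from the EDID-probe path, it can safely take modeset locks.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_mst_helper.h>

static void example_mst_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
					  struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	drm_connector_unregister(connector);

	/* Safe here: we are on the dedicated work item, not under
	 * mode_config.mutex from an EDID retrieval. */
	drm_modeset_lock_all(dev);
	/* ... detach the connector from fbdev/driver state ... */
	drm_modeset_unlock_all(dev);

	drm_connector_cleanup(connector);
	kfree(connector);
}

A driver would point mgr->cbs->destroy_connector at a function like this; drm_dp_destroy_connector_work() above then invokes it once for each queued connector.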