/*
 * Copyright (c) 2016 Intel Corporation
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <drm/drm_encoder.h>
#include <drm/drm_mode_config.h>
#include <drm/drmP.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

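/*
 * drm_modeset_register_all() and drm_modeset_unregister_all() below are
 * internal helpers: the DRM core calls them when registering or
 * unregistering a modeset-capable device with userspace, so individual
 * drivers are not expected to call them directly.
 */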
int drm_modeset_register_all(struct drm_device *dev)
{
	int ret;

	ret = drm_plane_register_all(dev);
	if (ret)
		goto err_plane;

	ret = drm_crtc_register_all(dev);
	if (ret)
		goto err_crtc;

	ret = drm_encoder_register_all(dev);
	if (ret)
		goto err_encoder;

	ret = drm_connector_register_all(dev);
	if (ret)
		goto err_connector;

	return 0;

err_connector:
	drm_encoder_unregister_all(dev);
err_encoder:
	drm_crtc_unregister_all(dev);
err_crtc:
	drm_plane_unregister_all(dev);
err_plane:
	return ret;
}

void drm_modeset_unregister_all(struct drm_device *dev)
{
	drm_connector_unregister_all(dev);
	drm_encoder_unregister_all(dev);
	drm_crtc_unregister_all(dev);
	drm_plane_unregister_all(dev);
}

/**
 * drm_mode_getresources - get graphics configuration
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Construct a set of configuration description structures and return
 * them to the user, including CRTC, connector and framebuffer configuration.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_getresources(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_mode_card_res *card_res = data;
	struct drm_framebuffer *fb;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	int count, ret = 0;
	uint32_t __user *fb_id;
	uint32_t __user *crtc_id;
	uint32_t __user *connector_id;
	uint32_t __user *encoder_id;
	struct drm_connector_list_iter conn_iter;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	mutex_lock(&file_priv->fbs_lock);
	count = 0;
	fb_id = u64_to_user_ptr(card_res->fb_id_ptr);
	list_for_each_entry(fb, &file_priv->fbs, filp_head) {
		if (count < card_res->count_fbs &&
		    put_user(fb->base.id, fb_id + count)) {
			mutex_unlock(&file_priv->fbs_lock);
			return -EFAULT;
		}
		count++;
	}
	card_res->count_fbs = count;
	mutex_unlock(&file_priv->fbs_lock);

	card_res->max_height = dev->mode_config.max_height;
	card_res->min_height = dev->mode_config.min_height;
	card_res->max_width = dev->mode_config.max_width;
	card_res->min_width = dev->mode_config.min_width;

	count = 0;
	crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr);
	drm_for_each_crtc(crtc, dev) {
		if (count < card_res->count_crtcs &&
		    put_user(crtc->base.id, crtc_id + count))
			return -EFAULT;
		count++;
	}
	card_res->count_crtcs = count;

	count = 0;
	encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr);
	drm_for_each_encoder(encoder, dev) {
		if (count < card_res->count_encoders &&
		    put_user(encoder->base.id, encoder_id + count))
			return -EFAULT;
		count++;
	}
	card_res->count_encoders = count;

	drm_connector_list_iter_get(dev, &conn_iter);
	count = 0;
	connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (count < card_res->count_connectors &&
		    put_user(connector->base.id, connector_id + count)) {
			drm_connector_list_iter_put(&conn_iter);
			return -EFAULT;
		}
		count++;
	}
	card_res->count_connectors = count;
	drm_connector_list_iter_put(&conn_iter);

	return ret;
}
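/*
 * Illustrative only (not part of the kernel build): a minimal sketch of how
 * userspace typically reaches this ioctl through libdrm. The device node path
 * "/dev/dri/card0" is an assumption for the example; error handling is
 * reduced to a bare NULL check.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <xf86drmMode.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/dri/card0", O_RDWR);
 *		drmModeRes *res = drmModeGetResources(fd); // wraps DRM_IOCTL_MODE_GETRESOURCES
 *
 *		if (res) {
 *			printf("%d CRTCs, %d encoders, %d connectors\n",
 *			       res->count_crtcs, res->count_encoders,
 *			       res->count_connectors);
 *			drmModeFreeResources(res);
 *		}
 *		return 0;
 *	}
 */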
/**
 * drm_mode_config_reset - call ->reset callbacks
 * @dev: drm device
 *
 * This function calls all the crtc's, encoder's and connector's ->reset
 * callback. Drivers can use this in e.g. their driver load or resume code to
 * reset hardware and software state.
 */
void drm_mode_config_reset(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_for_each_plane(plane, dev)
		if (plane->funcs->reset)
			plane->funcs->reset(plane);

	drm_for_each_crtc(crtc, dev)
		if (crtc->funcs->reset)
			crtc->funcs->reset(crtc);

	drm_for_each_encoder(encoder, dev)
		if (encoder->funcs->reset)
			encoder->funcs->reset(encoder);

	drm_connector_list_iter_get(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		if (connector->funcs->reset)
			connector->funcs->reset(connector);
	drm_connector_list_iter_put(&conn_iter);
}
EXPORT_SYMBOL(drm_mode_config_reset);
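/*
 * Illustrative only: a minimal sketch of the intended use from a driver's
 * resume path, per the kerneldoc above. "foo_pm_resume" and "foo_hw_init"
 * are hypothetical names used for the example, not part of any real driver.
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct drm_device *drm = dev_get_drvdata(dev);
 *
 *		foo_hw_init(drm);		// hypothetical hardware bring-up
 *		drm_mode_config_reset(drm);	// resync software object state
 *		return 0;
 *	}
 */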
/*
 * Global properties
 */
static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
	{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
	{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
	{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
};

static int drm_mode_create_standard_properties(struct drm_device *dev)
{
	struct drm_property *prop;
	int ret;

	ret = drm_connector_create_standard_properties(dev);
	if (ret)
		return ret;

	prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
					"type", drm_plane_type_enum_list,
					ARRAY_SIZE(drm_plane_type_enum_list));
	if (!prop)
		return -ENOMEM;
	dev->mode_config.plane_type_property = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
					 "SRC_X", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_src_x = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
					 "SRC_Y", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_src_y = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
					 "SRC_W", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_src_w = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
					 "SRC_H", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_src_h = prop;

	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
						"CRTC_X", INT_MIN, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_x = prop;

	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
						"CRTC_Y", INT_MIN, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_y = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
					 "CRTC_W", 0, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_w = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
					 "CRTC_H", 0, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_h = prop;

	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
					  "FB_ID", DRM_MODE_OBJECT_FB);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_fb_id = prop;

	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
						"IN_FENCE_FD", -1, INT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_in_fence_fd = prop;

	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
					 "OUT_FENCE_PTR", 0, U64_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_out_fence_ptr = prop;

	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
					  "CRTC_ID", DRM_MODE_OBJECT_CRTC);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_crtc_id = prop;

	prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
					"ACTIVE");
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_active = prop;

	prop = drm_property_create(dev,
				   DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
				   "MODE_ID", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.prop_mode_id = prop;

	prop = drm_property_create(dev,
				   DRM_MODE_PROP_BLOB,
				   "DEGAMMA_LUT", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.degamma_lut_property = prop;

	prop = drm_property_create_range(dev,
					 DRM_MODE_PROP_IMMUTABLE,
					 "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.degamma_lut_size_property = prop;

	prop = drm_property_create(dev,
				   DRM_MODE_PROP_BLOB,
				   "CTM", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.ctm_property = prop;

	prop = drm_property_create(dev,
				   DRM_MODE_PROP_BLOB,
				   "GAMMA_LUT", 0);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.gamma_lut_property = prop;

	prop = drm_property_create_range(dev,
					 DRM_MODE_PROP_IMMUTABLE,
					 "GAMMA_LUT_SIZE", 0, UINT_MAX);
	if (!prop)
		return -ENOMEM;
	dev->mode_config.gamma_lut_size_property = prop;

	return 0;
}
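/*
 * Illustrative only: userspace can inspect the properties created above
 * through libdrm. A rough sketch for reading a plane's "type" property;
 * "fd" and "plane_id" are assumed to be a valid DRM fd and plane object ID,
 * and error handling is mostly omitted.
 *
 *	drmModeObjectPropertiesPtr props =
 *		drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
 *
 *	for (uint32_t i = 0; props && i < props->count_props; i++) {
 *		drmModePropertyPtr p = drmModeGetProperty(fd, props->props[i]);
 *
 *		if (p && strcmp(p->name, "type") == 0)
 *			printf("plane type enum value: %llu\n",
 *			       (unsigned long long)props->prop_values[i]);
 *		drmModeFreeProperty(p);
 *	}
 *	drmModeFreeObjectProperties(props);
 */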
/**
 * drm_mode_config_init - initialize DRM mode_configuration structure
 * @dev: DRM device
 *
 * Initialize @dev's mode_config structure, used for tracking the graphics
 * configuration of @dev.
 *
 * Since this initializes the modeset locks, no locking is possible. That is no
 * problem, since this should happen single-threaded at init time. It is the
 * driver's job to ensure this guarantee.
 */
void drm_mode_config_init(struct drm_device *dev)
{
	mutex_init(&dev->mode_config.mutex);
	drm_modeset_lock_init(&dev->mode_config.connection_mutex);
	mutex_init(&dev->mode_config.idr_mutex);
	mutex_init(&dev->mode_config.fb_lock);
	mutex_init(&dev->mode_config.blob_lock);
	INIT_LIST_HEAD(&dev->mode_config.fb_list);
	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
	INIT_LIST_HEAD(&dev->mode_config.connector_list);
	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
	INIT_LIST_HEAD(&dev->mode_config.property_list);
	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
	INIT_LIST_HEAD(&dev->mode_config.plane_list);
	idr_init(&dev->mode_config.crtc_idr);
	idr_init(&dev->mode_config.tile_idr);
	ida_init(&dev->mode_config.connector_ida);
	spin_lock_init(&dev->mode_config.connector_list_lock);

	drm_mode_create_standard_properties(dev);

	/* Just to be sure */
	dev->mode_config.num_fb = 0;
	dev->mode_config.num_connector = 0;
	dev->mode_config.num_crtc = 0;
	dev->mode_config.num_encoder = 0;
	dev->mode_config.num_overlay_plane = 0;
	dev->mode_config.num_total_plane = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);
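/*
 * Illustrative only: a minimal sketch of the expected call sequence in a
 * driver's modeset init path. "foo_modeset_init", "foo_mode_config_funcs"
 * and the 4096x4096 limits are hypothetical values for the example.
 *
 *	static void foo_modeset_init(struct drm_device *drm)
 *	{
 *		drm_mode_config_init(drm);
 *
 *		drm->mode_config.min_width = 0;
 *		drm->mode_config.min_height = 0;
 *		drm->mode_config.max_width = 4096;
 *		drm->mode_config.max_height = 4096;
 *		drm->mode_config.funcs = &foo_mode_config_funcs;
 *
 *		// ... create planes, CRTCs, encoders and connectors here ...
 *	}
 */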
/**
 * drm_mode_config_cleanup - free up DRM mode_config info
 * @dev: DRM device
 *
 * Free up all the connectors and CRTCs associated with this DRM device, then
 * free up the framebuffers and associated buffer objects.
 *
 * Note that since this /should/ happen single-threaded at driver/device
 * teardown time, no locking is required. It's the driver's job to ensure that
 * this guarantee actually holds true.
 *
 * FIXME: cleanup any dangling user buffer objects too
 */
void drm_mode_config_cleanup(struct drm_device *dev)
{
	struct drm_connector *connector, *ot;
	struct drm_crtc *crtc, *ct;
	struct drm_encoder *encoder, *enct;
	struct drm_framebuffer *fb, *fbt;
	struct drm_property *property, *pt;
	struct drm_property_blob *blob, *bt;
	struct drm_plane *plane, *plt;

	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
				 head) {
		encoder->funcs->destroy(encoder);
	}

	list_for_each_entry_safe(connector, ot,
				 &dev->mode_config.connector_list, head) {
		connector->funcs->destroy(connector);
	}

	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
				 head) {
		drm_property_destroy(dev, property);
	}

	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
				 head) {
		plane->funcs->destroy(plane);
	}

	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
		crtc->funcs->destroy(crtc);
	}

	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
				 head_global) {
		drm_property_unreference_blob(blob);
	}

	/*
	 * Single-threaded teardown context, so it's not required to grab the
	 * fb_lock to protect against concurrent fb_list access. On the
	 * contrary, it would actually deadlock with the
	 * drm_framebuffer_cleanup function.
	 *
	 * Also, if there are any framebuffers left, that's a driver leak now,
	 * so politely WARN about this.
	 */
	WARN_ON(!list_empty(&dev->mode_config.fb_list));
	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
		drm_framebuffer_free(&fb->base.refcount);
	}

	ida_destroy(&dev->mode_config.connector_ida);
	idr_destroy(&dev->mode_config.tile_idr);
	idr_destroy(&dev->mode_config.crtc_idr);
	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
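/*
 * Illustrative only: a minimal sketch of teardown ordering in a hypothetical
 * driver's unbind path, mirroring the foo_modeset_init() sketch above. All
 * "foo_*" names are placeholders for the example.
 *
 *	static void foo_unbind(struct device *dev)
 *	{
 *		struct drm_device *drm = dev_get_drvdata(dev);
 *
 *		drm_dev_unregister(drm);	// stop userspace access first
 *		drm_mode_config_cleanup(drm);	// then free the modeset objects
 *		drm_dev_unref(drm);
 *	}
 */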