Merge tag 'topic/drm-misc-2016-05-04' of git://anongit.freedesktop.org/drm-intel into drm-next
Ofc I promise just a few leftovers for drm-misc and somehow it's the
biggest pull. But really mostly trivial stuff:

- MAINTAINERS updates from Emil
- rename async to nonblock in atomic_commit to avoid the confusion
  between nonblocking ioctl and async flip (= not vblank synced), from
  Maarten. Needs to be regened with newer drivers, but probably only
  after -rc1 to catch them all.
- actually lockless gem_object_free, plus acked driver conversion
  patches. All the trickier prep stuff already is in drm-next.
- Noralf's nice work for generic defio support in our fbdev emulation.
  Keeps the udl hack, and qxl is tested by Gerd.

* tag 'topic/drm-misc-2016-05-04' of git://anongit.freedesktop.org/drm-intel: (47 commits)
  drm: Fixup locking WARN_ON mistake around gem_object_free_unlocked
  drm/etnaviv: Use lockless gem BO free callback
  drm/imx: Use lockless gem BO free callback
  drm/radeon: Use lockless gem BO free callback
  drm/amdgpu: Use lockless gem BO free callback
  drm/gem: support BO freeing without dev->struct_mutex
  MAINTAINERS: Add myself for the new VC4 (RPi GPU) graphics driver.
  MAINTAINERS: Add a bunch of legacy (UMS) DRM drivers
  MAINTAINERS: Add a few DRM drivers by Dave Airlie
  MAINTAINERS: List the correct git repo for the Renesas DRM drivers
  MAINTAINERS: Update the files list for the Renesas DRM drivers
  MAINTAINERS: Update the files list for the Armada DRM driver
  MAINTAINERS: Update the files list for the Rockchip DRM driver
  MAINTAINERS: Update the files list for the Exynos DRM driver
  MAINTAINERS: Add maintainer entry for the VMWGFX DRM driver
  MAINTAINERS: Add maintainer entry for the MSM DRM driver
  MAINTAINERS: Add maintainer entry for the Nouveau DRM driver
  MAINTAINERS: Update the files list for the Etnaviv DRM driver
  MAINTAINERS: Remove unneded wildcard for the i915 DRM driver
  drm/atomic: Add WARN_ON when state->acquire_ctx is not set.
  ...
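For orientation before the diffs, here is a minimal driver-side sketch of the
two conversions this tag asks for. It is illustrative only, not taken from
this pull; every foo_* name is hypothetical:

	#include <linux/slab.h>
	#include <drm/drmP.h>
	#include <drm/drm_atomic_helper.h>

	/* Hypothetical free callback: with the new _unlocked hook it may run
	 * without dev->struct_mutex held, so it must not assume that lock. */
	static void foo_gem_free_object(struct drm_gem_object *obj)
	{
		drm_gem_object_release(obj);
		kfree(obj);
	}

	static struct drm_driver foo_driver = {
		/* was: .gem_free_object = foo_gem_free_object, */
		.gem_free_object_unlocked = foo_gem_free_object,
	};

	/* The atomic_commit callback keeps its shape; only the last parameter
	 * is renamed. It requests a nonblocking commit, which is unrelated to
	 * an async (non-vblank-synced) page flip. */
	static int foo_atomic_commit(struct drm_device *dev,
				     struct drm_atomic_state *state,
				     bool nonblock)
	{
		return drm_atomic_helper_commit(dev, state, nonblock);
	}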
@@ -1817,7 +1817,7 @@ void intel_crt_init(struct drm_device *dev)
</tr>
<tr>
<td rowspan="42" valign="top" >DRM</td>
<td valign="top" >Generic</td>
<td rowspan="2" valign="top" >Generic</td>
<td valign="top" >“rotation”</td>
<td valign="top" >BITMASK</td>
<td valign="top" >{ 0, "rotate-0" },
@@ -1832,6 +1832,13 @@ void intel_crt_init(struct drm_device *dev)
image along the specified axis prior to rotation</td>
</tr>
<tr>
<td valign="top" >“scaling mode”</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
<td valign="top" >Connector</td>
<td valign="top" >Supported by: amdgpu, gma500, i915, nouveau and radeon.</td>
</tr>
<tr>
<td rowspan="5" valign="top" >Connector</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
@@ -2068,21 +2075,12 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >property to suggest an Y offset for a connector</td>
</tr>
<tr>
<td rowspan="8" valign="top" >Optional</td>
<td valign="top" >“scaling mode”</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
<td valign="top" >Connector</td>
<td valign="top" >TBD</td>
</tr>
<tr>
<td rowspan="7" valign="top" >Optional</td>
<td valign="top" >"aspect ratio"</td>
<td valign="top" >ENUM</td>
<td valign="top" >{ "None", "4:3", "16:9" }</td>
<td valign="top" >Connector</td>
<td valign="top" >DRM property to set aspect ratio from user space app.
This enum is made generic to allow addition of custom aspect
ratios.</td>
<td valign="top" >TDB</td>
</tr>
<tr>
<td valign="top" >“dirty”</td>

MAINTAINERS
@@ -3768,6 +3768,21 @@ F: drivers/gpu/vga/
F: include/drm/
F: include/uapi/drm/

DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/ast/

DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/bochs/

DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/cirrus/

RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
@@ -3800,7 +3815,7 @@ T: git git://anongit.freedesktop.org/drm-intel
S: Supported
F: drivers/gpu/drm/i915/
F: include/drm/i915*
F: include/uapi/drm/i915*
F: include/uapi/drm/i915_drm.h

DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
@@ -3825,8 +3840,8 @@ L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
S: Supported
F: drivers/gpu/drm/exynos/
F: include/drm/exynos*
F: include/uapi/drm/exynos*
F: include/uapi/drm/exynos_drm.h
F: Documentation/devicetree/bindings/display/exynos/

DRM DRIVERS FOR FREESCALE DCU
M: Stefan Agner <stefan@agner.ch>
@@ -3863,6 +3878,31 @@ S: Maintained
F: drivers/gpu/drm/hisilicon/
F: Documentation/devicetree/bindings/display/hisilicon/

DRM DRIVER FOR INTEL I810 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/i810/
F: include/uapi/drm/i810_drm.h

DRM DRIVER FOR MSM ADRENO GPU
M: Rob Clark <robdclark@gmail.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
T: git git://people.freedesktop.org/~robclark/linux
S: Maintained
F: drivers/gpu/drm/msm/
F: include/uapi/drm/msm_drm.h
F: Documentation/devicetree/bindings/display/msm/

DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Ben Skeggs <bskeggs@redhat.com>
L: dri-devel@lists.freedesktop.org
L: nouveau@lists.freedesktop.org
T: git git://github.com/skeggsb/linux
S: Supported
F: drivers/gpu/drm/nouveau/
F: include/uapi/drm/nouveau_drm.h

DRM DRIVERS FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
M: Terje Bergström <tbergstrom@nvidia.com>
@@ -3876,22 +3916,54 @@ F: include/linux/host1x.h
F: include/uapi/drm/tegra_drm.h
F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt

DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/mga/
F: include/uapi/drm/mga_drm.h

DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/mgag200/

DRM DRIVER FOR RAGE 128 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/r128/
F: include/uapi/drm/r128_drm.h

DRM DRIVERS FOR RENESAS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
T: git git://people.freedesktop.org/~airlied/linux
T: git git://linuxtv.org/pinchartl/fbdev
S: Supported
F: drivers/gpu/drm/rcar-du/
F: drivers/gpu/drm/shmobile/
F: include/linux/platform_data/shmob_drm.h
F: Documentation/devicetree/bindings/display/renesas,du.txt

DRM DRIVER FOR QXL VIRTUAL GPU
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/qxl/
F: include/uapi/drm/qxl_drm.h

DRM DRIVERS FOR ROCKCHIP
M: Mark Yao <mark.yao@rock-chips.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/display/rockchip*
F: Documentation/devicetree/bindings/display/rockchip/

DRM DRIVER FOR SAVAGE VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/savage/
F: include/uapi/drm/savage_drm.h

DRM DRIVER FOR SIS VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/sis/
F: include/uapi/drm/sis_drm.h

DRM DRIVERS FOR STI
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
@@ -3902,14 +3974,43 @@ S: Maintained
F: drivers/gpu/drm/sti
F: Documentation/devicetree/bindings/display/st,stih4xx.txt

DRM DRIVER FOR TDFX VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/tdfx/

DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/udl/

DRM DRIVERS FOR VIVANTE GPU IP
M: Lucas Stach <l.stach@pengutronix.de>
R: Russell King <linux+etnaviv@arm.linux.org.uk>
R: Christian Gmeiner <christian.gmeiner@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/etnaviv
F: Documentation/devicetree/bindings/display/etnaviv
F: drivers/gpu/drm/etnaviv/
F: include/uapi/drm/etnaviv_drm.h
F: Documentation/devicetree/bindings/display/etnaviv/

DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: Sinclair Yeh <syeh@vmware.com>
M: Thomas Hellstrom <thellstrom@vmware.com>
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~syeh/repos_linux
T: git git://people.freedesktop.org/~thomash/linux
S: Supported
F: drivers/gpu/drm/vmwgfx/
F: include/uapi/drm/vmwgfx_drm.h

DRM DRIVERS FOR VC4
M: Eric Anholt <eric@anholt.net>
T: git git://github.com/anholt/linux
S: Supported
F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt

DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
@@ -6931,6 +7032,8 @@ MARVELL ARMADA DRM SUPPORT
M: Russell King <rmk+kernel@arm.linux.org.uk>
S: Maintained
F: drivers/gpu/drm/armada/
F: include/uapi/drm/armada_drm.h
F: Documentation/devicetree/bindings/display/armada/

MARVELL 88E6352 DSA support
M: Guenter Roeck <linux@roeck-us.net>

@@ -52,6 +52,7 @@ config DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_DEFERRED_IO
help
FBDEV helpers for KMS drivers.

@@ -514,7 +514,7 @@ static struct drm_driver kms_driver = {
.irq_uninstall = amdgpu_irq_uninstall,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.gem_free_object = amdgpu_gem_object_free,
.gem_free_object_unlocked = amdgpu_gem_object_free,
.gem_open_object = amdgpu_gem_object_open,
.gem_close_object = amdgpu_gem_object_close,
.dumb_create = amdgpu_mode_dumb_create,

@@ -3370,7 +3370,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,

/* wakeup usersapce */
if (works->event)
drm_send_vblank_event(adev->ddev, crtc_id, works->event);
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

@@ -3366,7 +3366,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,

/* wakeup usersapce */
if(works->event)
drm_send_vblank_event(adev->ddev, crtc_id, works->event);
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

@@ -3379,7 +3379,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,

/* wakeup usersapce */
if (works->event)
drm_send_vblank_event(adev->ddev, crtc_id, works->event);
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

@@ -113,7 +113,7 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
}

static int hdlcd_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
struct drm_atomic_state *state, bool nonblock)
{
return drm_atomic_helper_commit(dev, state, false);
}

@@ -145,7 +145,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;

/*
* FIXME: Async commits can race with connector unplugging and
* FIXME: Nonblocking commits can race with connector unplugging and
* there's currently nothing that prevents cleanup up state for
* deleted connectors. As long as the callback doesn't look at
* the connector we'll be fine though, so make sure that's the
@@ -263,6 +263,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
int ret, index = drm_crtc_index(crtc);
struct drm_crtc_state *crtc_state;

WARN_ON(!state->acquire_ctx);

crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
if (crtc_state)
return crtc_state;
@@ -622,6 +624,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
int ret, index = drm_plane_index(plane);
struct drm_plane_state *plane_state;

WARN_ON(!state->acquire_ctx);

plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -890,6 +894,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_mode_config *config = &connector->dev->mode_config;
struct drm_connector_state *connector_state;

WARN_ON(!state->acquire_ctx);

ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
@@ -1390,7 +1396,7 @@ int drm_atomic_commit(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_atomic_commit);

/**
* drm_atomic_async_commit - atomic&async configuration commit
* drm_atomic_nonblocking_commit - atomic&nonblocking configuration commit
* @state: atomic configuration to check
*
* Note that this function can return -EDEADLK if the driver needed to acquire
@@ -1405,7 +1411,7 @@ EXPORT_SYMBOL(drm_atomic_commit);
* Returns:
* 0 on success, negative error code on failure.
*/
int drm_atomic_async_commit(struct drm_atomic_state *state)
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
struct drm_mode_config *config = &state->dev->mode_config;
int ret;
@@ -1414,11 +1420,11 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
if (ret)
return ret;

DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
DRM_DEBUG_ATOMIC("commiting %p nonblocking\n", state);

return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_async_commit);
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);

/*
* The big monstor ioctl
@@ -1687,7 +1693,7 @@ retry:
*/
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_async_commit(state);
ret = drm_atomic_nonblocking_commit(state);
} else {
ret = drm_atomic_commit(state);
}

@@ -1114,13 +1114,13 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
* @async: asynchronous commit
* @nonblocking: whether nonblocking behavior is requested.
*
* This function commits a with drm_atomic_helper_check() pre-validated state
* object. This can still fail when e.g. the framebuffer reservation fails. For
* now this doesn't implement asynchronous commits.
* now this doesn't implement nonblocking commits.
*
* Note that right now this function does not support async commits, and hence
* Note that right now this function does not support nonblocking commits, hence
* driver writers must implement their own version for now. Also note that the
* default ordering of how the various stages are called is to match the legacy
* modeset helper library closest. One peculiarity of that is that it doesn't
@@ -1141,11 +1141,11 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
*/
int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
bool nonblock)
{
int ret;

if (async)
if (nonblock)
return -EBUSY;

ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -1195,20 +1195,20 @@ int drm_atomic_helper_commit(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_commit);

/**
* DOC: implementing async commit
* DOC: implementing nonblocking commit
*
* For now the atomic helpers don't support async commit directly. If there is
* real need it could be added though, using the dma-buf fence infrastructure
* for generic synchronization with outstanding rendering.
* For now the atomic helpers don't support nonblocking commit directly. If
* there is real need it could be added though, using the dma-buf fence
* infrastructure for generic synchronization with outstanding rendering.
*
* For now drivers have to implement async commit themselves, with the following
* sequence being the recommended one:
* For now drivers have to implement nonblocking commit themselves, with the
* following sequence being the recommended one:
*
* 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
* which commit needs to call which can fail, so we want to run it first and
* synchronously.
*
* 2. Synchronize with any outstanding asynchronous commit worker threads which
* 2. Synchronize with any outstanding nonblocking commit worker threads which
* might be affected the new state update. This can be done by either cancelling
* or flushing the work items, depending upon whether the driver can deal with
* cancelled updates. Note that it is important to ensure that the framebuffer
@@ -1222,9 +1222,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
* 3. The software state is updated synchronously with
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
* locks means concurrent callers never see inconsistent state. And doing this
* while it's guaranteed that no relevant async worker runs means that async
* workers do not need grab any locks. Actually they must not grab locks, for
* otherwise the work flushing will deadlock.
* while it's guaranteed that no relevant nonblocking worker runs means that
* nonblocking workers do not need grab any locks. Actually they must not grab
* locks, for otherwise the work flushing will deadlock.
*
* 4. Schedule a work item to do all subsequent steps, using the split-out
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
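A condensed sketch of that four-step recipe, in the shape the exynos and omap
conversions later in this pull actually use. All foo_* names are hypothetical
and step 2 is only stubbed:

	#include <linux/slab.h>
	#include <linux/workqueue.h>
	#include <drm/drmP.h>
	#include <drm/drm_atomic.h>
	#include <drm/drm_atomic_helper.h>

	struct foo_commit {
		struct work_struct work;
		struct drm_device *dev;
		struct drm_atomic_state *state;
	};

	static void foo_commit_work(struct work_struct *work)
	{
		struct foo_commit *commit =
			container_of(work, struct foo_commit, work);

		/* Step 4: the split-out commit helpers run from the worker. */
		drm_atomic_helper_commit_modeset_disables(commit->dev, commit->state);
		drm_atomic_helper_commit_planes(commit->dev, commit->state, false);
		drm_atomic_helper_commit_modeset_enables(commit->dev, commit->state);
		drm_atomic_helper_cleanup_planes(commit->dev, commit->state);
		drm_atomic_state_free(commit->state);
		kfree(commit);
	}

	static int foo_atomic_commit(struct drm_device *dev,
				     struct drm_atomic_state *state, bool nonblock)
	{
		struct foo_commit *commit;
		int ret;

		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
		if (!commit)
			return -ENOMEM;
		commit->dev = dev;
		commit->state = state;
		INIT_WORK(&commit->work, foo_commit_work);

		/* Step 1: the only part that may fail, done synchronously. */
		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			kfree(commit);
			return ret;
		}

		/* Step 2: flush or cancel any earlier nonblocking commit here. */

		/* Step 3: publish the new state while holding the modeset locks. */
		drm_atomic_helper_swap_state(dev, state);

		/* Nonblocking commits defer the hardware programming. */
		if (nonblock)
			schedule_work(&commit->work);
		else
			foo_commit_work(&commit->work);

		return 0;
	}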
@@ -2371,11 +2371,11 @@ retry:
goto fail;
}

ret = drm_atomic_async_commit(state);
ret = drm_atomic_nonblocking_commit(state);
if (ret != 0)
goto fail;

/* Driver takes ownership of state on successful async commit. */
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)

@@ -25,6 +25,8 @@
#include <drm/drm_fb_cma_helper.h>
#include <linux/module.h>

#define DEFAULT_FBDEFIO_DELAY_MS 50

struct drm_fb_cma {
struct drm_framebuffer fb;
struct drm_gem_cma_object *obj[4];
@@ -35,6 +37,61 @@ struct drm_fbdev_cma {
struct drm_fb_cma *fb;
};

/**
* DOC: framebuffer cma helper functions
*
* Provides helper functions for creating a cma (contiguous memory allocator)
* backed framebuffer.
*
* drm_fb_cma_create() is used in the
* (struct drm_mode_config_funcs *)->fb_create callback function to create the
* cma backed framebuffer.
*
* An fbdev framebuffer backed by cma is also available by calling
* drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
* If CONFIG_FB_DEFERRED_IO is enabled and the callback
* (struct drm_framebuffer_funcs)->dirty is set, fb_deferred_io
* will be set up automatically. dirty() is called by
* drm_fb_helper_deferred_io() in process context (struct delayed_work).
*
* Example fbdev deferred io code:
*
* static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
* struct drm_file *file_priv,
* unsigned flags, unsigned color,
* struct drm_clip_rect *clips,
* unsigned num_clips)
* {
* struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
* ... push changes ...
* return 0;
* }
*
* static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
* .destroy = drm_fb_cma_destroy,
* .create_handle = drm_fb_cma_create_handle,
* .dirty = driver_fbdev_fb_dirty,
* };
*
* static int driver_fbdev_create(struct drm_fb_helper *helper,
* struct drm_fb_helper_surface_size *sizes)
* {
* return drm_fbdev_cma_create_with_funcs(helper, sizes,
* &driver_fbdev_fb_funcs);
* }
*
* static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
* .fb_probe = driver_fbdev_create,
* };
*
* Initialize:
* fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
* dev->mode_config.num_crtc,
* dev->mode_config.num_connector,
* &driver_fb_helper_funcs);
*
*/

static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
{
return container_of(helper, struct drm_fbdev_cma, fb_helper);
@@ -45,7 +102,7 @@ static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
return container_of(fb, struct drm_fb_cma, fb);
}

static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
void drm_fb_cma_destroy(struct drm_framebuffer *fb)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
int i;
@@ -58,8 +115,9 @@ static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
drm_framebuffer_cleanup(fb);
kfree(fb_cma);
}
EXPORT_SYMBOL(drm_fb_cma_destroy);

static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned int *handle)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -67,6 +125,7 @@ static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
return drm_gem_handle_create(file_priv,
&fb_cma->obj[0]->base, handle);
}
EXPORT_SYMBOL(drm_fb_cma_create_handle);

static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
.destroy = drm_fb_cma_destroy,
@@ -76,7 +135,7 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_cma_object **obj,
unsigned int num_planes)
unsigned int num_planes, struct drm_framebuffer_funcs *funcs)
{
struct drm_fb_cma *fb_cma;
int ret;
@@ -91,7 +150,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
for (i = 0; i < num_planes; i++)
fb_cma->obj[i] = obj[i];

ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
if (ret) {
dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
kfree(fb_cma);
@@ -145,7 +204,7 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
objs[i] = to_drm_gem_cma_obj(obj);
}

fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, &drm_fb_cma_funcs);
if (IS_ERR(fb_cma)) {
ret = PTR_ERR(fb_cma);
goto err_gem_object_unreference;
@@ -233,8 +292,67 @@ static struct fb_ops drm_fbdev_cma_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};

static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
fb_deferred_io_mmap(info, vma);
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

return 0;
}

static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
struct drm_gem_cma_object *cma_obj)
{
struct fb_deferred_io *fbdefio;
struct fb_ops *fbops;

/*
* Per device structures are needed because:
* fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
* fbdefio: individual delays
*/
fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
if (!fbdefio || !fbops) {
kfree(fbdefio);
return -ENOMEM;
}

/* can't be offset from vaddr since dirty() uses cma_obj */
fbi->screen_buffer = cma_obj->vaddr;
/* fb_deferred_io_fault() needs a physical address */
fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));

*fbops = *fbi->fbops;
fbi->fbops = fbops;

fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
fbdefio->deferred_io = drm_fb_helper_deferred_io;
fbi->fbdefio = fbdefio;
fb_deferred_io_init(fbi);
fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;

return 0;
}

static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
{
if (!fbi->fbdefio)
return;

fb_deferred_io_cleanup(fbi);
kfree(fbi->fbdefio);
kfree(fbi->fbops);
}

/*
* For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
* needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
*/
int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes,
struct drm_framebuffer_funcs *funcs)
{
struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
@@ -270,7 +388,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
goto err_gem_free_object;
}

fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
if (IS_ERR(fbdev_cma->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(fbdev_cma->fb);
@@ -296,31 +414,48 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
fbi->screen_size = size;
fbi->fix.smem_len = size;

if (funcs->dirty) {
ret = drm_fbdev_cma_defio_init(fbi, obj);
if (ret)
goto err_cma_destroy;
}

return 0;

err_cma_destroy:
drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
drm_fb_cma_destroy(&fbdev_cma->fb->fb);
err_fb_info_destroy:
drm_fb_helper_release_fbi(helper);
err_gem_free_object:
dev->driver->gem_free_object(&obj->base);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);

static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
}

static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
.fb_probe = drm_fbdev_cma_create,
};

/**
* drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
* drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
* @num_crtc: Number of CRTCs
* @max_conn_count: Maximum number of connectors
* @funcs: fb helper functions, in particular fb_probe()
*
* Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
*/
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
unsigned int max_conn_count)
unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
{
struct drm_fbdev_cma *fbdev_cma;
struct drm_fb_helper *helper;
@@ -334,7 +469,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,

helper = &fbdev_cma->fb_helper;

drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
drm_fb_helper_prepare(dev, helper, funcs);

ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
if (ret < 0) {
@@ -364,6 +499,24 @@ err_free:

return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);

/**
* drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
* @num_crtc: Number of CRTCs
* @max_conn_count: Maximum number of connectors
*
* Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
*/
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
unsigned int max_conn_count)
{
return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
max_conn_count, &drm_fb_cma_helper_funcs);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);

/**
@@ -373,6 +526,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);

if (fbdev_cma->fb) {

@@ -84,6 +84,15 @@ static LIST_HEAD(kernel_fb_helper_list);
* and set up an initial configuration using the detected hardware, drivers
* should call drm_fb_helper_single_add_all_connectors() followed by
* drm_fb_helper_initial_config().
*
* If CONFIG_FB_DEFERRED_IO is enabled and &drm_framebuffer_funcs ->dirty is
* set, the drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit}
* functions will accumulate changes and schedule &fb_helper .dirty_work to run
* right away. This worker then calls the dirty() function ensuring that it
* will always run in process context since the fb_*() function could be
* running in atomic context. If drm_fb_helper_deferred_io() is used as the
* deferred_io callback it will also schedule dirty_work with the damage
* collected from the mmap page writes.
*/

/**
@@ -637,6 +646,23 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->crtc_info);
}

static void drm_fb_helper_dirty_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
dirty_work);
struct drm_clip_rect *clip = &helper->dirty_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;

spin_lock_irqsave(&helper->dirty_lock, flags);
clip_copy = *clip;
clip->x1 = clip->y1 = ~0;
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->dirty_lock, flags);

helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
}

/**
* drm_fb_helper_prepare - setup a drm_fb_helper structure
* @dev: DRM device
@@ -650,6 +676,9 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
{
INIT_LIST_HEAD(&helper->kernel_fb_list);
spin_lock_init(&helper->dirty_lock);
INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
helper->funcs = funcs;
helper->dev = dev;
}
@@ -834,6 +863,59 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);

static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
u32 width, u32 height)
{
struct drm_fb_helper *helper = info->par;
struct drm_clip_rect *clip = &helper->dirty_clip;
unsigned long flags;

if (!helper->fb->funcs->dirty)
return;

spin_lock_irqsave(&helper->dirty_lock, flags);
clip->x1 = min_t(u32, clip->x1, x);
clip->y1 = min_t(u32, clip->y1, y);
clip->x2 = max_t(u32, clip->x2, x + width);
clip->y2 = max_t(u32, clip->y2, y + height);
spin_unlock_irqrestore(&helper->dirty_lock, flags);

schedule_work(&helper->dirty_work);
}

/**
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
* @info: fb_info struct pointer
* @pagelist: list of dirty mmap framebuffer pages
*
* This function is used as the &fb_deferred_io ->deferred_io
* callback function for flushing the fbdev mmap writes.
*/
void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
unsigned long start, end, min, max;
struct page *page;
u32 y1, y2;

min = ULONG_MAX;
max = 0;
list_for_each_entry(page, pagelist, lru) {
start = page->index << PAGE_SHIFT;
end = start + PAGE_SIZE - 1;
min = min(min, start);
max = max(max, end);
}

if (min < max) {
y1 = min / info->fix.line_length;
y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
info->var.yres);
drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1);
}
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);

/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
@@ -862,7 +944,14 @@ EXPORT_SYMBOL(drm_fb_helper_sys_read);
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
return fb_sys_write(info, buf, count, ppos);
ssize_t ret;

ret = fb_sys_write(info, buf, count, ppos);
if (ret > 0)
drm_fb_helper_dirty(info, 0, 0, info->var.xres,
info->var.yres);

return ret;
}
EXPORT_SYMBOL(drm_fb_helper_sys_write);

@@ -877,6 +966,8 @@ void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
sys_fillrect(info, rect);
drm_fb_helper_dirty(info, rect->dx, rect->dy,
rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);

@@ -891,6 +982,8 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
sys_copyarea(info, area);
drm_fb_helper_dirty(info, area->dx, area->dy,
area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);

@@ -905,6 +998,8 @@ void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image)
{
sys_imageblit(info, image);
drm_fb_helper_dirty(info, image->dx, image->dy,
image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);

@@ -919,6 +1014,8 @@ void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
cfb_fillrect(info, rect);
drm_fb_helper_dirty(info, rect->dx, rect->dy,
rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);

@@ -933,6 +1030,8 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
cfb_copyarea(info, area);
drm_fb_helper_dirty(info, area->dx, area->dy,
area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);

@@ -947,6 +1046,8 @@ void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
cfb_imageblit(info, image);
drm_fb_helper_dirty(info, image->dx, image->dy,
image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);

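To show what these new exports enable, a hedged wiring sketch for a driver's
fbdev emulation (it mirrors what the cma helper above and the qxl conversion
below do); foo_* names are hypothetical:

	#include <linux/fb.h>
	#include <linux/module.h>
	#include <drm/drm_fb_helper.h>

	/* Deferred mmap writes are collected by the fb core and handed to
	 * drm_fb_helper_deferred_io, which schedules the helper's dirty worker. */
	static struct fb_deferred_io foo_defio = {
		.delay		= HZ / 20,
		.deferred_io	= drm_fb_helper_deferred_io,
	};

	/* The sys wrappers accumulate damage from fbcon drawing as well. */
	static struct fb_ops foo_fb_ops = {
		.owner		= THIS_MODULE,
		.fb_read	= drm_fb_helper_sys_read,
		.fb_write	= drm_fb_helper_sys_write,
		.fb_fillrect	= drm_fb_helper_sys_fillrect,
		.fb_copyarea	= drm_fb_helper_sys_copyarea,
		.fb_imageblit	= drm_fb_helper_sys_imageblit,
	};

	static void foo_fbdev_defio_init(struct fb_info *info)
	{
		info->fbops = &foo_fb_ops;
		info->fbdefio = &foo_defio;
		fb_deferred_io_init(info);
	}

Damage then reaches the driver through its (struct drm_framebuffer_funcs)->dirty
callback in process context, but only if that callback is actually set.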
@@ -804,13 +804,66 @@ drm_gem_object_free(struct kref *kref)
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;

WARN_ON(!mutex_is_locked(&dev->struct_mutex));
if (dev->driver->gem_free_object_unlocked) {
dev->driver->gem_free_object_unlocked(obj);
} else if (dev->driver->gem_free_object) {
WARN_ON(!mutex_is_locked(&dev->struct_mutex));

if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
* drm_gem_object_unreference_unlocked - release a GEM BO reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must not hold the
* dev->struct_mutex lock when calling this function.
*
* See also __drm_gem_object_unreference().
*/
void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev;

if (!obj)
return;

dev = obj->dev;
might_lock(&dev->struct_mutex);

if (dev->driver->gem_free_object_unlocked)
kref_put(&obj->refcount, drm_gem_object_free);
else if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
&dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);

/**
* drm_gem_object_unreference - release a GEM BO reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must hold the dev->struct_mutex
* lock when calling this function, even when the driver doesn't use
* dev->struct_mutex for anything.
*
* For drivers not encumbered with legacy locking use
* drm_gem_object_unreference_unlocked() instead.
*/
void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
if (obj) {
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

kref_put(&obj->refcount, drm_gem_object_free);
}
}
EXPORT_SYMBOL(drm_gem_object_unreference);

/**
* drm_gem_vm_open - vma->ops->open implementation for GEM
* @vma: VM area structure

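As a hypothetical illustration of the contract this imposes: a callback
registered through gem_free_object_unlocked cannot assume dev->struct_mutex is
held and must rely on the driver's own locking; all foo_* names are invented:

	#include <linux/slab.h>
	#include <drm/drmP.h>

	struct foo_bo {
		struct drm_gem_object base;
		struct list_head node;
	};

	struct foo_device {
		struct mutex bo_lock;
		struct list_head bo_list;
	};

	/* Hypothetical unlocked free callback: serializes against driver
	 * state with a private mutex and never touches dev->struct_mutex. */
	static void foo_gem_free_object_unlocked(struct drm_gem_object *obj)
	{
		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
		struct foo_device *priv = obj->dev->dev_private;

		mutex_lock(&priv->bo_lock);
		list_del(&bo->node);
		mutex_unlock(&priv->bo_lock);

		drm_gem_object_release(obj);
		kfree(bo);
	}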
@@ -497,7 +497,7 @@ static struct drm_driver etnaviv_drm_driver = {
.open = etnaviv_open,
.preclose = etnaviv_preclose,
.set_busid = drm_platform_set_busid,
.gem_free_object = etnaviv_gem_free_object,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,

@@ -270,7 +270,7 @@ static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
}

int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
bool async)
bool nonblock)
{
struct exynos_drm_private *priv = dev->dev_private;
struct exynos_atomic_commit *commit;
@@ -308,7 +308,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,

drm_atomic_helper_swap_state(dev, state);

if (async)
if (nonblock)
schedule_work(&commit->work);
else
exynos_atomic_commit_complete(commit);

@@ -308,7 +308,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
#endif

int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
bool async);
bool nonblock);


extern struct platform_driver fimd_driver;

@@ -1,6 +1,6 @@
config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER

@@ -13431,7 +13431,7 @@ static int intel_atomic_check(struct drm_device *dev,

static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
bool nonblock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_plane_state *plane_state;
@@ -13440,8 +13440,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;

if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
if (nonblock) {
DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
return -EINVAL;
}

@@ -13464,7 +13464,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = drm_atomic_helper_prepare_planes(dev, state);
mutex_unlock(&dev->struct_mutex);

if (!ret && !async) {
if (!ret && !nonblock) {
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
@@ -13557,21 +13557,21 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
* intel_atomic_commit - commit validated state object
* @dev: DRM device
* @state: the top-level driver state object
* @async: asynchronous commit
* @nonblock: nonblocking commit
*
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
* FIXME: Atomic modeset support for i915 is not yet complete. At the moment
* we can only handle plane-related operations and do not yet support
* asynchronous commit.
* nonblocking commit.
*
* RETURNS
* Zero for success or -errno.
*/
static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
bool nonblock)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13583,7 +13583,7 @@ static int intel_atomic_commit(struct drm_device *dev,
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;

ret = intel_atomic_prepare_commit(dev, state, async);
ret = intel_atomic_prepare_commit(dev, state, nonblock);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
return ret;

@@ -411,7 +411,7 @@ static struct drm_driver imx_drm_driver = {
.unload = imx_drm_driver_unload,
.lastclose = imx_drm_driver_lastclose,
.set_busid = drm_platform_set_busid,
.gem_free_object = drm_gem_cma_free_object,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,

@@ -121,7 +121,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
if (!file || (event->base.file_priv == file)) {
mdp4_crtc->event = NULL;
DBG("%s: send event: %p", mdp4_crtc->name, event);
drm_send_vblank_event(dev, mdp4_crtc->id, event);
drm_crtc_send_vblank_event(crtc, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);

@@ -149,7 +149,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
DBG("%s: send event: %p", mdp5_crtc->name, event);
drm_send_vblank_event(dev, mdp5_crtc->id, event);
drm_crtc_send_vblank_event(crtc, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);

@@ -190,17 +190,16 @@ int msm_atomic_check(struct drm_device *dev,
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
* @async: asynchronous commit
* @nonblock: nonblocking commit
*
* This function commits a with drm_atomic_helper_check() pre-validated state
* object. This can still fail when e.g. the framebuffer reservation fails. For
* now this doesn't implement asynchronous commits.
* object. This can still fail when e.g. the framebuffer reservation fails.
*
* RETURNS
* Zero for success or -errno.
*/
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
struct drm_atomic_state *state, bool nonblock)
{
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
@@ -276,7 +275,7 @@ int msm_atomic_commit(struct drm_device *dev,
* current layout.
*/

if (async) {
if (nonblock) {
msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
return 0;
}

@@ -174,7 +174,7 @@ void __msm_fence_worker(struct work_struct *work);
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
struct drm_atomic_state *state, bool nonblock);

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);

@@ -138,7 +138,7 @@ static bool omap_atomic_is_pending(struct omap_drm_private *priv,
}

static int omap_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
struct drm_atomic_state *state, bool nonblock)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_atomic_state_commit *commit;
@@ -177,7 +177,7 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(dev, state);

if (async)
if (nonblock)
schedule_work(&commit->work);
else
omap_atomic_complete(commit);

@ -460,7 +460,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
|
|||
.page_flip = qxl_crtc_page_flip,
|
||||
};
|
||||
|
||||
static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
||||
void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
||||
{
|
||||
struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
|
||||
|
||||
|
@ -522,12 +522,13 @@ int
|
|||
qxl_framebuffer_init(struct drm_device *dev,
|
||||
struct qxl_framebuffer *qfb,
|
||||
const struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_gem_object *obj)
|
||||
struct drm_gem_object *obj,
|
||||
const struct drm_framebuffer_funcs *funcs)
|
||||
{
|
||||
int ret;
|
||||
|
||||
qfb->obj = obj;
|
||||
ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
|
||||
ret = drm_framebuffer_init(dev, &qfb->base, funcs);
|
||||
if (ret) {
|
||||
qfb->obj = NULL;
|
||||
return ret;
|
||||
|
@ -994,7 +995,7 @@ qxl_user_framebuffer_create(struct drm_device *dev,
|
|||
if (qxl_fb == NULL)
|
||||
return NULL;
|
||||
|
||||
ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
|
||||
ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
|
||||
if (ret) {
|
||||
kfree(qxl_fb);
|
||||
drm_gem_object_unreference_unlocked(obj);
|
||||
|
|
|
@ -322,8 +322,6 @@ struct qxl_device {
|
|||
struct workqueue_struct *gc_queue;
|
||||
struct work_struct gc_work;
|
||||
|
||||
struct work_struct fb_work;
|
||||
|
||||
struct drm_property *hotplug_mode_update_property;
|
||||
int monitors_config_width;
|
||||
int monitors_config_height;
|
||||
|
@ -387,11 +385,13 @@ int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
|
|||
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
|
||||
|
||||
/* qxl_display.c */
|
||||
void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb);
|
||||
int
|
||||
qxl_framebuffer_init(struct drm_device *dev,
|
||||
struct qxl_framebuffer *rfb,
|
||||
const struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_gem_object *obj);
|
||||
struct drm_gem_object *obj,
|
||||
const struct drm_framebuffer_funcs *funcs);
|
||||
void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
|
||||
void qxl_send_monitors_config(struct qxl_device *qdev);
|
||||
int qxl_create_monitors_object(struct qxl_device *qdev);
|
||||
|
@ -551,7 +551,6 @@ int qxl_irq_init(struct qxl_device *qdev);
|
|||
irqreturn_t qxl_irq_handler(int irq, void *arg);
|
||||
|
||||
/* qxl_fb.c */
|
||||
int qxl_fb_init(struct qxl_device *qdev);
|
||||
bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
|
||||
|
||||
int qxl_debugfs_add_files(struct qxl_device *qdev,
|
||||
|
|
|
@ -46,15 +46,6 @@ struct qxl_fbdev {
|
|||
struct list_head delayed_ops;
|
||||
void *shadow;
|
||||
int size;
|
||||
|
||||
/* dirty memory logging */
|
||||
struct {
|
||||
spinlock_t lock;
|
||||
unsigned x1;
|
||||
unsigned y1;
|
||||
unsigned x2;
|
||||
unsigned y2;
|
||||
} dirty;
|
||||
};
|
||||
|
||||
static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
|
||||
|
@ -82,169 +73,18 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
|
|||
}
|
||||
}
|
||||
|
||||
static void qxl_fb_dirty_flush(struct fb_info *info)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
struct qxl_device *qdev = qfbdev->qdev;
|
||||
struct qxl_fb_image qxl_fb_image;
|
||||
struct fb_image *image = &qxl_fb_image.fb_image;
|
||||
unsigned long flags;
|
||||
u32 x1, x2, y1, y2;
|
||||
|
||||
/* TODO: hard coding 32 bpp */
|
||||
int stride = qfbdev->qfb.base.pitches[0];
|
||||
|
||||
spin_lock_irqsave(&qfbdev->dirty.lock, flags);
|
||||
|
||||
x1 = qfbdev->dirty.x1;
|
||||
x2 = qfbdev->dirty.x2;
|
||||
y1 = qfbdev->dirty.y1;
|
||||
y2 = qfbdev->dirty.y2;
|
||||
qfbdev->dirty.x1 = 0;
|
||||
qfbdev->dirty.x2 = 0;
|
||||
qfbdev->dirty.y1 = 0;
|
||||
qfbdev->dirty.y2 = 0;
|
||||
|
||||
spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
|
||||
|
||||
/*
|
||||
* we are using a shadow draw buffer, at qdev->surface0_shadow
|
||||
*/
|
||||
qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
|
||||
image->dx = x1;
|
||||
image->dy = y1;
|
||||
image->width = x2 - x1 + 1;
|
||||
image->height = y2 - y1 + 1;
|
||||
image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
|
||||
warnings */
|
||||
image->bg_color = 0;
|
||||
image->depth = 32; /* TODO: take from somewhere? */
|
||||
image->cmap.start = 0;
|
||||
image->cmap.len = 0;
|
||||
image->cmap.red = NULL;
|
||||
image->cmap.green = NULL;
|
||||
image->cmap.blue = NULL;
|
||||
image->cmap.transp = NULL;
|
||||
image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
|
||||
-	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
-	qxl_draw_opaque_fb(&qxl_fb_image, stride);
-}
-
-static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
-			     int x, int y, int width, int height)
-{
-	struct qxl_device *qdev = qfbdev->qdev;
-	unsigned long flags;
-	int x2, y2;
-
-	x2 = x + width - 1;
-	y2 = y + height - 1;
-
-	spin_lock_irqsave(&qfbdev->dirty.lock, flags);
-
-	if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
-	    (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
-		if (qfbdev->dirty.y1 < y)
-			y = qfbdev->dirty.y1;
-		if (qfbdev->dirty.y2 > y2)
-			y2 = qfbdev->dirty.y2;
-		if (qfbdev->dirty.x1 < x)
-			x = qfbdev->dirty.x1;
-		if (qfbdev->dirty.x2 > x2)
-			x2 = qfbdev->dirty.x2;
-	}
-
-	qfbdev->dirty.x1 = x;
-	qfbdev->dirty.x2 = x2;
-	qfbdev->dirty.y1 = y;
-	qfbdev->dirty.y2 = y2;
-
-	spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
-
-	schedule_work(&qdev->fb_work);
-}
-
-static void qxl_deferred_io(struct fb_info *info,
-			    struct list_head *pagelist)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-	unsigned long start, end, min, max;
-	struct page *page;
-	int y1, y2;
-
-	min = ULONG_MAX;
-	max = 0;
-	list_for_each_entry(page, pagelist, lru) {
-		start = page->index << PAGE_SHIFT;
-		end = start + PAGE_SIZE - 1;
-		min = min(min, start);
-		max = max(max, end);
-	}
-
-	if (min < max) {
-		y1 = min / info->fix.line_length;
-		y2 = (max / info->fix.line_length) + 1;
-		qxl_dirty_update(qfbdev, 0, y1, info->var.xres, y2 - y1);
-	}
-};
 
 static struct fb_deferred_io qxl_defio = {
 	.delay		= QXL_DIRTY_DELAY,
-	.deferred_io	= qxl_deferred_io,
+	.deferred_io	= drm_fb_helper_deferred_io,
 };
 
-static void qxl_fb_fillrect(struct fb_info *info,
-			    const struct fb_fillrect *rect)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-
-	drm_fb_helper_sys_fillrect(info, rect);
-	qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
-			 rect->height);
-}
-
-static void qxl_fb_copyarea(struct fb_info *info,
-			    const struct fb_copyarea *area)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-
-	drm_fb_helper_sys_copyarea(info, area);
-	qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
-			 area->height);
-}
-
-static void qxl_fb_imageblit(struct fb_info *info,
-			     const struct fb_image *image)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-
-	drm_fb_helper_sys_imageblit(info, image);
-	qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
-			 image->height);
-}
-
-static void qxl_fb_work(struct work_struct *work)
-{
-	struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
-	struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
-
-	qxl_fb_dirty_flush(qfbdev->helper.fbdev);
-}
-
-int qxl_fb_init(struct qxl_device *qdev)
-{
-	INIT_WORK(&qdev->fb_work, qxl_fb_work);
-	return 0;
-}
-
 static struct fb_ops qxlfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
-	.fb_fillrect = qxl_fb_fillrect,
-	.fb_copyarea = qxl_fb_copyarea,
-	.fb_imageblit = qxl_fb_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -338,6 +178,57 @@ out_unref:
 	return ret;
 }
 
+/*
+ * FIXME
+ * It should not be necessary to have a special dirty() callback for fbdev.
+ */
+static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
+				   struct drm_file *file_priv,
+				   unsigned flags, unsigned color,
+				   struct drm_clip_rect *clips,
+				   unsigned num_clips)
+{
+	struct qxl_device *qdev = fb->dev->dev_private;
+	struct fb_info *info = qdev->fbdev_info;
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_fb_image qxl_fb_image;
+	struct fb_image *image = &qxl_fb_image.fb_image;
+
+	/* TODO: hard coding 32 bpp */
+	int stride = qfbdev->qfb.base.pitches[0];
+
+	/*
+	 * we are using a shadow draw buffer, at qdev->surface0_shadow
+	 */
+	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+		   clips->y1, clips->y2);
+	image->dx = clips->x1;
+	image->dy = clips->y1;
+	image->width = clips->x2 - clips->x1;
+	image->height = clips->y2 - clips->y1;
+	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+					 warnings */
+	image->bg_color = 0;
+	image->depth = 32; /* TODO: take from somewhere? */
+	image->cmap.start = 0;
+	image->cmap.len = 0;
+	image->cmap.red = NULL;
+	image->cmap.green = NULL;
+	image->cmap.blue = NULL;
+	image->cmap.transp = NULL;
+	image->data = qfbdev->shadow + (clips->x1 * 4) + (stride * clips->y1);
+
+	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+	qxl_draw_opaque_fb(&qxl_fb_image, stride);
+
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs qxlfb_fb_funcs = {
+	.destroy = qxl_user_framebuffer_destroy,
+	.dirty = qxlfb_framebuffer_dirty,
+};
+
 static int qxlfb_create(struct qxl_fbdev *qfbdev,
 			struct drm_fb_helper_surface_size *sizes)
 {
@@ -383,7 +274,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 
 	info->par = qfbdev;
 
-	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
+			     &qxlfb_fb_funcs);
 
 	fb = &qfbdev->qfb.base;
 
@@ -504,7 +396,6 @@ int qxl_fbdev_init(struct qxl_device *qdev)
 	qfbdev->qdev = qdev;
 	qdev->mode_info.qfbdev = qfbdev;
 	spin_lock_init(&qfbdev->delayed_ops_lock);
-	spin_lock_init(&qfbdev->dirty.lock);
 	INIT_LIST_HEAD(&qfbdev->delayed_ops);
 
 	drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
@@ -261,10 +261,6 @@ static int qxl_device_init(struct qxl_device *qdev,
 	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
 	INIT_WORK(&qdev->gc_work, qxl_gc_work);
 
-	r = qxl_fb_init(qdev);
-	if (r)
-		return r;
-
 	return 0;
 }
@@ -377,7 +377,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 
 	/* wakeup userspace */
 	if (work->event)
-		drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+		drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);
 
 	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
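
The drm_send_vblank_event() to drm_crtc_send_vblank_event() conversions in this pull all follow the same shape; below is a minimal sketch of that pattern under dev->event_lock, where struct my_crtc and my_crtc_finish_page_flip() are illustrative names rather than anything in the patch:

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	struct my_crtc {
		struct drm_crtc base;
		struct drm_pending_vblank_event *event;	/* armed at flip time */
	};

	static void my_crtc_finish_page_flip(struct drm_device *dev,
					     struct my_crtc *mcrtc)
	{
		struct drm_pending_vblank_event *event;
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		event = mcrtc->event;
		mcrtc->event = NULL;
		if (event)
			/* takes the CRTC itself instead of a (dev, pipe) pair */
			drm_crtc_send_vblank_event(&mcrtc->base, event);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
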
@@ -525,7 +525,7 @@ static struct drm_driver kms_driver = {
 	.irq_uninstall = radeon_driver_irq_uninstall_kms,
 	.irq_handler = radeon_driver_irq_handler_kms,
 	.ioctls = radeon_ioctls_kms,
-	.gem_free_object = radeon_gem_object_free,
+	.gem_free_object_unlocked = radeon_gem_object_free,
 	.gem_open_object = radeon_gem_object_open,
 	.gem_close_object = radeon_gem_object_close,
 	.dumb_create = radeon_mode_dumb_create,
@@ -314,7 +314,7 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
 		return;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	drm_send_vblank_event(dev, rcrtc->index, event);
+	drm_crtc_send_vblank_event(&rcrtc->crtc, event);
 	wake_up(&rcrtc->flip_wait);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -283,7 +283,8 @@ static void rcar_du_atomic_work(struct work_struct *work)
 }
 
 static int rcar_du_atomic_commit(struct drm_device *dev,
-				 struct drm_atomic_state *state, bool async)
+				 struct drm_atomic_state *state,
+				 bool nonblock)
 {
 	struct rcar_du_device *rcdu = dev->dev_private;
 	struct rcar_du_commit *commit;
@@ -328,7 +329,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
 	/* Swap the state, this is the point of no return. */
 	drm_atomic_helper_swap_state(dev, state);
 
-	if (async)
+	if (nonblock)
 		schedule_work(&commit->work);
 	else
 		rcar_du_atomic_complete(commit);
@@ -276,7 +276,7 @@ void rockchip_drm_atomic_work(struct work_struct *work)
 
 int rockchip_drm_atomic_commit(struct drm_device *dev,
 			       struct drm_atomic_state *state,
-			       bool async)
+			       bool nonblock)
 {
 	struct rockchip_drm_private *private = dev->dev_private;
 	struct rockchip_atomic_commit *commit = &private->commit;
@@ -286,7 +286,7 @@ int rockchip_drm_atomic_commit(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	/* serialize outstanding asynchronous commits */
+	/* serialize outstanding nonblocking commits */
 	mutex_lock(&commit->lock);
 	flush_work(&commit->work);
 
@@ -295,7 +295,7 @@ int rockchip_drm_atomic_commit(struct drm_device *dev,
 	commit->dev = dev;
 	commit->state = state;
 
-	if (async)
+	if (nonblock)
 		schedule_work(&commit->work);
 	else
 		rockchip_atomic_commit_complete(commit);
@@ -440,7 +440,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
 	event = scrtc->event;
 	scrtc->event = NULL;
 	if (event) {
-		drm_send_vblank_event(dev, 0, event);
+		drm_crtc_send_vblank_event(&scrtc->crtc, event);
 		drm_vblank_put(dev, 0);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -202,7 +202,7 @@ static void sti_atomic_work(struct work_struct *work)
 }
 
 static int sti_atomic_commit(struct drm_device *drm,
-			     struct drm_atomic_state *state, bool async)
+			     struct drm_atomic_state *state, bool nonblock)
 {
 	struct sti_private *private = drm->dev_private;
 	int err;
@@ -211,7 +211,7 @@ static int sti_atomic_commit(struct drm_device *drm,
 	if (err)
 		return err;
 
-	/* serialize outstanding asynchronous commits */
+	/* serialize outstanding nonblocking commits */
 	mutex_lock(&private->commit.lock);
 	flush_work(&private->commit.work);
 
@@ -223,7 +223,7 @@ static int sti_atomic_commit(struct drm_device *drm,
 
 	drm_atomic_helper_swap_state(drm, state);
 
-	if (async)
+	if (nonblock)
 		sti_atomic_schedule(private, state);
 	else
 		sti_atomic_complete(private, state);
@@ -74,7 +74,7 @@ static void tegra_atomic_work(struct work_struct *work)
 }
 
 static int tegra_atomic_commit(struct drm_device *drm,
-			       struct drm_atomic_state *state, bool async)
+			       struct drm_atomic_state *state, bool nonblock)
 {
 	struct tegra_drm *tegra = drm->dev_private;
 	int err;
@@ -83,7 +83,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
 	if (err)
 		return err;
 
-	/* serialize outstanding asynchronous commits */
+	/* serialize outstanding nonblocking commits */
 	mutex_lock(&tegra->commit.lock);
 	flush_work(&tegra->commit.work);
 
@@ -95,7 +95,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
 
 	drm_atomic_helper_swap_state(drm, state);
 
-	if (async)
+	if (nonblock)
 		tegra_atomic_schedule(tegra, state);
 	else
 		tegra_atomic_complete(tegra, state);
@@ -707,7 +707,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
 		event = tilcdc_crtc->event;
 		tilcdc_crtc->event = NULL;
 		if (event)
-			drm_send_vblank_event(dev, 0, event);
+			drm_crtc_send_vblank_event(crtc, event);
 
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
@@ -81,8 +81,6 @@ struct udl_framebuffer {
 	struct drm_framebuffer base;
 	struct udl_gem_object *obj;
 	bool active_16; /* active on the 16-bit channel */
-	int x1, y1, x2, y2; /* dirty rect */
-	spinlock_t dirty_lock;
 };
 
 #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
@@ -77,68 +77,6 @@ static uint16_t rgb16(uint32_t col)
 }
 #endif
 
-/*
- * NOTE: fb_defio.c is holding info->fbdefio.mutex
- * Touching ANY framebuffer memory that triggers a page fault
- * in fb_defio will cause a deadlock, when it also tries to
- * grab the same mutex.
- */
-static void udlfb_dpy_deferred_io(struct fb_info *info,
-				  struct list_head *pagelist)
-{
-	struct page *cur;
-	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct udl_fbdev *ufbdev = info->par;
-	struct drm_device *dev = ufbdev->ufb.base.dev;
-	struct udl_device *udl = dev->dev_private;
-	struct urb *urb;
-	char *cmd;
-	cycles_t start_cycles, end_cycles;
-	int bytes_sent = 0;
-	int bytes_identical = 0;
-	int bytes_rendered = 0;
-
-	if (!fb_defio)
-		return;
-
-	start_cycles = get_cycles();
-
-	urb = udl_get_urb(dev);
-	if (!urb)
-		return;
-
-	cmd = urb->transfer_buffer;
-
-	/* walk the written page list and render each to device */
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-
-		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
-				     &urb, (char *) info->fix.smem_start,
-				     &cmd, cur->index << PAGE_SHIFT,
-				     cur->index << PAGE_SHIFT,
-				     PAGE_SIZE, &bytes_identical, &bytes_sent))
-			goto error;
-		bytes_rendered += PAGE_SIZE;
-	}
-
-	if (cmd > (char *) urb->transfer_buffer) {
-		/* Send partial buffer remaining before exiting */
-		int len = cmd - (char *) urb->transfer_buffer;
-		udl_submit_urb(dev, urb, len);
-		bytes_sent += len;
-	} else
-		udl_urb_completion(urb);
-
-error:
-	atomic_add(bytes_sent, &udl->bytes_sent);
-	atomic_add(bytes_identical, &udl->bytes_identical);
-	atomic_add(bytes_rendered, &udl->bytes_rendered);
-	end_cycles = get_cycles();
-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
-		    >> 10)), /* Kcycles */
-		   &udl->cpu_kcycles_used);
-}
-
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		      int width, int height)
 {
@@ -152,9 +90,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	struct urb *urb;
 	int aligned_x;
 	int bpp = (fb->base.bits_per_pixel / 8);
-	int x2, y2;
-	bool store_for_later = false;
-	unsigned long flags;
 
 	if (!fb->active_16)
 		return 0;
@@ -180,38 +115,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	    (y + height > fb->base.height))
 		return -EINVAL;
 
-	/* if we are in atomic just store the info
-	   can't test inside spin lock */
-	if (in_atomic())
-		store_for_later = true;
-
-	x2 = x + width - 1;
-	y2 = y + height - 1;
-
-	spin_lock_irqsave(&fb->dirty_lock, flags);
-
-	if (fb->y1 < y)
-		y = fb->y1;
-	if (fb->y2 > y2)
-		y2 = fb->y2;
-	if (fb->x1 < x)
-		x = fb->x1;
-	if (fb->x2 > x2)
-		x2 = fb->x2;
-
-	if (store_for_later) {
-		fb->x1 = x;
-		fb->x2 = x2;
-		fb->y1 = y;
-		fb->y2 = y2;
-		spin_unlock_irqrestore(&fb->dirty_lock, flags);
-		return 0;
-	}
-
-	fb->x1 = fb->y1 = INT_MAX;
-	fb->x2 = fb->y2 = 0;
-
-	spin_unlock_irqrestore(&fb->dirty_lock, flags);
 	start_cycles = get_cycles();
 
 	urb = udl_get_urb(dev);
@@ -219,14 +122,14 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		return 0;
 	cmd = urb->transfer_buffer;
 
-	for (i = y; i <= y2 ; i++) {
+	for (i = y; i < height ; i++) {
 		const int line_offset = fb->base.pitches[0] * i;
 		const int byte_offset = line_offset + (x * bpp);
 		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
 		if (udl_render_hline(dev, bpp, &urb,
 				     (char *) fb->obj->vmapping,
 				     &cmd, byte_offset, dev_byte_offset,
-				     (x2 - x + 1) * bpp,
+				     width * bpp,
 				     &bytes_identical, &bytes_sent))
 			goto error;
 	}
@@ -283,36 +186,6 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	return 0;
 }
 
-static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	drm_fb_helper_sys_fillrect(info, rect);
-
-	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
-			  rect->height);
-}
-
-static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	drm_fb_helper_sys_copyarea(info, region);
-
-	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
-			  region->height);
-}
-
-static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	drm_fb_helper_sys_imageblit(info, image);
-
-	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
-			  image->height);
-}
-
 /*
  * It's common for several clients to have framebuffer open simultaneously.
  * e.g. both fbcon and X. Makes things interesting.
@@ -339,7 +212,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
 	if (fbdefio) {
 		fbdefio->delay = DL_DEFIO_WRITE_DELAY;
-		fbdefio->deferred_io = udlfb_dpy_deferred_io;
+		fbdefio->deferred_io = drm_fb_helper_deferred_io;
 	}
 
 	info->fbdefio = fbdefio;
@@ -379,9 +252,9 @@ static struct fb_ops udlfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = udl_fb_fillrect,
-	.fb_copyarea = udl_fb_copyarea,
-	.fb_imageblit = udl_fb_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -458,7 +331,6 @@ udl_framebuffer_init(struct drm_device *dev,
 {
 	int ret;
 
-	spin_lock_init(&ufb->dirty_lock);
 	ufb->obj = obj;
 	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
 	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
@@ -93,7 +93,7 @@ static struct vc4_commit *commit_init(struct drm_atomic_state *state)
  * vc4_atomic_commit - commit validated state object
  * @dev: DRM device
  * @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
  *
  * This function commits a with drm_atomic_helper_check() pre-validated state
  * object. This can still fail when e.g. the framebuffer reservation fails. For
@@ -104,7 +104,7 @@ static struct vc4_commit *commit_init(struct drm_atomic_state *state)
  */
 static int vc4_atomic_commit(struct drm_device *dev,
 			     struct drm_atomic_state *state,
-			     bool async)
+			     bool nonblock)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int ret;
@@ -170,7 +170,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
 	 * current layout.
 	 */
 
-	if (async) {
+	if (nonblock) {
 		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
 				   vc4_atomic_complete_commit_seqno_cb);
 	} else {
@@ -164,7 +164,7 @@ static const struct address_space_operations fb_deferred_io_aops = {
 	.set_page_dirty = fb_deferred_io_set_page_dirty,
 };
 
-static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	vma->vm_ops = &fb_deferred_io_vm_ops;
 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
@@ -173,6 +173,7 @@ static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	vma->vm_private_data = info;
 	return 0;
 }
+EXPORT_SYMBOL(fb_deferred_io_mmap);
 
 /* workqueue callback */
 static void fb_deferred_io_work(struct work_struct *work)
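
fb_deferred_io_mmap() loses its static marker and gains an EXPORT_SYMBOL so code outside fb_defio.c (here the generic fbdev helper) can reuse it. A hypothetical out-of-file caller, sketched for illustration only (my_fb_mmap and my_fb_ops are not part of this series):

	#include <linux/fb.h>
	#include <linux/module.h>

	static int my_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
	{
		/* reuse fb_defio's page-fault based dirty tracking */
		return fb_deferred_io_mmap(info, vma);
	}

	static struct fb_ops my_fb_ops = {
		.owner = THIS_MODULE,
		.fb_mmap = my_fb_mmap,
		/* remaining ops elided in this sketch */
	};
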
@@ -580,12 +580,21 @@ struct drm_driver {
 	void (*debugfs_cleanup)(struct drm_minor *minor);
 
 	/**
-	 * Driver-specific constructor for drm_gem_objects, to set up
-	 * obj->driver_private.
+	 * @gem_free_object: deconstructor for drm_gem_objects
 	 *
-	 * Returns 0 on success.
+	 * This is deprecated and should not be used by new drivers. Use
+	 * @gem_free_object_unlocked instead.
 	 */
 	void (*gem_free_object) (struct drm_gem_object *obj);
 
+	/**
+	 * @gem_free_object_unlocked: deconstructor for drm_gem_objects
+	 *
+	 * This is for drivers which are not encumbered with dev->struct_mutex
+	 * legacy locking schemes. Use this hook instead of @gem_free_object.
+	 */
+	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
+
 	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
 	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
 
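
For a driver the switch is the one-liner seen in the radeon/amdgpu/etnaviv hunks of this pull; below is a sketch of the full hookup, assuming a driver whose free path never needed dev->struct_mutex (struct my_bo and my_gem_free_object are illustrative names):

	struct my_bo {
		struct drm_gem_object base;
	};

	/* Called without dev->struct_mutex held - must not assume it. */
	static void my_gem_free_object(struct drm_gem_object *obj)
	{
		struct my_bo *bo = container_of(obj, struct my_bo, base);

		drm_gem_object_release(obj);
		kfree(bo);
	}

	static struct drm_driver my_driver = {
		/* opt in to lockless BO freeing */
		.gem_free_object_unlocked = my_gem_free_object,
		/* remaining driver hooks elided in this sketch */
	};
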
@@ -137,7 +137,7 @@ drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
 
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
-int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
 
 #define for_each_connector_in_state(state, connector, connector_state, __i) \
 	for ((__i) = 0; \
@@ -40,7 +40,7 @@ int drm_atomic_helper_check(struct drm_device *dev,
 			    struct drm_atomic_state *state);
 int drm_atomic_helper_commit(struct drm_device *dev,
 			     struct drm_atomic_state *state,
-			     bool async);
+			     bool nonblock);
 
 void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 				       struct drm_atomic_state *state);
@@ -1886,7 +1886,7 @@ struct drm_mode_config_funcs {
 	 * drm_atomic_helper_commit(), or one of the exported sub-functions of
 	 * it.
 	 *
-	 * Asynchronous commits (as indicated with the async parameter) must
+	 * Nonblocking commits (as indicated with the nonblock parameter) must
 	 * do any preparatory work which might result in an unsuccessful commit
 	 * in the context of this callback. The only exceptions are hardware
 	 * errors resulting in -EIO. But even in that case the driver must
@@ -1899,7 +1899,7 @@ struct drm_mode_config_funcs {
 	 * The driver must wait for any pending rendering to the new
 	 * framebuffers to complete before executing the flip. It should also
 	 * wait for any pending rendering from other drivers if the underlying
-	 * buffer is a shared dma-buf. Asynchronous commits must not wait for
+	 * buffer is a shared dma-buf. Nonblocking commits must not wait for
 	 * rendering in the context of this callback.
 	 *
 	 * An application can request to be notified when the atomic commit has
@@ -1930,7 +1930,7 @@ struct drm_mode_config_funcs {
 	 *
 	 * 0 on success or one of the below negative error codes:
 	 *
-	 * - -EBUSY, if an asynchronous updated is requested and there is
+	 * - -EBUSY, if a nonblocking updated is requested and there is
 	 * an earlier updated pending. Drivers are allowed to support a queue
 	 * of outstanding updates, but currently no driver supports that.
 	 * Note that drivers must wait for preceding updates to complete if a
@@ -1960,7 +1960,7 @@ struct drm_mode_config_funcs {
 	 */
 	int (*atomic_commit)(struct drm_device *dev,
 			     struct drm_atomic_state *state,
-			     bool async);
+			     bool nonblock);
 
 	/**
 	 * @atomic_state_alloc:
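
The rcar, rockchip, sti and tegra hunks above all implement this contract the same way; below is a condensed sketch of that nonblocking-commit pattern, with struct my_commit and its helpers standing in for the per-driver code (error unwinding elided):

	struct my_commit {
		struct work_struct work;
		struct drm_device *dev;
		struct drm_atomic_state *state;
	};

	static void my_commit_complete(struct my_commit *commit)
	{
		/* modeset disables/enables, plane commit, cleanup - elided */
	}

	static void my_commit_work(struct work_struct *work)
	{
		my_commit_complete(container_of(work, struct my_commit, work));
	}

	static int my_atomic_commit(struct drm_device *dev,
				    struct drm_atomic_state *state,
				    bool nonblock)
	{
		struct my_commit *commit;
		int ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
		if (!commit)
			return -ENOMEM;
		INIT_WORK(&commit->work, my_commit_work);
		commit->dev = dev;
		commit->state = state;

		/* swap the state - the point of no return */
		drm_atomic_helper_swap_state(dev, state);

		if (nonblock)
			schedule_work(&commit->work);
		else
			my_commit_complete(commit);

		return 0;
	}
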
@@ -4,11 +4,18 @@
 struct drm_fbdev_cma;
 struct drm_gem_cma_object;
 
+struct drm_fb_helper_surface_size;
+struct drm_framebuffer_funcs;
+struct drm_fb_helper_funcs;
+struct drm_framebuffer;
+struct drm_fb_helper;
 struct drm_device;
 struct drm_file;
 struct drm_mode_fb_cmd2;
 
+struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs);
 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 	unsigned int preferred_bpp, unsigned int num_crtc,
 	unsigned int max_conn_count);
@@ -16,6 +23,13 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
 
 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
 void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes,
+	struct drm_framebuffer_funcs *funcs);
+
+void drm_fb_cma_destroy(struct drm_framebuffer *fb);
+int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+	struct drm_file *file_priv, unsigned int *handle);
 
 struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
 	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd);
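
These _with_funcs variants let a CMA driver attach its own drm_framebuffer_funcs, typically to supply the .dirty callback that the new defio support ends up calling. A hypothetical wiring (my_fb_dirty and my_fbdev_create are illustrative; .destroy and .create_handle reuse the helpers exported just above):

	static int my_fb_dirty(struct drm_framebuffer *fb,
			       struct drm_file *file_priv,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips, unsigned num_clips)
	{
		/* flush the damaged region out to the scanout hardware */
		return 0;
	}

	static struct drm_framebuffer_funcs my_fb_funcs = {
		.destroy = drm_fb_cma_destroy,
		.create_handle = drm_fb_cma_create_handle,
		.dirty = my_fb_dirty,
	};

	static int my_fbdev_create(struct drm_fb_helper *helper,
				   struct drm_fb_helper_surface_size *sizes)
	{
		return drm_fbdev_cma_create_with_funcs(helper, sizes,
						       &my_fb_funcs);
	}
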
@@ -172,6 +172,10 @@ struct drm_fb_helper_connector {
  * @funcs: driver callbacks for fb helper
  * @fbdev: emulated fbdev device info struct
  * @pseudo_palette: fake palette of 16 colors
+ * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to
+ *              the screen buffer
+ * @dirty_lock: spinlock protecting @dirty_clip
+ * @dirty_work: worker used to flush the framebuffer
  *
  * This is the main structure used by the fbdev helpers. Drivers supporting
  * fbdev emulation should embedded this into their overall driver structure.
@@ -189,6 +193,9 @@ struct drm_fb_helper {
 	const struct drm_fb_helper_funcs *funcs;
 	struct fb_info *fbdev;
 	u32 pseudo_palette[17];
+	struct drm_clip_rect dirty_clip;
+	spinlock_t dirty_lock;
+	struct work_struct dirty_work;
 
 	/**
 	 * @kernel_fb_list:
@@ -245,6 +252,9 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 
 void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
 
+void drm_fb_helper_deferred_io(struct fb_info *info,
+			       struct list_head *pagelist);
+
 ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
 			       size_t count, loff_t *ppos);
 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
@@ -368,6 +378,11 @@ static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
 {
 }
 
+static inline void drm_fb_helper_deferred_io(struct fb_info *info,
+					     struct list_head *pagelist)
+{
+}
+
 static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
 					     char __user *buf, size_t count,
					     loff_t *ppos)
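
Wiring a driver up to the new helper follows the qxl and udl conversions earlier in this pull; a minimal sketch, where my_defio, my_fb_ops and MY_DEFIO_DELAY are illustrative:

	#define MY_DEFIO_DELAY	(HZ / 20)	/* arbitrary example delay */

	static struct fb_deferred_io my_defio = {
		.delay		= MY_DEFIO_DELAY,
		/* accumulates damage in fb_helper->dirty_clip and kicks
		 * fb_helper->dirty_work, which ends up in fb->funcs->dirty() */
		.deferred_io	= drm_fb_helper_deferred_io,
	};

	static struct fb_ops my_fb_ops = {
		.owner		= THIS_MODULE,
		/* the sys_* helpers draw into the shadow/system buffer */
		.fb_fillrect	= drm_fb_helper_sys_fillrect,
		.fb_copyarea	= drm_fb_helper_sys_copyarea,
		.fb_imageblit	= drm_fb_helper_sys_imageblit,
		/* remaining ops elided in this sketch */
	};

	static void my_fbdev_enable_defio(struct fb_info *info)
	{
		info->fbdefio = &my_defio;
		fb_deferred_io_init(info);
	}
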
@@ -200,47 +200,29 @@ drm_gem_object_reference(struct drm_gem_object *obj)
 }
 
 /**
- * drm_gem_object_unreference - release a GEM BO reference
+ * __drm_gem_object_unreference - raw function to release a GEM BO reference
  * @obj: GEM buffer object
  *
- * This releases a reference to @obj. Callers must hold the dev->struct_mutex
- * lock when calling this function, even when the driver doesn't use
- * dev->struct_mutex for anything.
+ * This function is meant to be used by drivers which are not encumbered with
+ * dev->struct_mutex legacy locking and which are using the
+ * gem_free_object_unlocked callback. It avoids all the locking checks and
+ * locking overhead of drm_gem_object_unreference() and
+ * drm_gem_object_unreference_unlocked().
  *
- * For drivers not encumbered with legacy locking use
- * drm_gem_object_unreference_unlocked() instead.
+ * Drivers should never call this directly in their code. Instead they should
+ * wrap it up into a driver_gem_object_unreference(struct driver_gem_object
+ * *obj) wrapper function, and use that. Shared code should never call this, to
+ * avoid breaking drivers by accident which still depend upon dev->struct_mutex
+ * locking.
 */
 static inline void
-drm_gem_object_unreference(struct drm_gem_object *obj)
+__drm_gem_object_unreference(struct drm_gem_object *obj)
 {
-	if (obj != NULL) {
-		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-
-		kref_put(&obj->refcount, drm_gem_object_free);
-	}
+	kref_put(&obj->refcount, drm_gem_object_free);
 }
 
-/**
- * drm_gem_object_unreference_unlocked - release a GEM BO reference
- * @obj: GEM buffer object
- *
- * This releases a reference to @obj. Callers must not hold the
- * dev->struct_mutex lock when calling this function.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
-	struct drm_device *dev;
-
-	if (!obj)
-		return;
-
-	dev = obj->dev;
-	if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
-		mutex_unlock(&dev->struct_mutex);
-	else
-		might_lock(&dev->struct_mutex);
-}
+void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_unreference(struct drm_gem_object *obj);
 
 int drm_gem_handle_create(struct drm_file *file_priv,
 			  struct drm_gem_object *obj,
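
The comment above asks drivers to hide __drm_gem_object_unreference() behind their own wrapper; a hypothetical example of exactly that pattern (struct driver_gem_object is the placeholder name the comment itself uses):

	struct driver_gem_object {
		struct drm_gem_object base;
	};

	static inline void
	driver_gem_object_unreference(struct driver_gem_object *obj)
	{
		/* safe only because this driver uses gem_free_object_unlocked
		 * and never relies on dev->struct_mutex */
		__drm_gem_object_unreference(&obj->base);
	}
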
@@ -673,6 +673,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 }
 
 /* drivers/video/fb_defio.c */
+int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
 extern void fb_deferred_io_open(struct fb_info *info,
 				struct inode *inode,