Merge tag 'drm-misc-next-2020-10-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.11:

UAPI Changes:

  - doc: rules for EBUSY on non-blocking commits; requirements for fourcc
    modifiers; guidance on parsing EDID
  - fbdev/sbuslib: Remove unused FBIOSCURSOR32
  - fourcc: deprecate DRM_FORMAT_MOD_NONE (note after this list)
  - virtio: Support blob resources for memory allocations; Expose host-visible
    and cross-device features

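  A note on the fourcc deprecation above: userspace should pass an explicit
  modifier instead of DRM_FORMAT_MOD_NONE. A minimal sketch, assuming code
  built against a current drm_fourcc.h:

      #include <stdint.h>
      #include <drm/drm_fourcc.h>

      /* DRM_FORMAT_MOD_NONE is a deprecated alias of DRM_FORMAT_MOD_LINEAR
       * (both are 0); use the explicit name, or DRM_FORMAT_MOD_INVALID when
       * no modifier has been negotiated. */
      static const uint64_t modifier = DRM_FORMAT_MOD_LINEAR;
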
Cross-subsystem Changes:

  - devicetree: Add vendor prefixes for Yes Optoelectronics and Shanghai Top
    Display Optoelectronics
  - dma-buf: Add struct dma_buf_map that stores DMA pointer and I/O-memory
    flag; dma_buf_vmap()/vunmap() return the address in a dma_buf_map (sketch
    after this list); Use struct_size() macro

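  For the dma-buf change above, importers now receive the mapping through
  struct dma_buf_map rather than a raw pointer, so they can tell system
  memory from I/O memory apart. A minimal importer-side sketch of the new
  interface (example_vmap() is a hypothetical helper; error handling
  trimmed):

      #include <linux/dma-buf.h>
      #include <linux/dma-buf-map.h>
      #include <linux/io.h>
      #include <linux/string.h>

      static int example_vmap(struct dma_buf *dmabuf)
      {
              struct dma_buf_map map;
              int ret;

              ret = dma_buf_vmap(dmabuf, &map); /* fills vaddr + is_iomem */
              if (ret)
                      return ret;

              if (map.is_iomem)
                      memset_io(map.vaddr_iomem, 0, 4); /* I/O accessors */
              else
                      memset(map.vaddr, 0, 4);          /* kernel memory */

              dma_buf_vunmap(dmabuf, &map);
              return 0;
      }
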
Core Changes:

  - atomic: pass full state to CRTC atomic enable/disable (signature sketch
    after this list); warn for EBUSY during non-blocking commits
  - dp: Prepare for DP 2.0 DPCD
  - dp_mst: Receive extended DPCD caps
  - dma-buf: Documentation
  - doc: Format modifiers; dma-buf-map; Cleanups
  - fbdev: Don't use compat_alloc_user_space(); mark as orphaned
  - fb-helper: Take lock in drm_fb_helper_restore_work_fb()
  - gem: Convert implementation and drivers to GEM object functions; remove
    GEM callbacks from struct drm_driver (except gem_prime_mmap); see the
    sketch after this list
  - panel: Cleanups
  - pci: Add legacy infix to drm_irq_by_busid()
  - sched: Avoid infinite waits in drm_sched_entity_destroy()
  - switcheroo: Cleanups
  - ttm: Remove AGP support; Don't modify caching during swapout; Major
    refactoring of the implementation and API that affects all dependent
    drivers; Add ttm_bo_wait_ctx(); Add ttm_bo_pin()/unpin() in favor of
    TTM_PL_FLAG_NO_EVICT (pin/unpin sketch after this list); Remove
    ttm_bo_create(); Remove fault_reserve_notify() callback; Push move()
    implementation into drivers; Remove TTM_PAGE_FLAG_WRITE; Replace caching
    flags with init-time cache setting; Push ttm_tt_bind() into drivers;
    Replace move_notify() with delete_mem_notify(); No overlapping memcpy();
    no more ttm_set_populated()
  - vram-helper: Fix BO top-down placement; TTM-related changes; Init GEM
    object functions with defaults; Default placement in system memory; Cleanups

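  A sketch of the CRTC callback conversion mentioned in the atomic bullet
  above (the arc and komeda hunks below show real conversions; the foo_
  names are hypothetical):

      static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
                                         struct drm_atomic_state *state)
      {
              struct drm_crtc_state *old_state =
                      drm_atomic_get_old_crtc_state(state, crtc);

              /* enable the hardware, consulting old_state as needed */
      }
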
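  The GEM bullet above moves per-object callbacks from struct drm_driver
  into struct drm_gem_object_funcs, as the amdgpu hunks below demonstrate.
  A condensed sketch of the pattern (foo_ names hypothetical):

      static const struct drm_gem_object_funcs foo_gem_object_funcs = {
              .free   = foo_gem_object_free,
              .open   = foo_gem_object_open,
              .close  = foo_gem_object_close,
              .vmap   = foo_gem_prime_vmap,
              .vunmap = foo_gem_prime_vunmap,
      };

      /* set at object-creation time instead of via drm_driver callbacks */
      obj->funcs = &foo_gem_object_funcs;
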
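  And for the TTM pin/unpin item: rather than setting TTM_PL_FLAG_NO_EVICT
  in the placement flags, drivers now pin through an explicit counter on
  the buffer object, as the amdgpu conversion below shows. A minimal sketch
  (example_* helpers are hypothetical; the BO's reservation lock must be
  held):

      #include <drm/ttm/ttm_bo_api.h>

      static void example_pin(struct ttm_buffer_object *bo)
      {
              dma_resv_assert_held(bo->base.resv);
              ttm_bo_pin(bo);         /* bumps bo->pin_count; no eviction */
      }

      static void example_unpin(struct ttm_buffer_object *bo)
      {
              dma_resv_assert_held(bo->base.resv);
              ttm_bo_unpin(bo);       /* eviction allowed again at zero */
      }
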
Driver Changes:

  - amdgpu: Use GEM object functions
  - armada: Use GEM object functions
  - aspeed: Configure output via sysfs; Init struct drm_driver with
    DRM_GEM_CMA_DRIVER_OPS
  - ast: Reload LUT after FB format changes
  - bridge: Add driver and DT bindings for anx7625; Cleanups
  - bridge/dw-hdmi: Constify ops
  - bridge/ti-sn65dsi86: Add retries for link training
  - bridge/lvds-codec: Add support for regulator
  - bridge/tc358768: Restore connector support; Cleanups
  - display/ti,j721e-dss: Add DT properties assigned-clocks,
    assigned-clock-parents and dma-coherent
  - display/ti,am65x-dss: Add DT properties assigned-clocks,
    assigned-clock-parents and dma-coherent
  - etnaviv: Use GEM object functions
  - exynos: Use GEM object functions
  - fbdev: Cleanups and compiler fixes throughout framebuffer drivers
  - fbdev/cirrusfb: Avoid division by 0
  - gma500: Use GEM object functions; Fix double-free of connector; Cleanups
  - hisilicon/hibmc: I2C-based DDC support; Use to_hibmc_drm_device(); Cleanups
  - i915: Use GEM object functions
  - imx/dcss: Init driver with DRM_GEM_CMA_DRIVER_OPS; Cleanups
  - ingenic: Reset pixel clock when parent clock changes; support reserved
    memory; Alloc F0 and F1 DMA channels at once; Support different pixel
    formats; Revert support for cached mmap buffers on F0/F1; support
    30-bit/24-bit/8-bit-palette modes
  - komeda: Use DEFINE_SHOW_ATTRIBUTE
  - mcde: Detect platform_get_irq() errors
  - mediatek: Use GEM object functions
  - msm: Use GEM object functions
  - nouveau: Cleanups; TTM-related changes; Use GEM object functions
  - omapdrm: Use GEM object functions
  - panel: Add driver and DT bindings for Novatek nt36672a; Add driver and DT
    bindings for YTC700TLAG-05-201C; Add driver and DT bindings for TDO TL070WSH30;
    Cleanups
  - panel/mantix: Fix reset; Fix deref of NULL pointer in mantix_get_modes()
  - panel/otm8009a: Allow non-continuous dsi clock; Cleanups
  - panel/rm68200: Allow non-continuous dsi clock; Fix mode to 50 FPS
  - panfrost: Fix job timeout handling; Cleanups
  - pl111: Use GEM object functions
  - qxl: Cleanups; TTM-related changes; Pin new BOs with ttm_bo_init_reserved()
  - radeon: Cleanups; TTM-related changes; Use GEM object functions
  - rockchip: Use GEM object functions
  - shmobile: Cleanups
  - tegra: Use GEM object functions
  - tidss: Set drm_plane_helper_funcs.prepare_fb
  - tilcdc: Don't keep vblank interrupt enabled all the time
  - tve200: Detect platform_get_irq() errors
  - vc4: Use GEM object functions; Only register components once DSI is attached;
    Add Maxime as maintainer
  - vgem: Use GEM object functions
  - via: Simplify critical section in via_mem_alloc()
  - virtgpu: Use GEM object functions
  - virtio: Implement blob resources, host-visible and cross-device features;
    Support mapping of host-allocated resources; Use UUID API; Cleanups
  - vkms: Use GEM object functions; Switch to SHMEM
  - vmwgfx: TTM-related changes; Inline ttm_bo_swapout_all()
  - xen: Use GEM object functions
  - xlnx: Use GEM object functions

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201027100936.GA4858@linux-uq9g
This commit is contained in:
Dave Airlie 2020-11-04 10:55:11 +10:00
Parents: 3cea11cd5e 4dfec0d1d7
Commit: 1cd260a790
291 changed files: 7530 additions and 3264 deletions

@ -0,0 +1,95 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2019 Analogix Semiconductor, Inc.
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/bridge/analogix,anx7625.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Analogix ANX7625 SlimPort (4K Mobile HD Transmitter)
maintainers:
- Xin Ji <xji@analogixsemi.com>
description: |
The ANX7625 is an ultra-low power 4K Mobile HD Transmitter
designed for portable devices.
properties:
compatible:
items:
- const: analogix,anx7625
reg:
maxItems: 1
interrupts:
description: used for interrupt pin B8.
maxItems: 1
enable-gpios:
description: used for power on chip control, POWER_EN pin D2.
maxItems: 1
reset-gpios:
description: used for reset chip control, RESET_N pin B7.
maxItems: 1
ports:
type: object
properties:
port@0:
type: object
description:
Video port for MIPI DSI input.
port@1:
type: object
description:
Video port for panel or connector.
required:
- port@0
- port@1
required:
- compatible
- reg
- ports
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
i2c0 {
#address-cells = <1>;
#size-cells = <0>;
encoder@58 {
compatible = "analogix,anx7625";
reg = <0x58>;
enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>;
ports {
#address-cells = <1>;
#size-cells = <0>;
mipi2dp_bridge_in: port@0 {
reg = <0>;
anx7625_in: endpoint {
remote-endpoint = <&mipi_dsi>;
};
};
mipi2dp_bridge_out: port@1 {
reg = <1>;
anx7625_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};
};
};


@ -0,0 +1,87 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/novatek,nt36672a.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Novatek NT36672A based DSI display Panels
maintainers:
- Sumit Semwal <sumit.semwal@linaro.org>
description: |
The nt36672a IC from Novatek is a generic DSI Panel IC used to drive dsi
panels.
Right now, support is added only for a Tianma FHD+ LCD display panel with a
resolution of 1080x2246. It is a video mode DSI panel.
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
items:
- enum:
- tianma,fhd-video
- const: novatek,nt36672a
description: This indicates the panel manufacturer of the panel that is
in turn using the NT36672A panel driver. This compatible string
determines how the NT36672A panel driver is configured for the indicated
panel. The novatek,nt36672a compatible shall always be provided as a fallback.
reset-gpios:
description: phandle of gpio for reset line - This should be 8mA, gpio
can be configured using mux, pinctrl, pinctrl-names (active high)
vddio-supply:
description: phandle of the regulator that provides the supply voltage
Power IC supply
vddpos-supply:
description: phandle of the positive boost supply regulator
vddneg-supply:
description: phandle of the negative boost supply regulator
reg: true
port: true
required:
- compatible
- reg
- vddi0-supply
- vddpos-supply
- vddneg-supply
- reset-gpios
- port
unevaluatedProperties: false
examples:
- |+
#include <dt-bindings/gpio/gpio.h>
dsi0 {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "tianma,fhd-video", "novatek,nt36672a";
reg = <0>;
vddi0-supply = <&vreg_l14a_1p88>;
vddpos-supply = <&lab>;
vddneg-supply = <&ibb>;
reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
port {
tianma_nt36672a_in_0: endpoint {
remote-endpoint = <&dsi0_out>;
};
};
};
};
...


@ -47,6 +47,8 @@ properties:
- panasonic,vvx10f004b00
# Panasonic 10" WUXGA TFT LCD panel
- panasonic,vvx10f034n00
# Shanghai Top Display Optoelectronics 7" TL070WSH30 1024x600 TFT LCD panel
- tdo,tl070wsh30
reg:
maxItems: 1
@ -54,6 +56,7 @@ properties:
backlight: true
enable-gpios: true
reset-gpios: true
port: true
power-supply: true


@ -282,6 +282,8 @@ properties:
- vxt,vl050-8048nt-c01
# Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
- winstar,wf35ltiacd
# Yes Optoelectronics YTC700TLAG-05-201C 7" TFT LCD panel
- yes-optoelectronics,ytc700tlag-05-201c
backlight: true
enable-gpios: true


@ -55,6 +55,14 @@ properties:
- const: vp1
- const: vp2
assigned-clocks:
minItems: 1
maxItems: 3
assigned-clock-parents:
minItems: 1
maxItems: 3
interrupts:
maxItems: 1
@ -62,6 +70,9 @@ properties:
maxItems: 1
description: phandle to the associated power domain
dma-coherent:
type: boolean
ports:
type: object
description:


@ -77,6 +77,14 @@ properties:
- const: vp3
- const: vp4
assigned-clocks:
minItems: 1
maxItems: 5
assigned-clock-parents:
minItems: 1
maxItems: 5
interrupts:
items:
- description: common_m DSS Master common
@ -95,6 +103,9 @@ properties:
maxItems: 1
description: phandle to the associated power domain
dma-coherent:
type: boolean
ports:
type: object
description:


@ -1053,6 +1053,8 @@ patternProperties:
description: Trusted Computing Group
"^tcl,.*":
description: Toby Churchill Ltd.
"^tdo,.*":
description: Shanghai Top Display Optoelectronics Co., Ltd
"^technexion,.*":
description: TechNexion
"^technologic,.*":
@ -1210,6 +1212,8 @@ patternProperties:
description: Shenzhen Xunlong Software CO.,Limited
"^xylon,.*":
description: Xylon
"^yes-optoelectronics,.*":
description: Yes Optoelectronics Co.,Ltd.
"^ylm,.*":
description: Shenzhen Yangliming Electronic Technology Co., Ltd.
"^yna,.*":


@ -115,6 +115,15 @@ Kernel Functions and Structures Reference
.. kernel-doc:: include/linux/dma-buf.h
:internal:
Buffer Mapping Helpers
~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: include/linux/dma-buf-map.h
:doc: overview
.. kernel-doc:: include/linux/dma-buf-map.h
:internal:
Reservation Objects
-------------------


@ -182,11 +182,11 @@ acquired and release by calling drm_gem_object_get() and drm_gem_object_put()
respectively.
When the last reference to a GEM object is released the GEM core calls
the :c:type:`struct drm_driver <drm_driver>` gem_free_object_unlocked
the :c:type:`struct drm_gem_object_funcs <gem_object_funcs>` free
operation. That operation is mandatory for GEM-enabled drivers and must
free the GEM object and all associated resources.
void (\*gem_free_object) (struct drm_gem_object \*obj); Drivers are
void (\*free) (struct drm_gem_object \*obj); Drivers are
responsible for freeing all GEM object resources. This includes the
resources created by the GEM core, which need to be released with
drm_gem_object_release().


@ -149,7 +149,7 @@ have to keep track of that lock and either call ``unreference`` or
``unreference_locked`` depending upon context.
Core GEM doesn't have a need for ``struct_mutex`` any more since kernel 4.8,
and there's a ``gem_free_object_unlocked`` callback for any drivers which are
and there's a GEM object ``free`` callback for any drivers which are
entirely ``struct_mutex`` free.
For drivers that need ``struct_mutex`` it should be replaced with a driver-
@ -289,11 +289,8 @@ struct drm_gem_object_funcs
---------------------------
GEM objects can now have a function table instead of having the callbacks on the
DRM driver struct. This is now the preferred way and drivers can be moved over.
We also need a 2nd version of the CMA define that doesn't require the
vmapping to be present (different hook for prime importing). Plus this needs to
be rolled out to all drivers using their own implementations, too.
DRM driver struct. This is now the preferred way. Callbacks in drivers have been
converted, except for struct drm_driver.gem_prime_mmap.
Level: Intermediate
@ -518,9 +515,6 @@ There's a bunch of issues with it:
this (together with the drm_minor->drm_device move) would allow us to remove
debugfs_init.
- Drop the return code and error checking from all debugfs functions. Greg KH is
working on this already.
Contact: Daniel Vetter
Level: Intermediate


@ -10,36 +10,24 @@
TODO
====
CRC API Improvements
--------------------
If you want to do any of the items listed below, please share your interest
with VKMS maintainers.
- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
IGT better support
------------------
- Use the alpha value to blend vaddr_src with vaddr_dst instead of
overwriting it in ``blend()``.
- Investigate: (1) test cases on kms_plane that are failing due to timeout on
capturing CRC; (2) when running kms_flip test cases in sequence, some
successful individual test cases are failing randomly.
- Add igt test to check cleared alpha value for XRGB plane format.
- Add igt test to check extreme alpha values i.e. fully opaque and fully
transparent (intermediate values are affected by hw-specific rounding modes).
Runtime Configuration
---------------------
We want to be able to reconfigure vkms instance without having to reload the
module. Use/Test-cases:
- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of
compositors).
- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
them first).
- Change output configuration: Plug/unplug screens, change EDID, allow changing
the refresh rate.
The currently proposed solution is to expose vkms configuration through
configfs. All existing module options should be supported through configfs too.
- VKMS already has support for vblanks simulated via hrtimers, which can be
tested with kms_flip test; in some way, we can say that VKMS already mimics
the real hardware vblank. However, we also have virtual hardware that does
not support vblank interrupt and completes page_flip events right away; in
this case, compositor developers may end up creating a busy loop on virtual
hardware. It would be useful to support Virtual Hardware behavior in VKMS
because this can help compositor developers to test their features in
multiple scenarios.
Add Plane Features
------------------
@ -55,33 +43,49 @@ There's lots of plane features we could add support for:
- Additional buffer formats, especially YUV formats for video like NV12.
Low/high bpp RGB formats would also be interesting.
- Async updates (currently only possible on cursor plane using the legacy cursor
api).
- Async updates (currently only possible on cursor plane using the legacy
cursor api).
For all of these, we also want to review the igt test coverage and make sure all
relevant igt testcases work on vkms.
Writeback support
-----------------
Currently vkms only computes a CRC for each frame. Once we have additional plane
features, we could write back the entire composited frame, and expose it as:
- Writeback connector. This is useful for testing compositors if you don't have
hardware with writeback support.
- As a v4l device. This is useful for debugging compositors on special vkms
configurations, so that developers see what's really going on.
For all of these, we also want to review the igt test coverage and make sure
all relevant igt testcases work on vkms.
Prime Buffer Sharing
--------------------
We already have vgem, which is a gem driver for testing rendering, similar to
how vkms is for testing the modeset side. Adding buffer sharing support to vkms
allows us to test them together, to test synchronization and lots of other
features. Also, this allows compositors to test whether they work correctly on
SoC chips, where the display and rendering is very often split between 2
drivers.
- Syzbot report - WARNING in vkms_gem_free_object:
https://syzkaller.appspot.com/bug?extid=e7ad70d406e74d8fc9d0
Runtime Configuration
---------------------
We want to be able to reconfigure vkms instance without having to reload the
module. Use/Test-cases:
- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling
of compositors).
- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
them first).
- Change output configuration: Plug/unplug screens, change EDID, allow changing
the refresh rate.
The currently proposed solution is to expose vkms configuration through
configfs. All existing module options should be supported through configfs
too.
Writeback support
-----------------
- The writeback and CRC capture operations share the use of composer_enabled
boolean to ensure vblanks. Probably, when these operations work together,
composer_enabled needs to refcount the composer state to work properly.
- Add support for cloned writeback outputs and related test cases using a
cloned output in the IGT kms_writeback.
- As a v4l device. This is useful for debugging compositors on special vkms
configurations, so that developers see what's really going on.
Output Features
---------------
@ -93,7 +97,10 @@ Output Features
- Add support for link status, so that compositors can validate their runtime
fallbacks when e.g. a Display Port link goes bad.
- All the hotplug handling describe under "Runtime Configuration".
CRC API Improvements
--------------------
- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
Atomic Check using eBPF
-----------------------


@ -5576,6 +5576,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
DRM DRIVER FOR NOVATEK NT36672A PANELS
M: Sumit Semwal <sumit.semwal@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Ben Skeggs <bskeggs@redhat.com>
L: dri-devel@lists.freedesktop.org
@ -5955,6 +5962,7 @@ F: include/uapi/drm/v3d_drm.h
DRM DRIVERS FOR VC4
M: Eric Anholt <eric@anholt.net>
M: Maxime Ripard <mripard@kernel.org>
S: Supported
T: git git://github.com/anholt/linux
T: git git://anongit.freedesktop.org/drm/drm-misc
@ -6911,10 +6919,9 @@ F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: dri-devel@lists.freedesktop.org
L: linux-fbdev@vger.kernel.org
S: Maintained
S: Orphan
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/fb/


@ -851,6 +851,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin);
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
* on error. May return -EINTR if it is interrupted by a signal.
*
* On success, the DMA addresses and lengths in the returned scatterlist are
* PAGE_SIZE aligned.
*
* A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
@ -904,6 +907,24 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
attach->dir = direction;
}
#ifdef CONFIG_DMA_API_DEBUG
{
struct scatterlist *sg;
u64 addr;
int len;
int i;
for_each_sgtable_dma_sg(sg_table, sg, i) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
pr_debug("%s: addr %llx or len %x is not page aligned!\n",
__func__, addr, len);
}
}
}
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@ -1188,68 +1209,72 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
* address space. Same restrictions as for vmap and friends apply.
* @dmabuf: [in] buffer to vmap
* @map: [out] returns the vmap pointer
*
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
* Please attempt to use kmap/kunmap before thinking about these interfaces.
*
* Returns NULL on error.
* Returns 0 on success, or a negative errno code otherwise.
*/
void *dma_buf_vmap(struct dma_buf *dmabuf)
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
void *ptr;
struct dma_buf_map ptr;
int ret = 0;
dma_buf_map_clear(map);
if (WARN_ON(!dmabuf))
return NULL;
return -EINVAL;
if (!dmabuf->ops->vmap)
return NULL;
return -EINVAL;
mutex_lock(&dmabuf->lock);
if (dmabuf->vmapping_counter) {
dmabuf->vmapping_counter++;
BUG_ON(!dmabuf->vmap_ptr);
ptr = dmabuf->vmap_ptr;
BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
*map = dmabuf->vmap_ptr;
goto out_unlock;
}
BUG_ON(dmabuf->vmap_ptr);
BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
ptr = dmabuf->ops->vmap(dmabuf);
if (WARN_ON_ONCE(IS_ERR(ptr)))
ptr = NULL;
if (!ptr)
ret = dmabuf->ops->vmap(dmabuf, &ptr);
if (WARN_ON_ONCE(ret))
goto out_unlock;
dmabuf->vmap_ptr = ptr;
dmabuf->vmapping_counter = 1;
*map = dmabuf->vmap_ptr;
out_unlock:
mutex_unlock(&dmabuf->lock);
return ptr;
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap
* @vaddr: [in] vmap to vunmap
* @map: [in] vmap pointer to vunmap
*/
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
if (WARN_ON(!dmabuf))
return;
BUG_ON(!dmabuf->vmap_ptr);
BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
BUG_ON(dmabuf->vmapping_counter == 0);
BUG_ON(dmabuf->vmap_ptr != vaddr);
BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
mutex_lock(&dmabuf->lock);
if (--dmabuf->vmapping_counter == 0) {
if (dmabuf->ops->vunmap)
dmabuf->ops->vunmap(dmabuf, vaddr);
dmabuf->vmap_ptr = NULL;
dmabuf->ops->vunmap(dmabuf, map);
dma_buf_map_clear(&dmabuf->vmap_ptr);
}
mutex_unlock(&dmabuf->lock);
}


@ -63,7 +63,7 @@ static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
struct dma_resv_list *list;
list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
if (!list)
return NULL;


@ -235,7 +235,7 @@ static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return 0;
}
static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
void *vaddr;
@ -244,10 +244,14 @@ static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
vaddr = dma_heap_buffer_vmap_get(buffer);
mutex_unlock(&buffer->lock);
return vaddr;
if (!vaddr)
return -ENOMEM;
dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct heap_helper_buffer *buffer = dmabuf->priv;


@ -287,6 +287,7 @@ config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select CRC32
default n
help


@ -1479,7 +1479,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
}
}
if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
amdgpu_bo_fence(bo,
&avm->process_info->eviction_fence->base,
true);
@ -1558,7 +1558,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
* required.
*/
if (mem->mapped_to_gpu_memory == 0 &&
!amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
!amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
!mem->bo->tbo.pin_count)
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
process_info->eviction_fence);


@ -410,7 +410,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
uint32_t domain;
int r;
if (bo->pin_count)
if (bo->tbo.pin_count)
return 0;
/* Don't move this buffer if we have depleted our allowance


@ -1319,6 +1319,7 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct ttm_resource_manager *man;
int r;
r = pm_runtime_get_sync(dev->dev);
@ -1327,7 +1328,9 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
return r;
}
seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
seq_printf(m, "(%d)\n", r);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);


@ -132,10 +132,7 @@ static void amdgpu_display_unpin_work_func(struct work_struct *__work)
/* unpin of the old buffer */
r = amdgpu_bo_reserve(work->old_abo, true);
if (likely(r == 0)) {
r = amdgpu_bo_unpin(work->old_abo);
if (unlikely(r != 0)) {
DRM_ERROR("failed to unpin buffer after flip\n");
}
amdgpu_bo_unpin(work->old_abo);
amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
@ -249,8 +246,7 @@ pflip_cleanup:
}
unpin:
if (!adev->enable_virtual_display)
if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
DRM_ERROR("failed to unpin new abo in error path\n");
amdgpu_bo_unpin(new_abo);
unreserve:
amdgpu_bo_unreserve(new_abo);


@ -281,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct sg_table *sgt;
long r;
if (!bo->pin_count) {
if (!bo->tbo.pin_count) {
/* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false };
unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
@ -390,7 +390,8 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
if (unlikely(ret != 0))
return ret;
if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
if (!bo->tbo.pin_count &&
(bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}


@ -1520,19 +1520,13 @@ static struct drm_driver kms_driver = {
.lastclose = amdgpu_driver_lastclose_kms,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free,
.gem_open_object = amdgpu_gem_object_open,
.gem_close_object = amdgpu_gem_object_close,
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
.fops = &amdgpu_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME,


@ -36,9 +36,12 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
@ -87,6 +90,7 @@ retry:
return r;
}
*obj = &bo->tbo.base;
(*obj)->funcs = &amdgpu_gem_object_funcs;
return 0;
}
@ -119,8 +123,8 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv)
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@ -152,8 +156,8 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0;
}
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@ -211,6 +215,15 @@ out_unlock:
ttm_eu_backoff_reservation(&ticket, &list);
}
static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
.free = amdgpu_gem_object_free,
.open = amdgpu_gem_object_open,
.close = amdgpu_gem_object_close,
.export = amdgpu_gem_prime_export,
.vmap = amdgpu_gem_prime_vmap,
.vunmap = amdgpu_gem_prime_vunmap,
};
/*
* GEM ioctls.
*/
@ -870,7 +883,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
pin_count = READ_ONCE(bo->pin_count);
pin_count = READ_ONCE(bo->tbo.pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);


@ -33,11 +33,6 @@
#define AMDGPU_GEM_DOMAIN_MAX 0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
/*


@ -45,12 +45,10 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_dma_tt *ttm;
switch (bo->tbo.mem.mem_type) {
case TTM_PL_TT:
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
*addr = ttm->dma_address[0];
*addr = bo->tbo.ttm->dma_address[0];
break;
case TTM_PL_VRAM:
*addr = amdgpu_bo_gpu_offset(bo);
@ -122,16 +120,14 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_dma_tt *ttm;
if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;
ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
return AMDGPU_BO_INVALID_OFFSET;
return adev->gmc.agp_start + ttm->dma_address[0];
return adev->gmc.agp_start + bo->ttm->dma_address[0];
}
/**


@ -136,7 +136,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)
ttm_resource_manager_set_used(man, false);
ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
if (ret)
return;


@ -78,7 +78,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
if (bo->pin_count > 0)
if (bo->tbo.pin_count > 0)
amdgpu_bo_subtract_pin_size(bo);
amdgpu_bo_kunmap(bo);
@ -137,7 +137,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_VRAM;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
@ -154,11 +154,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_TT;
places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
else
places[c].flags |= TTM_PL_FLAG_CACHED;
c++;
}
@ -167,11 +162,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_SYSTEM;
places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
else
places[c].flags |= TTM_PL_FLAG_CACHED;
c++;
}
@ -179,7 +169,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_GDS;
places[c].flags = TTM_PL_FLAG_UNCACHED;
places[c].flags = 0;
c++;
}
@ -187,7 +177,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_GWS;
places[c].flags = TTM_PL_FLAG_UNCACHED;
places[c].flags = 0;
c++;
}
@ -195,7 +185,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_OA;
places[c].flags = TTM_PL_FLAG_UNCACHED;
places[c].flags = 0;
c++;
}
@ -203,7 +193,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_SYSTEM;
places[c].flags = TTM_PL_MASK_CACHING;
places[c].flags = 0;
c++;
}
@ -721,7 +711,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
uint32_t domain;
int r;
if (bo->pin_count)
if (bo->tbo.pin_count)
return 0;
domain = bo->preferred_domains;
@ -918,13 +908,13 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
*/
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
if (bo->pin_count) {
if (bo->tbo.pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
bo->pin_count++;
ttm_bo_pin(&bo->tbo);
if (max_offset != 0) {
u64 domain_start = amdgpu_ttm_domain_start(adev,
@ -955,7 +945,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (!bo->placements[i].lpfn ||
(lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@ -964,7 +953,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
goto error;
}
bo->pin_count = 1;
ttm_bo_pin(&bo->tbo);
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
@ -1006,34 +995,16 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (WARN_ON_ONCE(!bo->pin_count)) {
dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
if (bo->pin_count)
return 0;
ttm_bo_unpin(&bo->tbo);
if (bo->tbo.pin_count)
return;
amdgpu_bo_subtract_pin_size(bo);
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r))
dev_err(adev->dev, "%p validate failed for unpin\n", bo);
return r;
}
/**
@ -1048,6 +1019,8 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
*/
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
struct ttm_resource_manager *man;
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
#ifndef CONFIG_HIBERNATION
if (adev->flags & AMD_IS_APU) {
@ -1055,7 +1028,9 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
return 0;
}
#endif
return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}
static const char *amdgpu_vram_names[] = {
@ -1360,19 +1335,14 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo;
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
unsigned long offset, size;
int r;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return 0;
abo = ttm_to_amdgpu_bo(bo);
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@ -1385,8 +1355,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
/* Can't move a pinned BO to visible VRAM */
if (abo->pin_count > 0)
return -EINVAL;
if (abo->tbo.pin_count > 0)
return VM_FAULT_SIGBUS;
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
@ -1398,15 +1368,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
abo->placement.busy_placement = &abo->placements[1];
r = ttm_bo_validate(bo, &abo->placement, &ctx);
if (unlikely(r != 0))
return r;
if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
return VM_FAULT_NOPAGE;
else if (unlikely(r))
return VM_FAULT_SIGBUS;
offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
if (bo->mem.mem_type == TTM_PL_VRAM &&
(offset + size) > adev->gmc.visible_vram_size)
return -EINVAL;
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
return 0;
}
@ -1489,7 +1462,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
!bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
!bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));


@ -89,7 +89,6 @@ struct amdgpu_bo {
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u64 flags;
unsigned pin_count;
u64 tiling_flags;
u64 metadata_flags;
void *metadata;
@ -267,7 +266,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
@ -285,7 +284,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,


@ -66,6 +66,8 @@
static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
@ -92,7 +94,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
.flags = TTM_PL_MASK_CACHING
.flags = 0
};
/* Don't handle scatter gather BOs */
@ -292,11 +294,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
cpu_addr = &job->ibs[0].ptr[num_dw];
if (mem->mem_type == TTM_PL_TT) {
struct ttm_dma_tt *dma;
dma_addr_t *dma_address;
dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
cpu_addr);
if (r)
@ -538,19 +538,13 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
placements.flags = TTM_PL_MASK_CACHING;
placements.flags = 0;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit from VRAM\n");
return r;
}
/* set caching flags */
r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
if (unlikely(r)) {
goto out_cleanup;
}
r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (unlikely(r))
goto out_cleanup;
@ -567,8 +561,13 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
goto out_cleanup;
}
/* move BO (in tmp_mem) to new_mem */
r = ttm_bo_move_ttm(bo, ctx, new_mem);
r = ttm_bo_wait_ctx(bo, ctx);
if (unlikely(r))
goto out_cleanup;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
ttm_resource_free(bo, &bo->mem);
ttm_bo_assign_mem(bo, new_mem);
out_cleanup:
ttm_resource_free(bo, &tmp_mem);
return r;
@ -599,7 +598,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
placements.flags = TTM_PL_MASK_CACHING;
placements.flags = 0;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit to VRAM\n");
@ -607,11 +606,16 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
}
/* move/bind old memory to GTT space */
r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (unlikely(r))
return r;
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
ttm_bo_assign_mem(bo, &tmp_mem);
/* copy to VRAM */
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
if (unlikely(r)) {
@ -660,9 +664,17 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = &bo->mem;
int r;
if (new_mem->mem_type == TTM_PL_TT) {
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
if (r)
return r;
}
amdgpu_bo_move_notify(bo, evict, new_mem);
/* Can't move a pinned BO */
abo = ttm_to_amdgpu_bo(bo);
if (WARN_ON_ONCE(abo->pin_count > 0))
if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
return -EINVAL;
adev = amdgpu_ttm_adev(bo->bdev);
@ -671,14 +683,24 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
ttm_bo_move_null(bo, new_mem);
return 0;
}
if ((old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_SYSTEM) ||
(old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_TT)) {
/* bind is enough */
if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_TT) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
if (old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = ttm_bo_wait_ctx(bo, ctx);
if (r)
goto fail;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
ttm_resource_free(bo, &bo->mem);
ttm_bo_assign_mem(bo, new_mem);
return 0;
}
if (old_mem->mem_type == AMDGPU_PL_GDS ||
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
@ -712,12 +734,12 @@ memcpy:
if (!amdgpu_mem_visible(adev, old_mem) ||
!amdgpu_mem_visible(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
goto fail;
}
r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r)
return r;
goto fail;
}
if (bo->type == ttm_bo_type_device &&
@ -732,6 +754,11 @@ memcpy:
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
return 0;
fail:
swap(*new_mem, bo->mem);
amdgpu_bo_move_notify(bo, false, new_mem);
swap(*new_mem, bo->mem);
return r;
}
/**
@ -767,6 +794,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@ -811,7 +839,7 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
* TTM backend functions.
*/
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
struct ttm_tt ttm;
struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
@ -943,7 +971,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
if (!gtt || !gtt->userptr)
return false;
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
gtt->userptr, ttm->num_pages);
WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
@ -1095,7 +1123,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
gart_bind_fail:
if (r)
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
return r;
@ -1130,7 +1158,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
}
}
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
@ -1153,7 +1181,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
ttm->pages, gtt->ttm.dma_address, flags);
if (r)
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
gtt->bound = true;
return r;
@ -1267,8 +1295,8 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
if (r)
DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
gtt->ttm.ttm.num_pages, gtt->offset);
DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
gtt->ttm.num_pages, gtt->offset);
gtt->bound = false;
}
@ -1282,7 +1310,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
if (gtt->usertask)
put_task_struct(gtt->usertask);
ttm_dma_tt_fini(&gtt->ttm);
ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@ -1296,7 +1324,9 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_ttm_tt *gtt;
enum ttm_caching caching;
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@ -1304,12 +1334,17 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
}
gtt->gobj = &bo->base;
if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
caching = ttm_write_combined;
else
caching = ttm_cached;
/* allocate space for the uninitialized page entries */
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
return &gtt->ttm.ttm;
return &gtt->ttm;
}
/**
@ -1332,7 +1367,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
ttm_tt_set_populated(ttm);
return 0;
}
@ -1352,7 +1386,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
ttm_tt_set_populated(ttm);
return 0;
}
@ -1478,7 +1511,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
/* Return false if no part of the ttm_tt object lies within
* the range
*/
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
@ -1529,7 +1562,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && mem->mem_type == TTM_PL_TT) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching_state == tt_cached)
if (ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
@ -1699,20 +1732,23 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
return ret;
}
static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
amdgpu_bo_move_notify(bo, false, NULL);
}
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
.ttm_tt_bind = &amdgpu_ttm_backend_bind,
.ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
.move_notify = &amdgpu_bo_move_notify,
.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
.release_notify = &amdgpu_bo_release_notify,
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
.access_memory = &amdgpu_ttm_access_memory,
@ -2092,15 +2128,48 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
adev->mman.buffer_funcs_enabled = enable;
}
static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
vm_fault_t ret;
ret = ttm_bo_vm_reserve(bo, vmf);
if (ret)
return ret;
ret = amdgpu_bo_fault_reserve_notify(bo);
if (ret)
goto unlock;
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT, 1);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
unlock:
dma_resv_unlock(bo->base.resv);
return ret;
}
static struct vm_operations_struct amdgpu_ttm_vm_ops = {
.fault = amdgpu_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
.access = ttm_bo_vm_access
};
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
int r;
if (adev == NULL)
return -EINVAL;
r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
if (unlikely(r != 0))
return r;
return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
vma->vm_ops = &amdgpu_ttm_vm_ops;
return 0;
}
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,


@ -609,7 +609,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
if (bo->pin_count)
return;
abo = ttm_to_amdgpu_bo(bo);
@@ -1790,7 +1790,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
resv = vm->root.base.bo->tbo.base.resv;
} else {
struct drm_gem_object *obj = &bo->tbo.base;
struct ttm_dma_tt *ttm;
resv = bo->tbo.base.resv;
if (obj->import_attach && bo_va->is_xgmi) {
@@ -1803,10 +1802,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
}
mem = &bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
if (mem->mem_type == TTM_PL_TT)
pages_addr = bo->tbo.ttm->dma_address;
}
if (bo) {


@@ -212,7 +212,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
ttm_resource_manager_set_used(man, false);
ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
if (ret)
return;


@@ -116,7 +116,7 @@ static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
@@ -127,7 +127,7 @@ static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
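All of the CRTC hunks below apply the same mechanical conversion: atomic_enable/atomic_disable now receive the full struct drm_atomic_state, and drivers that still need the old CRTC state look it up themselves. A minimal sketch of the pattern, with hypothetical foo_* names:

static void foo_hw_stop(struct drm_crtc *crtc);	/* hypothetical hw hook */

static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	/* old_state can be NULL if this CRTC was not part of the old state */
	if (old_state && old_state->active)
		foo_hw_stop(crtc);
}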


@@ -273,8 +273,10 @@ komeda_crtc_do_flush(struct drm_crtc *crtc,
static void
komeda_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
crtc);
pm_runtime_get_sync(crtc->dev->dev);
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
@@ -319,8 +321,10 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
static void
komeda_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
crtc);
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
struct komeda_pipeline *master = kcrtc->master;


@@ -41,18 +41,7 @@ static int komeda_register_show(struct seq_file *sf, void *x)
return 0;
}
static int komeda_register_open(struct inode *inode, struct file *filp)
{
return single_open(filp, komeda_register_show, inode->i_private);
}
static const struct file_operations komeda_register_fops = {
.owner = THIS_MODULE,
.open = komeda_register_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
DEFINE_SHOW_ATTRIBUTE(komeda_register);
#ifdef CONFIG_DEBUG_FS
static void komeda_debugfs_init(struct komeda_dev *mdev)
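For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates exactly the open/fops boilerplate removed above:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}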
@@ -261,8 +250,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto disable_clk;
}
dev->dma_parms = &mdev->dma_parms;
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
dma_set_max_seg_size(dev, U32_MAX);
mdev->iommu = iommu_get_domain_for_dev(mdev->dev);
if (!mdev->iommu)


@@ -163,8 +163,6 @@ struct komeda_dev {
struct device *dev;
/** @reg_base: the base address of komeda io space */
u32 __iomem *reg_base;
/** @dma_parms: the dma parameters of komeda */
struct device_dma_parameters dma_parms;
/** @chip: the basic chip information */
struct komeda_chip_info chip;


@@ -168,7 +168,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
@@ -179,7 +179,7 @@ static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);


@@ -46,7 +46,7 @@ static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc,
}
static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
@@ -70,8 +70,10 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
int err;


@@ -467,8 +467,10 @@ static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct drm_pending_vblank_event *event;
@@ -503,8 +505,10 @@ static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);


@@ -37,13 +37,10 @@ DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static struct drm_driver armada_drm_driver = {
.lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = armada_gem_prime_export,
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
.gem_vm_ops = &armada_gem_vm_ops,
.major = 1,
.minor = 0,
.name = "armada-drm",


@@ -25,7 +25,7 @@ static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
const struct vm_operations_struct armada_gem_vm_ops = {
static const struct vm_operations_struct armada_gem_vm_ops = {
.fault = armada_gem_vm_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -184,6 +184,12 @@ armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
return dobj->addr;
}
static const struct drm_gem_object_funcs armada_gem_object_funcs = {
.free = armada_gem_free_object,
.export = armada_gem_prime_export,
.vm_ops = &armada_gem_vm_ops,
};
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
@@ -195,6 +201,8 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
if (!obj)
return NULL;
obj->obj.funcs = &armada_gem_object_funcs;
drm_gem_private_object_init(dev, &obj->obj, size);
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
@@ -214,6 +222,8 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
if (!obj)
return NULL;
obj->obj.funcs = &armada_gem_object_funcs;
if (drm_gem_object_init(dev, &obj->obj, size)) {
kfree(obj);
return NULL;


@@ -21,8 +21,6 @@ struct armada_gem_object {
void *update_data;
};
extern const struct vm_operations_struct armada_gem_vm_ops;
#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
void armada_gem_free_object(struct drm_gem_object *);


@@ -3,6 +3,7 @@ config DRM_ASPEED_GFX
tristate "ASPEED BMC Display Controller"
depends on DRM && OF
depends on (COMPILE_TEST || ARCH_ASPEED)
depends on MMU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS


@@ -75,7 +75,7 @@ int aspeed_gfx_create_output(struct drm_device *drm);
/* CTRL2 */
#define CRT_CTRL_DAC_EN BIT(0)
#define CRT_CTRL_VBLANK_LINE(x) (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK)
#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(20, 31)
#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(31, 20)
/* CRT_HORIZ0 */
#define CRT_H_TOTAL(x) (x)


@@ -193,12 +193,7 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_create_object = drm_gem_cma_create_object_default_funcs,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_mmap = drm_gem_prime_mmap,
DRM_GEM_CMA_DRIVER_OPS,
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
@@ -212,6 +207,69 @@ static const struct of_device_id aspeed_gfx_match[] = {
{ }
};
#define ASPEED_SCU_VGA0 0x50
#define ASPEED_SCU_MISC_CTRL 0x2c
static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct aspeed_gfx *priv = dev_get_drvdata(dev);
u32 val;
int rc;
rc = kstrtou32(buf, 0, &val);
if (rc)
return rc;
if (val > 3)
return -EINVAL;
rc = regmap_update_bits(priv->scu, ASPEED_SCU_MISC_CTRL, 0x30000, val << 16);
if (rc < 0)
return 0;
return count;
}
static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct aspeed_gfx *priv = dev_get_drvdata(dev);
u32 reg;
int rc;
rc = regmap_read(priv->scu, ASPEED_SCU_MISC_CTRL, &reg);
if (rc)
return rc;
return sprintf(buf, "%u\n", (reg >> 16) & 0x3);
}
static DEVICE_ATTR_RW(dac_mux);
static ssize_t
vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct aspeed_gfx *priv = dev_get_drvdata(dev);
u32 reg;
int rc;
rc = regmap_read(priv->scu, ASPEED_SCU_VGA0, &reg);
if (rc)
return rc;
return sprintf(buf, "%u\n", reg & 1);
}
static DEVICE_ATTR_RO(vga_pw);
static struct attribute *aspeed_sysfs_entries[] = {
&dev_attr_vga_pw.attr,
&dev_attr_dac_mux.attr,
NULL,
};
static struct attribute_group aspeed_sysfs_attr_group = {
.attrs = aspeed_sysfs_entries,
};
static int aspeed_gfx_probe(struct platform_device *pdev)
{
struct aspeed_gfx *priv;
@@ -226,6 +284,12 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
return ret;
dev_set_drvdata(&pdev->dev, priv);
ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
if (ret)
return ret;
ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_unload;
@@ -234,6 +298,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
return 0;
err_unload:
sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
aspeed_gfx_unload(&priv->drm);
return ret;
@@ -243,6 +308,7 @@ static int aspeed_gfx_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
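The new controls are plain sysfs attributes on the platform device. A user-space sketch; the device path below is hypothetical and depends on the board's device tree:

/* Hypothetical path; look up the real node under /sys/devices/platform. */
#include <fcntl.h>
#include <unistd.h>

static int aspeed_set_dac_mux(unsigned int val)
{
	char c = '0' + (val & 0x3);	/* dac_mux accepts 0..3 */
	int fd = open("/sys/devices/platform/ahb/ahb:apb/1e6e6000.display/dac_mux",
		      O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, &c, 1);
	close(fd);
	return n == 1 ? 0 : -1;
}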


@@ -742,7 +742,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_SUSPEND:
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 1);
ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_OFF:
if (ast->tx_chip_type == AST_TX_DP501)
@@ -777,9 +776,24 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
return 0;
}
static void
ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
{
struct ast_private *ast = to_ast_private(crtc->dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
/*
* The gamma LUT has to be reloaded after changing the primary
* plane's color format.
*/
if (old_ast_crtc_state->format != ast_crtc_state->format)
ast_crtc_load_lut(ast, crtc);
}
static void
ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
@@ -802,8 +816,10 @@ ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
static void
ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
@@ -830,6 +846,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_check = ast_crtc_helper_atomic_check,
.atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
};


@@ -165,7 +165,7 @@ atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c,
}
static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -200,7 +200,7 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
}
static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);


@@ -55,9 +55,9 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511)
return 0;
}
int adv7511_hdmi_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
static int adv7511_hdmi_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms)
{
struct adv7511 *adv7511 = dev_get_drvdata(dev);
unsigned int audio_source, i2s_format = 0;


@@ -25,3 +25,12 @@ config DRM_ANALOGIX_ANX78XX
config DRM_ANALOGIX_DP
tristate
depends on DRM
config DRM_ANALOGIX_ANX7625
tristate "Analogix Anx7625 MIPI to DP interface support"
depends on DRM
depends on OF
help
ANX7625 is an ultra-low power 4K mobile HD transmitter
designed for portable devices. It converts MIPI/DPI to
DisplayPort 1.3 4K.


@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o analogix-i2c-dptx.o
obj-$(CONFIG_DRM_ANALOGIX_ANX6345) += analogix-anx6345.o
obj-$(CONFIG_DRM_ANALOGIX_ANX7625) += anx7625.o
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o


@@ -524,7 +524,7 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
}
int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
static int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
{
int reg;
int retval = 0;

(The diff for this file is not shown because of its size.)


@@ -0,0 +1,390 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2020, Analogix Semiconductor. All rights reserved.
*
*/
#ifndef __ANX7625_H__
#define __ANX7625_H__
#define ANX7625_DRV_VERSION "0.1.04"
/* Loading OCM re-trying times */
#define OCM_LOADING_TIME 10
/********* ANX7625 Register **********/
#define TX_P0_ADDR 0x70
#define TX_P1_ADDR 0x7A
#define TX_P2_ADDR 0x72
#define RX_P0_ADDR 0x7e
#define RX_P1_ADDR 0x84
#define RX_P2_ADDR 0x54
#define RSVD_00_ADDR 0x00
#define RSVD_D1_ADDR 0xD1
#define RSVD_60_ADDR 0x60
#define RSVD_39_ADDR 0x39
#define RSVD_7F_ADDR 0x7F
#define TCPC_INTERFACE_ADDR 0x58
/* Clock frequency in Hz */
#define XTAL_FRQ (27 * 1000000)
#define POST_DIVIDER_MIN 1
#define POST_DIVIDER_MAX 16
#define PLL_OUT_FREQ_MIN 520000000UL
#define PLL_OUT_FREQ_MAX 730000000UL
#define PLL_OUT_FREQ_ABS_MIN 300000000UL
#define PLL_OUT_FREQ_ABS_MAX 800000000UL
#define MAX_UNSIGNED_24BIT 16777215UL
/***************************************************************/
/* Register definition of device address 0x58 */
#define PRODUCT_ID_L 0x02
#define PRODUCT_ID_H 0x03
#define INTR_ALERT_1 0xCC
#define INTR_SOFTWARE_INT BIT(3)
#define INTR_RECEIVED_MSG BIT(5)
#define SYSTEM_STSTUS 0x45
#define INTERFACE_CHANGE_INT 0x44
#define HPD_STATUS_CHANGE 0x80
#define HPD_STATUS 0x80
/******** END of I2C Address 0x58 ********/
/***************************************************************/
/* Register definition of device address 0x70 */
#define I2C_ADDR_70_DPTX 0x70
#define SP_TX_LINK_BW_SET_REG 0xA0
#define SP_TX_LANE_COUNT_SET_REG 0xA1
#define M_VID_0 0xC0
#define M_VID_1 0xC1
#define M_VID_2 0xC2
#define N_VID_0 0xC3
#define N_VID_1 0xC4
#define N_VID_2 0xC5
/***************************************************************/
/* Register definition of device address 0x72 */
#define AUX_RST 0x04
#define RST_CTRL2 0x07
#define SP_TX_TOTAL_LINE_STA_L 0x24
#define SP_TX_TOTAL_LINE_STA_H 0x25
#define SP_TX_ACT_LINE_STA_L 0x26
#define SP_TX_ACT_LINE_STA_H 0x27
#define SP_TX_V_F_PORCH_STA 0x28
#define SP_TX_V_SYNC_STA 0x29
#define SP_TX_V_B_PORCH_STA 0x2A
#define SP_TX_TOTAL_PIXEL_STA_L 0x2B
#define SP_TX_TOTAL_PIXEL_STA_H 0x2C
#define SP_TX_ACT_PIXEL_STA_L 0x2D
#define SP_TX_ACT_PIXEL_STA_H 0x2E
#define SP_TX_H_F_PORCH_STA_L 0x2F
#define SP_TX_H_F_PORCH_STA_H 0x30
#define SP_TX_H_SYNC_STA_L 0x31
#define SP_TX_H_SYNC_STA_H 0x32
#define SP_TX_H_B_PORCH_STA_L 0x33
#define SP_TX_H_B_PORCH_STA_H 0x34
#define SP_TX_VID_CTRL 0x84
#define SP_TX_BPC_MASK 0xE0
#define SP_TX_BPC_6 0x00
#define SP_TX_BPC_8 0x20
#define SP_TX_BPC_10 0x40
#define SP_TX_BPC_12 0x60
#define VIDEO_BIT_MATRIX_12 0x4c
#define AUDIO_CHANNEL_STATUS_1 0xd0
#define AUDIO_CHANNEL_STATUS_2 0xd1
#define AUDIO_CHANNEL_STATUS_3 0xd2
#define AUDIO_CHANNEL_STATUS_4 0xd3
#define AUDIO_CHANNEL_STATUS_5 0xd4
#define AUDIO_CHANNEL_STATUS_6 0xd5
#define TDM_SLAVE_MODE 0x10
#define I2S_SLAVE_MODE 0x08
#define AUDIO_CONTROL_REGISTER 0xe6
#define TDM_TIMING_MODE 0x08
#define I2C_ADDR_72_DPTX 0x72
#define HP_MIN 8
#define HBLANKING_MIN 80
#define SYNC_LEN_DEF 32
#define HFP_HBP_DEF ((HBLANKING_MIN - SYNC_LEN_DEF) / 2)
#define VIDEO_CONTROL_0 0x08
#define ACTIVE_LINES_L 0x14
#define ACTIVE_LINES_H 0x15 /* Bit[7:6] are reserved */
#define VERTICAL_FRONT_PORCH 0x16
#define VERTICAL_SYNC_WIDTH 0x17
#define VERTICAL_BACK_PORCH 0x18
#define HORIZONTAL_TOTAL_PIXELS_L 0x19
#define HORIZONTAL_TOTAL_PIXELS_H 0x1A /* Bit[7:6] are reserved */
#define HORIZONTAL_ACTIVE_PIXELS_L 0x1B
#define HORIZONTAL_ACTIVE_PIXELS_H 0x1C /* Bit[7:6] are reserved */
#define HORIZONTAL_FRONT_PORCH_L 0x1D
#define HORIZONTAL_FRONT_PORCH_H 0x1E /* Bit[7:4] are reserved */
#define HORIZONTAL_SYNC_WIDTH_L 0x1F
#define HORIZONTAL_SYNC_WIDTH_H 0x20 /* Bit[7:4] are reserved */
#define HORIZONTAL_BACK_PORCH_L 0x21
#define HORIZONTAL_BACK_PORCH_H 0x22 /* Bit[7:4] are reserved */
/******** END of I2C Address 0x72 *********/
/***************************************************************/
/* Register definition of device address 0x7e */
#define I2C_ADDR_7E_FLASH_CONTROLLER 0x7E
#define FLASH_LOAD_STA 0x05
#define FLASH_LOAD_STA_CHK BIT(7)
#define XTAL_FRQ_SEL 0x3F
/* bit field positions */
#define XTAL_FRQ_SEL_POS 5
/* bit field values */
#define XTAL_FRQ_19M2 (0 << XTAL_FRQ_SEL_POS)
#define XTAL_FRQ_27M (4 << XTAL_FRQ_SEL_POS)
#define R_DSC_CTRL_0 0x40
#define READ_STATUS_EN 7
#define CLK_1MEG_RB 6 /* 1MHz clock reset; 0=reset, 1=reset release */
#define DSC_BIST_DONE 1 /* Bit[5:1]: 1=DSC MBIST pass */
#define DSC_EN 0x01 /* 1=DSC enabled, 0=DSC disabled */
#define OCM_FW_VERSION 0x31
#define OCM_FW_REVERSION 0x32
#define AP_AUX_ADDR_7_0 0x11
#define AP_AUX_ADDR_15_8 0x12
#define AP_AUX_ADDR_19_16 0x13
/* Bit[0:3] AUX status, bit 4 op_en, bit 5 address only */
#define AP_AUX_CTRL_STATUS 0x14
#define AP_AUX_CTRL_OP_EN 0x10
#define AP_AUX_CTRL_ADDRONLY 0x20
#define AP_AUX_BUFF_START 0x15
#define PIXEL_CLOCK_L 0x25
#define PIXEL_CLOCK_H 0x26
#define AP_AUX_COMMAND 0x27 /* com+len */
/* Bit 0&1: 3D video structure */
/* 0x01: frame packing, 0x02:Line alternative, 0x03:Side-by-side(full) */
#define AP_AV_STATUS 0x28
#define AP_VIDEO_CHG BIT(2)
#define AP_AUDIO_CHG BIT(3)
#define AP_MIPI_MUTE BIT(4) /* 1: MIPI input mute, 0: unmute */
#define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input enabled, 0: no RX input */
#define AP_DISABLE_PD BIT(6)
#define AP_DISABLE_DISPLAY BIT(7)
/***************************************************************/
/* Register definition of device address 0x84 */
#define MIPI_PHY_CONTROL_3 0x03
#define MIPI_HS_PWD_CLK 7
#define MIPI_HS_RT_CLK 6
#define MIPI_PD_CLK 5
#define MIPI_CLK_RT_MANUAL_PD_EN 4
#define MIPI_CLK_HS_MANUAL_PD_EN 3
#define MIPI_CLK_DET_DET_BYPASS 2
#define MIPI_CLK_MISS_CTRL 1
#define MIPI_PD_LPTX_CH_MANUAL_PD_EN 0
#define MIPI_LANE_CTRL_0 0x05
#define MIPI_TIME_HS_PRPR 0x08
/*
* After the MIPI RX protocol layer has received video frames, it
* starts to reconstruct the video stream from the PHY.
*/
#define MIPI_VIDEO_STABLE_CNT 0x0A
#define MIPI_LANE_CTRL_10 0x0F
#define MIPI_DIGITAL_ADJ_1 0x1B
#define MIPI_PLL_M_NUM_23_16 0x1E
#define MIPI_PLL_M_NUM_15_8 0x1F
#define MIPI_PLL_M_NUM_7_0 0x20
#define MIPI_PLL_N_NUM_23_16 0x21
#define MIPI_PLL_N_NUM_15_8 0x22
#define MIPI_PLL_N_NUM_7_0 0x23
#define MIPI_DIGITAL_PLL_6 0x2A
/* Bit[7:6]: VCO band control, only effective */
#define MIPI_M_NUM_READY 0x10
#define MIPI_N_NUM_READY 0x08
#define STABLE_INTEGER_CNT_EN 0x04
#define MIPI_PLL_TEST_BIT 0
/* Bit[1:0]: test point output select - */
/* 00: VCO power, 01: dvdd_pdt, 10: dvdd, 11: vcox */
#define MIPI_DIGITAL_PLL_7 0x2B
#define MIPI_PLL_FORCE_N_EN 7
#define MIPI_PLL_FORCE_BAND_EN 6
#define MIPI_PLL_VCO_TUNE_REG 4
/* Bit[5:4]: VCO metal capacitance - */
/* 00: +20% fast, 01: +10% fast (default), 10: typical, 11: -10% slow */
#define MIPI_PLL_VCO_TUNE_REG_VAL 0x30
#define MIPI_PLL_PLL_LDO_BIT 2
/* Bit[3:2]: vco_v2i power - */
/* 00: 1.40V, 01: 1.45V (default), 10: 1.50V, 11: 1.55V */
#define MIPI_PLL_RESET_N 0x02
#define MIPI_FRQ_FORCE_NDET 0
#define MIPI_ALERT_CLR_0 0x2D
#define HS_link_error_clear 7
/* This bit itself is S/C, and it clears 0x84:0x31[7] */
#define MIPI_ALERT_OUT_0 0x31
#define check_sum_err_hs_sync 7
/* This bit is cleared by 0x84:0x2D[7] */
#define MIPI_DIGITAL_PLL_8 0x33
#define MIPI_POST_DIV_VAL 4
/* N means divided by (n+1), n = 0~15 */
#define MIPI_EN_LOCK_FRZ 3
#define MIPI_FRQ_COUNTER_RST 2
#define MIPI_FRQ_SET_REG_8 1
/* Bit 0 is reserved */
#define MIPI_DIGITAL_PLL_9 0x34
#define MIPI_DIGITAL_PLL_16 0x3B
#define MIPI_FRQ_FREEZE_NDET 7
#define MIPI_FRQ_REG_SET_ENABLE 6
#define MIPI_REG_FORCE_SEL_EN 5
#define MIPI_REG_SEL_DIV_REG 4
#define MIPI_REG_FORCE_PRE_DIV_EN 3
/* Bit 2 is reserved */
#define MIPI_FREF_D_IND 1
#define REF_CLK_27000KHZ 1
#define REF_CLK_19200KHZ 0
#define MIPI_REG_PLL_PLL_TEST_ENABLE 0
#define MIPI_DIGITAL_PLL_18 0x3D
#define FRQ_COUNT_RB_SEL 7
#define REG_FORCE_POST_DIV_EN 6
#define MIPI_DPI_SELECT 5
#define SELECT_DSI 1
#define SELECT_DPI 0
#define REG_BAUD_DIV_RATIO 0
#define H_BLANK_L 0x3E
/* For DSC only */
#define H_BLANK_H 0x3F
/* For DSC only; note: bit[7:6] are reserved */
#define MIPI_SWAP 0x4A
#define MIPI_SWAP_CH0 7
#define MIPI_SWAP_CH1 6
#define MIPI_SWAP_CH2 5
#define MIPI_SWAP_CH3 4
#define MIPI_SWAP_CLK 3
/* Bit[2:0] are reserved */
/******** END of I2C Address 0x84 *********/
/* DPCD regs */
#define DPCD_DPCD_REV 0x00
#define DPCD_MAX_LINK_RATE 0x01
#define DPCD_MAX_LANE_COUNT 0x02
/********* ANX7625 Register End **********/
/***************** Display *****************/
enum audio_fs {
AUDIO_FS_441K = 0x00,
AUDIO_FS_48K = 0x02,
AUDIO_FS_32K = 0x03,
AUDIO_FS_882K = 0x08,
AUDIO_FS_96K = 0x0a,
AUDIO_FS_1764K = 0x0c,
AUDIO_FS_192K = 0x0e
};
enum audio_wd_len {
AUDIO_W_LEN_16_20MAX = 0x02,
AUDIO_W_LEN_18_20MAX = 0x04,
AUDIO_W_LEN_17_20MAX = 0x0c,
AUDIO_W_LEN_19_20MAX = 0x08,
AUDIO_W_LEN_20_20MAX = 0x0a,
AUDIO_W_LEN_20_24MAX = 0x03,
AUDIO_W_LEN_22_24MAX = 0x05,
AUDIO_W_LEN_21_24MAX = 0x0d,
AUDIO_W_LEN_23_24MAX = 0x09,
AUDIO_W_LEN_24_24MAX = 0x0b
};
#define I2S_CH_2 0x01
#define TDM_CH_4 0x03
#define TDM_CH_6 0x05
#define TDM_CH_8 0x07
#define MAX_DPCD_BUFFER_SIZE 16
#define ONE_BLOCK_SIZE 128
#define FOUR_BLOCK_SIZE (128 * 4)
#define MAX_EDID_BLOCK 3
#define EDID_TRY_CNT 3
#define SUPPORT_PIXEL_CLOCK 300000
struct s_edid_data {
int edid_block_num;
u8 edid_raw_data[FOUR_BLOCK_SIZE];
};
/***************** Display End *****************/
struct anx7625_platform_data {
struct gpio_desc *gpio_p_on;
struct gpio_desc *gpio_reset;
struct drm_bridge *panel_bridge;
int intp_irq;
u32 low_power_mode;
struct device_node *mipi_host_node;
};
struct anx7625_i2c_client {
struct i2c_client *tx_p0_client;
struct i2c_client *tx_p1_client;
struct i2c_client *tx_p2_client;
struct i2c_client *rx_p0_client;
struct i2c_client *rx_p1_client;
struct i2c_client *rx_p2_client;
struct i2c_client *tcpc_client;
};
struct anx7625_data {
struct anx7625_platform_data pdata;
atomic_t power_status;
int hpd_status;
int hpd_high_cnt;
/* Lock for work queue */
struct mutex lock;
struct i2c_client *client;
struct anx7625_i2c_client i2c;
struct i2c_client *last_client;
struct s_edid_data slimport_edid_p;
struct work_struct work;
struct workqueue_struct *workqueue;
char edid_block;
struct display_timing dt;
u8 display_timing_valid;
struct drm_bridge bridge;
u8 bridge_attached;
struct mipi_dsi_device *dsi;
};
#endif /* __ANX7625_H__ */
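As one example of how these definitions fit together, HPD state lives in the 0x7e register block. A sketch that assumes the driver's I2C read helper (anx7625_reg_read() is not defined in this header):

/* Sketch only; anx7625_reg_read() is assumed to return the register
 * value or a negative error code. */
static int anx7625_hpd_asserted(struct anx7625_data *ctx)
{
	int val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);

	if (val < 0)
		return val;
	return !!(val & HPD_STATUS);
}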


@@ -170,7 +170,7 @@ static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
}
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
static const struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
.audio_startup = dw_hdmi_i2s_audio_startup,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,


@@ -153,9 +153,10 @@ static const char * const tc358764_supplies[] = {
struct tc358764 {
struct device *dev;
struct drm_bridge bridge;
struct drm_connector connector;
struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)];
struct gpio_desc *gpio_reset;
struct drm_bridge *panel_bridge;
struct drm_panel *panel;
int error;
};
@@ -209,6 +210,12 @@ static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge)
return container_of(bridge, struct tc358764, bridge);
}
static inline
struct tc358764 *connector_to_tc358764(struct drm_connector *connector)
{
return container_of(connector, struct tc358764, connector);
}
static int tc358764_init(struct tc358764 *ctx)
{
u32 v = 0;
@@ -271,11 +278,43 @@ static void tc358764_reset(struct tc358764 *ctx)
usleep_range(1000, 2000);
}
static int tc358764_get_modes(struct drm_connector *connector)
{
struct tc358764 *ctx = connector_to_tc358764(connector);
return drm_panel_get_modes(ctx->panel, connector);
}
static const
struct drm_connector_helper_funcs tc358764_connector_helper_funcs = {
.get_modes = tc358764_get_modes,
};
static const struct drm_connector_funcs tc358764_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void tc358764_disable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret = drm_panel_disable(bridge_to_tc358764(bridge)->panel);
if (ret < 0)
dev_err(ctx->dev, "error disabling panel (%d)\n", ret);
}
static void tc358764_post_disable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret;
ret = drm_panel_unprepare(ctx->panel);
if (ret < 0)
dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret);
tc358764_reset(ctx);
usleep_range(10000, 15000);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@@ -296,28 +335,71 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
ret = tc358764_init(ctx);
if (ret < 0)
dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
ret = drm_panel_prepare(ctx->panel);
if (ret < 0)
dev_err(ctx->dev, "error preparing panel (%d)\n", ret);
}
static void tc358764_enable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret = drm_panel_enable(ctx->panel);
if (ret < 0)
dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
static int tc358764_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
struct drm_device *drm = bridge->dev;
int ret;
return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
bridge, flags);
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
DRM_ERROR("Fix bridge driver to make connector optional!");
return -EINVAL;
}
ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(drm, &ctx->connector,
&tc358764_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
return ret;
}
drm_connector_helper_add(&ctx->connector,
&tc358764_connector_helper_funcs);
drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
ctx->connector.funcs->reset(&ctx->connector);
drm_connector_register(&ctx->connector);
return 0;
}
static void tc358764_detach(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
drm_connector_unregister(&ctx->connector);
ctx->panel = NULL;
drm_connector_put(&ctx->connector);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
.disable = tc358764_disable,
.post_disable = tc358764_post_disable,
.enable = tc358764_enable,
.pre_enable = tc358764_pre_enable,
.attach = tc358764_attach,
.detach = tc358764_detach,
};
static int tc358764_parse_dt(struct tc358764 *ctx)
{
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
struct drm_panel *panel;
int ret;
ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -326,16 +408,12 @@ static int tc358764_parse_dt(struct tc358764 *ctx)
return PTR_ERR(ctx->gpio_reset);
}
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
if (ret)
return ret;
ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel,
NULL);
if (ret && ret != -EPROBE_DEFER)
dev_err(dev, "cannot find panel (%d)\n", ret);
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
ctx->panel_bridge = panel_bridge;
return 0;
return ret;
}
static int tc358764_configure_regulators(struct tc358764 *ctx)
@@ -381,7 +459,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
return ret;
ctx->bridge.funcs = &tc358764_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);


@@ -106,6 +106,8 @@
#define SN_NUM_GPIOS 4
#define SN_GPIO_PHYSICAL_OFFSET 1
#define SN_LINK_TRAINING_TRIES 10
/**
* struct ti_sn_bridge - Platform data for ti-sn65dsi86 driver.
* @dev: Pointer to our device.
@@ -673,6 +675,7 @@ static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
{
unsigned int val;
int ret;
int i;
/* set dp clk frequency value */
regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
@@ -689,19 +692,34 @@ static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
goto exit;
}
/* Semi auto link training mode */
regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
val == ML_TX_MAIN_LINK_OFF ||
val == ML_TX_NORMAL_MODE, 1000,
500 * 1000);
if (ret) {
*last_err_str = "Training complete polling failed";
} else if (val == ML_TX_MAIN_LINK_OFF) {
*last_err_str = "Link training failed, link is off";
ret = -EIO;
/*
* We'll try to link train several times. As part of link training
* the bridge chip will write DP_SET_POWER_D0 to DP_SET_POWER. If
* the panel isn't quite ready it might respond with a NAK here, which
* means we need to try again.
*/
for (i = 0; i < SN_LINK_TRAINING_TRIES; i++) {
/* Semi auto link training mode */
regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
val == ML_TX_MAIN_LINK_OFF ||
val == ML_TX_NORMAL_MODE, 1000,
500 * 1000);
if (ret) {
*last_err_str = "Training complete polling failed";
} else if (val == ML_TX_MAIN_LINK_OFF) {
*last_err_str = "Link training failed, link is off";
ret = -EIO;
continue;
}
break;
}
/* If we saw quite a few retries, add a note about it */
if (!ret && i > SN_LINK_TRAINING_TRIES / 2)
DRM_DEV_INFO(pdata->dev, "Link training needed %d retries\n", i);
exit:
/* Disable the PLL if we failed */
if (ret)
@@ -816,8 +834,7 @@ static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
{
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
if (pdata->refclk)
clk_disable_unprepare(pdata->refclk);
clk_disable_unprepare(pdata->refclk);
pm_runtime_put_sync(pdata->dev);
}
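The removed NULL check was redundant because the common clk API ignores NULL clocks; clk_disable_unprepare(NULL) is a no-op. The same simplification works for any optional clock:

/* Optional-clock teardown; no guard needed, the clk API accepts NULL. */
static void example_put_refclk(struct clk *refclk)
{
	clk_disable_unprepare(refclk);	/* no-op when refclk is NULL */
}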


@@ -281,6 +281,10 @@ EXPORT_SYMBOL(__drm_atomic_state_free);
* needed. It will also grab the relevant CRTC lock to make sure that the state
* is consistent.
*
* WARNING: Drivers may only add new CRTC states to a @state if
* drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit
* not created by userspace through an IOCTL call.
*
* Returns:
*
* Either the allocated state or the error code encoded into the pointer. When
@@ -1262,10 +1266,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_crtc_state *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
unsigned requested_crtc = 0;
unsigned affected_crtc = 0;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
requested_crtc |= drm_crtc_mask(crtc);
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
if (ret) {
@@ -1313,6 +1322,26 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
affected_crtc |= drm_crtc_mask(crtc);
/*
* For commits that allow modesets drivers can add other CRTCs to the
* atomic commit, e.g. when they need to reallocate global resources.
* This can cause spurious EBUSY, which robs compositors of a very
* effective sanity check for their drawing loop. Therefore, only allow
* drivers to add unrelated CRTC states for modeset commits.
*
* FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output
* so compositors know what's going on.
*/
if (affected_crtc != requested_crtc) {
DRM_DEBUG_ATOMIC("driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
requested_crtc, affected_crtc);
WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n",
requested_crtc, affected_crtc);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -1613,11 +1642,11 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
* to dmesg in case of error irq's. (Hint, you probably want to
* ratelimit this!)
*
* The caller must drm_modeset_lock_all(), or if this is called
* from error irq handler, it should not be enabled by default.
* (Ie. if you are debugging errors you might not care that this
* is racey. But calling this without all modeset locks held is
* not inherently safe.)
* The caller must wrap this in drm_modeset_lock_all_ctx() and
* drm_modeset_drop_locks(). If this is called from an error irq handler, it
* should not be enabled by default - if you are debugging errors you might
* not care that this is racy, but calling this without all modeset locks held
* is inherently unsafe.
*/
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{


@@ -1093,7 +1093,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (new_crtc_state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->atomic_disable)
funcs->atomic_disable(crtc, old_crtc_state);
funcs->atomic_disable(crtc, old_state);
else if (funcs->disable)
funcs->disable(crtc);
else if (funcs->dpms)
@@ -1358,7 +1358,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (funcs->atomic_enable)
funcs->atomic_enable(crtc, old_crtc_state);
funcs->atomic_enable(crtc, old_state);
else if (funcs->commit)
funcs->commit(crtc);
}
@@ -1736,8 +1736,11 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
* overridden by a previous synchronous update's state.
*/
if (old_plane_state->commit &&
!try_wait_for_completion(&old_plane_state->commit->hw_done))
!try_wait_for_completion(&old_plane_state->commit->hw_done)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n",
plane->base.id, plane->name);
return -EBUSY;
}
return funcs->atomic_async_check(plane, new_plane_state);
}
@@ -1955,6 +1958,9 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
* commit with nonblocking ones. */
if (!completed && nonblock) {
spin_unlock(&crtc->commit_lock);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n",
crtc->base.id, crtc->name);
return -EBUSY;
}
} else if (i == 1) {
@@ -2129,8 +2135,12 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
/* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones. */
if (nonblock && old_conn_state->commit &&
!try_wait_for_completion(&old_conn_state->commit->flip_done))
!try_wait_for_completion(&old_conn_state->commit->flip_done)) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n",
conn->base.id, conn->name);
return -EBUSY;
}
/* Always track connectors explicitly for e.g. link retraining. */
commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
@@ -2144,8 +2154,12 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
/* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones. */
if (nonblock && old_plane_state->commit &&
!try_wait_for_completion(&old_plane_state->commit->flip_done))
!try_wait_for_completion(&old_plane_state->commit->flip_done)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n",
plane->base.id, plane->name);
return -EBUSY;
}
/* Always track planes explicitly for async pageflip support. */
commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);


@@ -960,6 +960,11 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* drm_connector_update_edid_property(), usually after having parsed
* the EDID using drm_add_edid_modes(). Userspace cannot change this
* property.
*
* User-space should not parse the EDID to obtain information exposed via
* other KMS properties (because the kernel might apply limits, quirks or
* fixups to the EDID). For instance, user-space should not try to parse
* mode lists from the EDID.
* DPMS:
* Legacy property for setting the power state of the connector. For atomic
* drivers this is only provided for backwards compatibility with existing


@@ -63,7 +63,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
mutex_lock(&aux_idr_mutex);
aux_dev = idr_find(&aux_idr, index);
if (!kref_get_unless_zero(&aux_dev->refcount))
if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
aux_dev = NULL;
mutex_unlock(&aux_idr_mutex);


@@ -3686,9 +3686,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
WARN_ON(mgr->mst_primary);
/* get dpcd info */
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (ret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("failed to read DPCD\n");
ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
mgr->aux->name, ret);
goto out_unlock;
}
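drm_dp_read_dpcd_caps() wraps the raw read and also honours the extended receiver capability field that DP 1.3+ sinks advertise. A sketch of the idea, not the helper's exact body:

static int read_dpcd_caps_sketch(struct drm_dp_aux *aux,
				 u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	ssize_t ret;

	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
	if (ret != DP_RECEIVER_CAP_SIZE)
		return ret < 0 ? ret : -EIO;

	/* Sinks with the extended cap field expose newer caps at 0x2200. */
	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	    DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT) {
		ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, dpcd,
				       DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE)
			return ret < 0 ? ret : -EIO;
	}

	return 0;
}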


@@ -281,18 +281,12 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
#ifdef CONFIG_MAGIC_SYSRQ
/*
* restore fbcon display for all kms driver's using this helper, used for sysrq
* and panic handling.
*/
static bool drm_fb_helper_force_kernel_mode(void)
/* emergency restore, don't bother with error reporting */
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
bool ret, error = false;
struct drm_fb_helper *helper;
if (list_empty(&kernel_fb_helper_list))
return false;
mutex_lock(&kernel_fb_helper_lock);
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
struct drm_device *dev = helper->dev;
@@ -300,22 +294,12 @@ static bool drm_fb_helper_force_kernel_mode(void)
continue;
mutex_lock(&helper->lock);
ret = drm_client_modeset_commit_locked(&helper->client);
if (ret)
error = true;
drm_client_modeset_commit_locked(&helper->client);
mutex_unlock(&helper->lock);
}
return error;
mutex_unlock(&kernel_fb_helper_lock);
}
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
bool ret;
ret = drm_fb_helper_force_kernel_mode();
if (ret == true)
DRM_ERROR("Failed to restore crtc configuration\n");
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
static void drm_fb_helper_sysrq(int dummy1)


@@ -202,6 +202,7 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },


@@ -247,12 +247,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
if (obj->funcs && obj->funcs->close)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
@@ -403,14 +400,10 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
if (ret)
goto err_remove;
if (obj->funcs && obj->funcs->open) {
if (obj->funcs->open) {
ret = obj->funcs->open(obj, file_priv);
if (ret)
goto err_revoke;
} else if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
if (ret)
goto err_revoke;
}
*handlep = handle;
@@ -982,12 +975,11 @@ drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj =
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
if (obj->funcs)
obj->funcs->free(obj);
else if (dev->driver->gem_free_object_unlocked)
dev->driver->gem_free_object_unlocked(obj);
if (WARN_ON(!obj->funcs->free))
return;
obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
@@ -1049,9 +1041,9 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* @obj_size: the object size to be mapped, in bytes
* @vma: VMA for the area to be mapped
*
* Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
* provided by the driver. Depending on their requirements, drivers can either
* provide a fault handler in their gem_vm_ops (in which case any accesses to
* Set up the VMA to prepare mapping of the GEM object using the GEM object's
* vm_ops. Depending on their requirements, GEM objects can either
* provide a fault handler in their vm_ops (in which case any accesses to
* the object will be trapped, to perform migration, GTT binding, surface
* register allocation, or performance monitoring), or mmap the buffer memory
* synchronously after calling drm_gem_mmap_obj.
@@ -1065,12 +1057,11 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* callers must verify access restrictions before calling this helper.
*
* Return 0 on success or -EINVAL if the object size is smaller than the VMA
* size, or if no gem_vm_ops are provided.
* size, or if no vm_ops are provided.
*/
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
int ret;
/* Check for valid size. */
@@ -1087,7 +1078,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
vma->vm_private_data = obj;
if (obj->funcs && obj->funcs->mmap) {
if (obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
if (ret) {
drm_gem_object_put(obj);
@@ -1095,10 +1086,8 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
}
WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
} else {
if (obj->funcs && obj->funcs->vm_ops)
if (obj->funcs->vm_ops)
vma->vm_ops = obj->funcs->vm_ops;
else if (dev->driver->gem_vm_ops)
vma->vm_ops = dev->driver->gem_vm_ops;
else {
drm_gem_object_put(obj);
return -EINVAL;
@@ -1198,36 +1187,30 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_printf_indent(p, indent, "imported=%s\n",
obj->import_attach ? "yes" : "no");
if (obj->funcs && obj->funcs->print_info)
if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
}
int drm_gem_pin(struct drm_gem_object *obj)
{
if (obj->funcs && obj->funcs->pin)
if (obj->funcs->pin)
return obj->funcs->pin(obj);
else if (obj->dev->driver->gem_prime_pin)
return obj->dev->driver->gem_prime_pin(obj);
else
return 0;
}
void drm_gem_unpin(struct drm_gem_object *obj)
{
if (obj->funcs && obj->funcs->unpin)
if (obj->funcs->unpin)
obj->funcs->unpin(obj);
else if (obj->dev->driver->gem_prime_unpin)
obj->dev->driver->gem_prime_unpin(obj);
}
void *drm_gem_vmap(struct drm_gem_object *obj)
{
void *vaddr;
if (obj->funcs && obj->funcs->vmap)
if (obj->funcs->vmap)
vaddr = obj->funcs->vmap(obj);
else if (obj->dev->driver->gem_prime_vmap)
vaddr = obj->dev->driver->gem_prime_vmap(obj);
else
vaddr = ERR_PTR(-EOPNOTSUPP);
@@ -1242,10 +1225,8 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
if (!vaddr)
return;
if (obj->funcs && obj->funcs->vunmap)
if (obj->funcs->vunmap)
obj->funcs->vunmap(obj, vaddr);
else if (obj->dev->driver->gem_prime_vunmap)
obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
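With the driver-wide callbacks gone, the per-object funcs table is mandatory rather than a fallback. A minimal sketch of what a converted driver provides (all foo_* names hypothetical):

static void foo_gem_free(struct drm_gem_object *obj);
static const struct vm_operations_struct foo_gem_vm_ops;

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free	= foo_gem_free,		/* drm_gem_object_free() WARNs if unset */
	.vm_ops	= &foo_gem_vm_ops,	/* fallback used by drm_gem_mmap_obj() */
};

static int foo_gem_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size)
{
	obj->funcs = &foo_gem_funcs;	/* must be set before object init */
	return drm_gem_object_init(dev, obj, size);
}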
/**


@@ -171,17 +171,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
* GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released.
* Drivers using the CMA helpers should set this as their
* &drm_driver.gem_free_object_unlocked callback.
* &drm_gem_object_funcs.free callback.
*/
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
struct drm_gem_cma_object *cma_obj;
cma_obj = to_drm_gem_cma_obj(gem_obj);
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
if (gem_obj->import_attach) {
if (cma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
} else if (cma_obj->vaddr) {
dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
@@ -419,7 +418,7 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
*
* This function exports a scatter/gather table suitable for PRIME usage by
* calling the standard DMA mapping API. Drivers using the CMA helpers should
* set this as their &drm_driver.gem_prime_get_sg_table callback.
* set this as their &drm_gem_object_funcs.get_sg_table callback.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
@@ -525,7 +524,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* virtual address space. Since the CMA buffers are already mapped into the
* kernel virtual address space this simply returns the cached virtual
* address. Drivers using the CMA helpers should set this as their DRM
* driver's &drm_driver.gem_prime_vmap callback.
* driver's &drm_gem_object_funcs.vmap callback.
*
* Returns:
* The kernel virtual address of the CMA GEM object's backing store.
@@ -547,7 +546,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
* This function removes a buffer exported via DRM PRIME from the kernel's
* virtual address space. This is a no-op because CMA buffers cannot be
* unmapped from kernel space. Drivers using the CMA helpers should set this
* as their &drm_driver.gem_prime_vunmap callback.
* as their &drm_gem_object_funcs.vunmap callback.
*/
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
@@ -617,22 +616,23 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *obj;
void *vaddr;
struct dma_buf_map map;
int ret;
vaddr = dma_buf_vmap(attach->dmabuf);
if (!vaddr) {
ret = dma_buf_vmap(attach->dmabuf, &map);
if (ret) {
DRM_ERROR("Failed to vmap PRIME buffer\n");
return ERR_PTR(-ENOMEM);
return ERR_PTR(ret);
}
obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
dma_buf_vunmap(attach->dmabuf, vaddr);
dma_buf_vunmap(attach->dmabuf, &map);
return obj;
}
cma_obj = to_drm_gem_cma_obj(obj);
cma_obj->vaddr = vaddr;
cma_obj->vaddr = map.vaddr;
return obj;
}
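The same dma_buf_map conversion repeats in the shmem and VRAM helpers below. The caller-side pattern, sketched for an importer that only handles system memory:

/* Sketch: dma_buf_vmap() now fills a dma_buf_map instead of returning a
 * bare pointer, so callers can tell system memory from I/O memory. */
static void *import_vmap_sketch(struct dma_buf *dmabuf)
{
	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);
	if (ret)
		return ERR_PTR(ret);

	if (map.is_iomem) {		/* reject I/O mappings explicitly */
		dma_buf_vunmap(dmabuf, &map);
		return ERR_PTR(-EINVAL);
	}

	return map.vaddr;
}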


@@ -261,13 +261,16 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
int ret;
struct dma_buf_map map;
int ret = 0;
if (shmem->vmap_use_count++ > 0)
return shmem->vaddr;
if (obj->import_attach) {
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
if (!ret)
shmem->vaddr = map.vaddr;
} else {
pgprot_t prot = PAGE_KERNEL;
@@ -279,11 +282,12 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
if (!shmem->vaddr)
ret = -ENOMEM;
}
if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");
ret = -ENOMEM;
if (ret) {
DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
@@ -333,6 +337,7 @@ EXPORT_SYMBOL(drm_gem_shmem_vmap);
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr);
if (WARN_ON_ONCE(!shmem->vmap_use_count))
return;
@@ -341,7 +346,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
return;
if (obj->import_attach)
dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
dma_buf_vunmap(obj->import_attach->dmabuf, &map);
else
vunmap(shmem->vaddr);


@@ -140,22 +140,19 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
unsigned int c = 0;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
pl_flag = TTM_PL_FLAG_TOPDOWN;
invariant_flags = TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
gbo->placements[c].mem_type = TTM_PL_VRAM;
gbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
invariant_flags;
gbo->placements[c++].flags = invariant_flags;
}
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
gbo->placements[c].mem_type = TTM_PL_SYSTEM;
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
invariant_flags;
gbo->placements[c++].flags = invariant_flags;
}
gbo->placement.num_placement = c;
@@ -167,58 +164,18 @@
}
}
/*
* Note that on error, drm_gem_vram_init will free the buffer object.
*/
static int drm_gem_vram_init(struct drm_device *dev,
struct drm_gem_vram_object *gbo,
size_t size, unsigned long pg_align)
{
struct drm_vram_mm *vmm = dev->vram_mm;
struct ttm_bo_device *bdev;
int ret;
size_t acc_size;
if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
kfree(gbo);
return -EINVAL;
}
bdev = &vmm->bdev;
gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, &gbo->bo.base, size);
if (ret) {
kfree(gbo);
return ret;
}
acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
gbo->bo.bdev = bdev;
drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
DRM_GEM_VRAM_PL_FLAG_SYSTEM);
ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
&gbo->placement, pg_align, false, acc_size,
NULL, NULL, ttm_buffer_object_destroy);
if (ret)
/*
* A failing ttm_bo_init will call ttm_buffer_object_destroy
* to release gbo->bo.base and kfree gbo.
*/
return ret;
return 0;
}
/**
* drm_gem_vram_create() - Creates a VRAM-backed GEM object
* @dev: the DRM device
* @size: the buffer size in bytes
* @pg_align: the buffer's alignment in multiples of the page size
*
* GEM objects are allocated by calling struct drm_driver.gem_create_object,
* if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
* object functions in struct drm_driver.gem_create_object. If no functions
* are set, the new GEM object will use the default functions from GEM VRAM
* helpers.
*
* Returns:
* A new instance of &struct drm_gem_vram_object on success, or
* an ERR_PTR()-encoded error code otherwise.
@@ -228,11 +185,17 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
unsigned long pg_align)
{
struct drm_gem_vram_object *gbo;
struct drm_gem_object *gem;
struct drm_vram_mm *vmm = dev->vram_mm;
struct ttm_bo_device *bdev;
int ret;
size_t acc_size;
if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
return ERR_PTR(-EINVAL);
if (dev->driver->gem_create_object) {
struct drm_gem_object *gem =
dev->driver->gem_create_object(dev, size);
gem = dev->driver->gem_create_object(dev, size);
if (!gem)
return ERR_PTR(-ENOMEM);
gbo = drm_gem_vram_of_gem(gem);
@@ -240,10 +203,32 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
if (!gbo)
return ERR_PTR(-ENOMEM);
gem = &gbo->bo.base;
}
ret = drm_gem_vram_init(dev, gbo, size, pg_align);
if (ret < 0)
if (!gem->funcs)
gem->funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, gem, size);
if (ret) {
kfree(gbo);
return ERR_PTR(ret);
}
bdev = &vmm->bdev;
acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
gbo->bo.bdev = bdev;
drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
/*
* A failing ttm_bo_init will call ttm_buffer_object_destroy
* to release gbo->bo.base and kfree gbo.
*/
ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
&gbo->placement, pg_align, false, acc_size,
NULL, NULL, ttm_buffer_object_destroy);
if (ret)
return ERR_PTR(ret);
return gbo;
@@ -301,7 +286,7 @@ static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
*/
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
if (WARN_ON_ONCE(!gbo->pin_count))
if (WARN_ON_ONCE(!gbo->bo.pin_count))
return (s64)-ENODEV;
return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
}
@@ -310,24 +295,21 @@ EXPORT_SYMBOL(drm_gem_vram_offset);
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
unsigned long pl_flag)
{
int i, ret;
struct ttm_operation_ctx ctx = { false, false };
int ret;
if (gbo->pin_count)
if (gbo->bo.pin_count)
goto out;
if (pl_flag)
drm_gem_vram_placement(gbo, pl_flag);
for (i = 0; i < gbo->placement.num_placement; ++i)
gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
return ret;
out:
++gbo->pin_count;
ttm_bo_pin(&gbo->bo);
return 0;
}
@@ -369,26 +351,9 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
}
EXPORT_SYMBOL(drm_gem_vram_pin);
static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
int i, ret;
struct ttm_operation_ctx ctx = { false, false };
if (WARN_ON_ONCE(!gbo->pin_count))
return 0;
--gbo->pin_count;
if (gbo->pin_count)
return 0;
for (i = 0; i < gbo->placement.num_placement ; ++i)
gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
return ret;
return 0;
ttm_bo_unpin(&gbo->bo);
}
/**
@@ -406,10 +371,11 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ret;
ret = drm_gem_vram_unpin_locked(gbo);
drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
return ret;
return 0;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
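
The pin helpers keep their external contract; only the mechanism changes, to TTM's per-BO pin count. A typical caller is unaffected (sketch):

	s64 gpu_addr;
	int ret;

	ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
	if (ret)
		return ret;
	gpu_addr = drm_gem_vram_offset(gbo); /* valid only while pinned */
	/* ... program scanout ... */
	drm_gem_vram_unpin(gbo);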
@@ -619,6 +585,23 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
kmap->virtual = NULL;
}
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
int ret;
drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
if (ret) {
swap(*new_mem, gbo->bo.mem);
drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem);
swap(*new_mem, gbo->bo.mem);
}
return ret;
}
/*
* Helpers for struct drm_gem_object_funcs
*/
@@ -941,7 +924,7 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
if (!tt)
return NULL;
ret = ttm_tt_init(tt, bo, page_flags);
ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
if (ret < 0)
goto err_ttm_tt_init;
@@ -966,9 +949,7 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}
static void bo_driver_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_gem_vram_object *gbo;
@@ -978,7 +959,19 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo,
gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
drm_gem_vram_bo_driver_move_notify(gbo, false, NULL);
}
static int bo_driver_move(struct ttm_buffer_object *bo,
bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
struct drm_gem_vram_object *gbo;
gbo = drm_gem_vram_of_bo(bo);
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}
static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -992,6 +985,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
case TTM_PL_VRAM:
mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
mem->bus.is_iomem = true;
mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -1005,7 +999,8 @@ static struct ttm_bo_driver bo_driver = {
.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bo_driver_evict_flags,
.move_notify = bo_driver_move_notify,
.move = bo_driver_move,
.delete_mem_notify = bo_driver_delete_mem_notify,
.io_mem_reserve = bo_driver_io_mem_reserve,
};
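
This callback set is the shape TTM-based drivers converge on in this series: a driver-provided .move, now that TTM no longer supplies one, plus .delete_mem_notify replacing the old catch-all .move_notify for teardown.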


@@ -53,15 +53,15 @@ void drm_lastclose(struct drm_device *dev);
#ifdef CONFIG_PCI
/* drm_pci.c */
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_pci_agp_destroy(struct drm_device *dev);
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
#else
static inline int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -EINVAL;
}


@@ -578,7 +578,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),


@@ -139,7 +139,7 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
}
/**
* drm_irq_by_busid - Get interrupt from bus ID
* drm_legacy_irq_by_busid - Get interrupt from bus ID
* @dev: DRM device
* @data: IOCTL parameter pointing to a drm_irq_busid structure
* @file_priv: DRM file private.
@@ -150,8 +150,8 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_irq_busid *p = data;
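
For context, the ioctl this legacy path serves looks roughly like this from userspace (a sketch; requires DRM master and root, error handling omitted):

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static void print_irq(int fd)
{
	struct drm_irq_busid p = {
		.busnum = 1,	/* PCI bus/device/function of the GPU */
		.devnum = 0,
		.funcnum = 0,
	};

	if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p) == 0)
		printf("IRQ: %d\n", p.irq);
}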


@@ -386,8 +386,6 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
if (obj->funcs && obj->funcs->export)
dmabuf = obj->funcs->export(obj, flags);
else if (dev->driver->gem_prime_export)
dmabuf = dev->driver->gem_prime_export(obj, flags);
else
dmabuf = drm_gem_prime_export(obj, flags);
if (IS_ERR(dmabuf)) {
@@ -419,7 +417,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* This is the PRIME export function which must be used mandatorily by GEM
* drivers to ensure correct lifetime management of the underlying GEM object.
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_driver.gem_prime_export driver callback.
* &drm_gem_object_funcs.export callback.
*/
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
@@ -622,10 +620,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);
if (obj->funcs)
sgt = obj->funcs->get_sg_table(obj);
else
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
if (WARN_ON(!obj->funcs->get_sg_table))
return ERR_PTR(-ENOSYS);
sgt = obj->funcs->get_sg_table(obj);
if (IS_ERR(sgt))
return sgt;
ret = dma_map_sgtable(attach->dev, sgt, dir,
DMA_ATTR_SKIP_CPU_SYNC);
@@ -663,38 +663,41 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
/**
* drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
* @dma_buf: buffer to be mapped
* @map: the virtual address of the buffer
*
* Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
* callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
*
* Returns the kernel virtual address or NULL on failure.
* Returns 0 on success or a negative errno code otherwise.
*/
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
void *vaddr;
vaddr = drm_gem_vmap(obj);
if (IS_ERR(vaddr))
vaddr = NULL;
return PTR_ERR(vaddr);
return vaddr;
dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
/**
* drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
* @dma_buf: buffer to be unmapped
* @vaddr: the virtual address of the buffer
* @map: the virtual address of the buffer
*
* Releases a kernel virtual mapping. This can be used as the
* &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
*/
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
drm_gem_vunmap(obj, vaddr);
drm_gem_vunmap(obj, map->vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
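
On the caller's side, both helpers are now consumed through struct dma_buf_map (sketch of an importer):

	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);
	if (ret)
		return ret;
	/* map.is_iomem selects between map.vaddr and map.vaddr_iomem */
	if (!map.is_iomem)
		memcpy(map.vaddr, data, len);
	dma_buf_vunmap(dmabuf, &map);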


@@ -99,7 +99,7 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
}
static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_simple_display_pipe *pipe;
@@ -113,7 +113,7 @@ static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
}
static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct drm_simple_display_pipe *pipe;


@@ -468,12 +468,6 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
.fault = etnaviv_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -490,16 +484,9 @@ static struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = etnaviv_open,
.postclose = etnaviv_postclose,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_pin = etnaviv_gem_prime_pin,
.gem_prime_unpin = etnaviv_gem_prime_unpin,
.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
.gem_prime_vmap = etnaviv_gem_prime_vmap,
.gem_prime_vunmap = etnaviv_gem_prime_vunmap,
.gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,


@@ -49,7 +49,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);


@@ -171,7 +171,7 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return obj->ops->mmap(obj, vma);
}
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -559,6 +559,22 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_unlock(&priv->gem_lock);
}
static const struct vm_operations_struct vm_ops = {
.fault = etnaviv_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
.free = etnaviv_gem_free_object,
.pin = etnaviv_gem_prime_pin,
.unpin = etnaviv_gem_prime_unpin,
.get_sg_table = etnaviv_gem_prime_get_sg_table,
.vmap = etnaviv_gem_prime_vmap,
.vunmap = etnaviv_gem_prime_vunmap,
.vm_ops = &vm_ops,
};
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
@@ -593,6 +609,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
INIT_LIST_HEAD(&etnaviv_obj->vram_list);
*obj = &etnaviv_obj->base;
(*obj)->funcs = &etnaviv_gem_object_funcs;
return 0;
}
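
The etnaviv hunks above are representative of the tree-wide conversion: per-object callbacks move out of struct drm_driver into a constant struct drm_gem_object_funcs, and every object is tagged before drm_gem_object_init(). Condensed (foo_ names hypothetical):

static const struct drm_gem_object_funcs foo_gem_object_funcs = {
	.free = foo_gem_free_object,	/* was drm_driver.gem_free_object_unlocked */
	.vmap = foo_gem_vmap,		/* was drm_driver.gem_prime_vmap */
	.vm_ops = &foo_gem_vm_ops,	/* was drm_driver.gem_vm_ops */
};

	obj->funcs = &foo_gem_object_funcs;	/* before drm_gem_object_init() */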


@@ -70,9 +70,10 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
{
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
etnaviv_obj->vaddr);
dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -85,9 +86,15 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
{
struct dma_buf_map map;
int ret;
lockdep_assert_held(&etnaviv_obj->lock);
return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
if (ret)
return NULL;
return map.vaddr;
}
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,


@@ -19,7 +19,7 @@
#include "exynos_drm_plane.h"
static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -30,7 +30,7 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);


@@ -75,11 +75,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
file->driver_priv = NULL;
}
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_RENDER_ALLOW),
@@ -124,16 +119,11 @@ static struct drm_driver exynos_drm_driver = {
.open = exynos_drm_open,
.lastclose = drm_fb_helper_lastclose,
.postclose = exynos_drm_postclose,
.gem_free_object_unlocked = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
.gem_prime_vmap = exynos_drm_gem_prime_vmap,
.gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
.gem_prime_mmap = exynos_drm_gem_prime_mmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),


@@ -127,6 +127,19 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
kfree(exynos_gem);
}
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
.free = exynos_drm_gem_free_object,
.get_sg_table = exynos_drm_gem_prime_get_sg_table,
.vmap = exynos_drm_gem_prime_vmap,
.vunmap = exynos_drm_gem_prime_vunmap,
.vm_ops = &exynos_drm_gem_vm_ops,
};
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -141,6 +154,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
exynos_gem->size = size;
obj = &exynos_gem->base;
obj->funcs = &exynos_drm_gem_object_funcs;
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");


@@ -43,8 +43,10 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
@@ -62,7 +64,7 @@ static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
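
The fsl-dcu hunks show the conversion recipe for the new CRTC callback signature: handlers take the full atomic state and fetch their old state on demand. In general (foo_ name hypothetical):

static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	/* program hardware; touch old_state only where actually needed */
}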


@@ -1501,8 +1501,7 @@ cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
clock_recovery = false;
DRM_DEBUG_KMS("Start train\n");
reg = DP | DP_LINK_TRAIN_PAT_1;
reg = DP | DP_LINK_TRAIN_PAT_1;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
@@ -1575,7 +1574,7 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
cr_tries = 0;
DRM_DEBUG_KMS("\n");
reg = DP | DP_LINK_TRAIN_PAT_2;
reg = DP | DP_LINK_TRAIN_PAT_2;
for (;;) {
@@ -2083,7 +2082,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
DRM_INFO("failed to retrieve link info, disabling eDP\n");
drm_encoder_cleanup(encoder);
cdv_intel_dp_destroy(connector);
goto err_priv;
goto err_connector;
} else {
DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
intel_dp->dpcd[0], intel_dp->dpcd[1],


@@ -24,6 +24,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include "framebuffer.h"
#include "gem.h"
#include "gtt.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
@@ -285,6 +286,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
if (backing) {
backing->gem.funcs = &psb_gem_object_funcs;
drm_gem_private_object_init(dev, &backing->gem, aligned_size);
return backing;
}


@@ -18,7 +18,9 @@
#include "psb_drv.h"
void psb_gem_free_object(struct drm_gem_object *obj)
static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
@@ -36,6 +38,17 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data,
return -EINVAL;
}
static const struct vm_operations_struct psb_gem_vm_ops = {
.fault = psb_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
const struct drm_gem_object_funcs psb_gem_object_funcs = {
.free = psb_gem_free_object,
.vm_ops = &psb_gem_vm_ops,
};
/**
* psb_gem_create - create a mappable object
* @file: the DRM file of the client
@@ -63,6 +76,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
return -ENOSPC;
}
r->gem.funcs = &psb_gem_object_funcs;
/* Initialize the extra goodies GEM needs to do all the hard work */
if (drm_gem_object_init(dev, &r->gem, size) != 0) {
psb_gtt_free_range(dev, r);
@@ -123,7 +137,7 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
vm_fault_t psb_gem_fault(struct vm_fault *vmf)
static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj;


@@ -8,6 +8,9 @@
#ifndef _GEM_H
#define _GEM_H
extern const struct drm_gem_object_funcs psb_gem_object_funcs;
extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
u64 size, u32 *handlep, int stolen, u32 align);
#endif


@@ -480,12 +480,6 @@ static const struct dev_pm_ops psb_pm_ops = {
.runtime_idle = psb_runtime_idle,
};
static const struct vm_operations_struct psb_gem_vm_ops = {
.fault = psb_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct file_operations psb_gem_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -507,9 +501,6 @@ static struct drm_driver driver = {
.irq_uninstall = psb_irq_uninstall,
.irq_handler = psb_irq_handler,
.gem_free_object_unlocked = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
.ioctls = psb_ioctls,
.fops = &psb_gem_fops,


@@ -735,12 +735,10 @@ extern const struct drm_connector_helper_funcs
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
/* gem.c */
extern void psb_gem_free_object(struct drm_gem_object *obj);
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
extern vm_fault_t psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;


@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o


@@ -23,15 +23,15 @@
#include "hibmc_drm_regs.h"
struct hibmc_display_panel_pll {
unsigned long M;
unsigned long N;
unsigned long OD;
unsigned long POD;
u64 M;
u64 N;
u64 OD;
u64 POD;
};
struct hibmc_dislay_pll_config {
unsigned long hdisplay;
unsigned long vdisplay;
u64 hdisplay;
u64 vdisplay;
u32 pll1_config_value;
u32 pll2_config_value;
};
@@ -52,8 +52,6 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
};
#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1)))
static int hibmc_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -104,8 +102,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state = plane->state;
u32 reg;
s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
u32 line_l;
struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev);
struct drm_gem_vram_object *gbo;
if (!state->fb)
@@ -157,10 +155,10 @@ static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
.atomic_update = hibmc_plane_atomic_update,
};
static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
static void hibmc_crtc_dpms(struct drm_crtc *crtc, u32 dpms)
{
struct hibmc_drm_private *priv = crtc->dev->dev_private;
unsigned int reg;
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
u32 reg;
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
@@ -172,10 +170,10 @@ static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
}
static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
u32 reg;
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
@@ -191,10 +189,10 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
u32 reg;
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
drm_crtc_vblank_off(crtc);
@@ -214,7 +212,7 @@ static enum drm_mode_status
hibmc_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
int i = 0;
size_t i = 0;
int vrefresh = drm_mode_vrefresh(mode);
if (vrefresh < 59 || vrefresh > 61)
@@ -229,9 +227,9 @@ hibmc_crtc_mode_valid(struct drm_crtc *crtc,
return MODE_BAD;
}
static unsigned int format_pll_reg(void)
static u32 format_pll_reg(void)
{
unsigned int pllreg = 0;
u32 pllreg = 0;
struct hibmc_display_panel_pll pll = {0};
/*
@@ -251,10 +249,10 @@ static unsigned int format_pll_reg(void)
return pllreg;
}
static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
static void set_vclock_hisilicon(struct drm_device *dev, u64 pll)
{
u32 val;
struct hibmc_drm_private *priv = dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
val = readl(priv->mmio + CRT_PLL1_HS);
val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1));
@@ -281,11 +279,10 @@ static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
writel(val, priv->mmio + CRT_PLL1_HS);
}
static void get_pll_config(unsigned long x, unsigned long y,
u32 *pll1, u32 *pll2)
static void get_pll_config(u64 x, u64 y, u32 *pll1, u32 *pll2)
{
int i;
int count = ARRAY_SIZE(hibmc_pll_table);
size_t i;
size_t count = ARRAY_SIZE(hibmc_pll_table);
for (i = 0; i < count; i++) {
if (hibmc_pll_table[i].hdisplay == x &&
@@ -308,14 +305,14 @@ static void get_pll_config(unsigned long x, unsigned long y,
* FPGA only supports 7 predefined pixel clocks, and clock select is
* in bit 4:0 of new register 0x802a8.
*/
static unsigned int display_ctrl_adjust(struct drm_device *dev,
struct drm_display_mode *mode,
unsigned int ctrl)
static u32 display_ctrl_adjust(struct drm_device *dev,
struct drm_display_mode *mode,
u32 ctrl)
{
unsigned long x, y;
u64 x, y;
u32 pll1; /* bit[31:0] of PLL */
u32 pll2; /* bit[63:32] of PLL */
struct hibmc_drm_private *priv = dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
x = mode->hdisplay;
y = mode->vdisplay;
@@ -360,12 +357,12 @@ static unsigned int display_ctrl_adjust(struct drm_device *dev,
static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
unsigned int val;
u32 val;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_device *dev = crtc->dev;
struct hibmc_drm_private *priv = dev->dev_private;
int width = mode->hsync_end - mode->hsync_start;
int height = mode->vsync_end - mode->vsync_start;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
u32 width = mode->hsync_end - mode->hsync_start;
u32 height = mode->vsync_end - mode->vsync_start;
writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL);
writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) |
@@ -395,9 +392,9 @@ static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
unsigned int reg;
u32 reg;
struct drm_device *dev = crtc->dev;
struct hibmc_drm_private *priv = dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
@@ -427,7 +424,7 @@ static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct hibmc_drm_private *priv = crtc->dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1),
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
@@ -437,7 +434,7 @@ static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc)
static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct hibmc_drm_private *priv = crtc->dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0),
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
@@ -445,18 +442,18 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
{
struct hibmc_drm_private *priv = crtc->dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
void __iomem *mmio = priv->mmio;
u16 *r, *g, *b;
unsigned int reg;
int i;
u32 reg;
u32 i;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
for (i = 0; i < crtc->gamma_size; i++) {
unsigned int offset = i << 2;
u32 offset = i << 2;
u8 red = *r++ >> 8;
u8 green = *g++ >> 8;
u8 blue = *b++ >> 8;


@@ -29,8 +29,7 @@ DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct hibmc_drm_private *priv =
(struct hibmc_drm_private *)dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
u32 status;
status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
@@ -122,12 +121,11 @@ static void hibmc_kms_fini(struct hibmc_drm_private *priv)
/*
* It can operate in one of three modes: 0, 1 or Sleep.
*/
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
unsigned int power_mode)
void hibmc_set_power_mode(struct hibmc_drm_private *priv, u32 power_mode)
{
unsigned int control_value = 0;
u32 control_value = 0;
void __iomem *mmio = priv->mmio;
unsigned int input = 1;
u32 input = 1;
if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
return;
@@ -145,8 +143,8 @@ void hibmc_set_power_mode(struct hibmc_drm_private *priv,
void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
{
unsigned int gate_reg;
unsigned int mode;
u32 gate_reg;
u32 mode;
void __iomem *mmio = priv->mmio;
/* Get current power mode. */
@@ -171,7 +169,7 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
static void hibmc_hw_config(struct hibmc_drm_private *priv)
{
unsigned int reg;
u32 reg;
/* On hardware reset, power mode 0 is default. */
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
@@ -244,7 +242,7 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
static int hibmc_unload(struct drm_device *dev)
{
struct hibmc_drm_private *priv = dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
drm_atomic_helper_shutdown(dev);


@@ -14,31 +14,51 @@
#ifndef HIBMC_DRM_DRV_H
#define HIBMC_DRM_DRV_H
#include <linux/gpio/consumer.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
struct drm_device;
struct hibmc_connector {
struct drm_connector base;
struct i2c_adapter adapter;
struct i2c_algo_bit_data bit_data;
};
struct hibmc_drm_private {
/* hw */
void __iomem *mmio;
void __iomem *fb_map;
unsigned long fb_base;
unsigned long fb_size;
resource_size_t fb_base;
resource_size_t fb_size;
/* drm */
struct drm_device *dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
struct hibmc_connector connector;
bool mode_config_initialized;
};
static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
{
return container_of(connector, struct hibmc_connector, base);
}
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
return dev->dev_private;
}
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
unsigned int power_mode);
u32 power_mode);
void hibmc_set_current_gate(struct hibmc_drm_private *priv,
unsigned int gate);
u32 gate);
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
@@ -47,6 +67,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc);
void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
extern const struct drm_mode_config_funcs hibmc_mode_funcs;


@@ -0,0 +1,99 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Hisilicon Hibmc SoC drm driver
*
* Based on the bochs drm driver.
*
* Copyright (c) 2016 Huawei Limited.
*
* Author:
* Tian Tao <tiantao6@hisilicon.com>
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include "hibmc_drm_drv.h"
#define GPIO_DATA 0x0802A0
#define GPIO_DATA_DIRECTION 0x0802A4
#define I2C_SCL_MASK BIT(0)
#define I2C_SDA_MASK BIT(1)
static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
{
struct hibmc_connector *hibmc_connector = data;
struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if (value) {
tmp_dir &= ~mask;
writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
} else {
u32 tmp_data = readl(priv->mmio + GPIO_DATA);
tmp_data &= ~mask;
writel(tmp_data, priv->mmio + GPIO_DATA);
tmp_dir |= mask;
writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
}
}
static int hibmc_get_i2c_signal(void *data, u32 mask)
{
struct hibmc_connector *hibmc_connector = data;
struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if ((tmp_dir & mask) != mask) {
tmp_dir &= ~mask;
writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
}
return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0;
}
static void hibmc_ddc_setsda(void *data, int state)
{
hibmc_set_i2c_signal(data, I2C_SDA_MASK, state);
}
static void hibmc_ddc_setscl(void *data, int state)
{
hibmc_set_i2c_signal(data, I2C_SCL_MASK, state);
}
static int hibmc_ddc_getsda(void *data)
{
return hibmc_get_i2c_signal(data, I2C_SDA_MASK);
}
static int hibmc_ddc_getscl(void *data)
{
return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
}
int hibmc_ddc_create(struct drm_device *drm_dev,
struct hibmc_connector *connector)
{
connector->adapter.owner = THIS_MODULE;
connector->adapter.class = I2C_CLASS_DDC;
snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
connector->adapter.dev.parent = &drm_dev->pdev->dev;
i2c_set_adapdata(&connector->adapter, connector);
connector->adapter.algo_data = &connector->bit_data;
connector->bit_data.udelay = 20;
connector->bit_data.timeout = usecs_to_jiffies(2000);
connector->bit_data.data = connector;
connector->bit_data.setsda = hibmc_ddc_setsda;
connector->bit_data.setscl = hibmc_ddc_setscl;
connector->bit_data.getsda = hibmc_ddc_getsda;
connector->bit_data.getscl = hibmc_ddc_getscl;
return i2c_bit_add_bus(&connector->adapter);
}
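
Note the open-drain emulation here: hibmc_set_i2c_signal never drives a line high. It releases a line by switching the GPIO to input so the external pull-up raises it, and pulls a line low by switching the pin to output with the data bit cleared. The resulting adapter is registered via i2c_bit_add_bus() and, as the vdac hunks below show, handed to drm_connector_init_with_ddc() and consumed by drm_get_edid().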


@@ -21,21 +21,41 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
int count;
void *edid;
struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
edid = drm_get_edid(connector, &hibmc_connector->adapter);
if (edid) {
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
if (count)
goto out;
}
count = drm_add_modes_noedid(connector,
connector->dev->mode_config.max_width,
connector->dev->mode_config.max_height);
drm_set_preferred_mode(connector, 1024, 768);
out:
kfree(edid);
return count;
}
static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
struct drm_display_mode *mode)
{
return MODE_OK;
}
static void hibmc_connector_destroy(struct drm_connector *connector)
{
struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
i2c_del_adapter(&hibmc_connector->adapter);
drm_connector_cleanup(connector);
}
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
@@ -44,7 +64,7 @@ static const struct drm_connector_helper_funcs
static const struct drm_connector_funcs hibmc_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.destroy = hibmc_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -56,7 +76,7 @@ static void hibmc_encoder_mode_set(struct drm_encoder *encoder,
{
u32 reg;
struct drm_device *dev = encoder->dev;
struct hibmc_drm_private *priv = dev->dev_private;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1);
@@ -77,10 +97,17 @@ static const struct drm_encoder_funcs hibmc_encoder_funcs = {
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = priv->dev;
struct hibmc_connector *hibmc_connector = &priv->connector;
struct drm_encoder *encoder = &priv->encoder;
struct drm_connector *connector = &priv->connector;
struct drm_connector *connector = &hibmc_connector->base;
int ret;
ret = hibmc_ddc_create(dev, hibmc_connector);
if (ret) {
drm_err(dev, "failed to create ddc: %d\n", ret);
return ret;
}
encoder->possible_crtcs = 0x1;
ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
@@ -91,12 +118,15 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
ret = drm_connector_init(dev, connector, &hibmc_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
ret = drm_connector_init_with_ddc(dev, connector,
&hibmc_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
&hibmc_connector->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
return ret;
}
drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);


@@ -436,7 +436,7 @@ static void ade_dump_regs(void __iomem *base) { }
#endif
static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
@@ -459,7 +459,7 @@ static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;


@@ -77,14 +77,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
i915_gem_object_unpin_pages(obj);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
void *vaddr;
return i915_gem_object_pin_map(obj, I915_MAP_WB);
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);


@@ -39,9 +39,18 @@ static struct i915_global_object {
struct kmem_cache *slab_objects;
} global;
static const struct drm_gem_object_funcs i915_gem_object_funcs;
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
struct drm_i915_gem_object *obj;
obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
if (!obj)
return NULL;
obj->base.funcs = &i915_gem_object_funcs;
return obj;
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -101,7 +110,7 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
@@ -264,7 +273,7 @@ static void __i915_gem_free_work(struct work_struct *work)
i915_gem_flush_free_objects(i915);
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -403,6 +412,12 @@ int __init i915_global_objects_init(void)
return 0;
}
static const struct drm_gem_object_funcs i915_gem_object_funcs = {
.free = i915_gem_free_object,
.close = i915_gem_close_object,
.export = i915_gem_prime_export,
};
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"


@@ -38,9 +38,6 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_flush_free_objects(struct drm_i915_private *i915);
struct sg_table *


@@ -82,6 +82,7 @@ static int igt_dmabuf_import(void *arg)
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
void *obj_map, *dma_map;
struct dma_buf_map map;
u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
int err, i;
@@ -110,7 +111,8 @@
goto out_obj;
}
dma_map = dma_buf_vmap(dmabuf);
err = dma_buf_vmap(dmabuf, &map);
dma_map = err ? NULL : map.vaddr;
if (!dma_map) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -150,7 +152,7 @@
err = 0;
out_dma_map:
dma_buf_vunmap(dmabuf, dma_map);
dma_buf_vunmap(dmabuf, &map);
out_obj:
i915_gem_object_put(obj);
out_dmabuf:
@@ -163,6 +165,7 @@ static int igt_dmabuf_import_ownership(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
struct dma_buf_map map;
void *ptr;
int err;
@@ -170,7 +173,8 @@
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
ptr = dma_buf_vmap(dmabuf);
err = dma_buf_vmap(dmabuf, &map);
ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -178,7 +182,7 @@
}
memset(ptr, 0xc5, PAGE_SIZE);
dma_buf_vunmap(dmabuf, ptr);
dma_buf_vunmap(dmabuf, &map);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) {
@@ -212,6 +216,7 @@ static int igt_dmabuf_export_vmap(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
struct dma_buf_map map;
void *ptr;
int err;
@@ -228,7 +233,8 @@
}
i915_gem_object_put(obj);
ptr = dma_buf_vmap(dmabuf);
err = dma_buf_vmap(dmabuf, &map);
ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -244,7 +250,7 @@
memset(ptr, 0xc5, dmabuf->size);
err = 0;
dma_buf_vunmap(dmabuf, ptr);
dma_buf_vunmap(dmabuf, &map);
out:
dma_buf_put(dmabuf);
return err;


@@ -61,18 +61,24 @@ static void mock_dmabuf_release(struct dma_buf *dma_buf)
kfree(mock);
}
static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
void *vaddr;
return vm_map_ram(mock->pages, mock->npages, 0);
vaddr = vm_map_ram(mock->pages, mock->npages, 0);
if (!vaddr)
return -ENOMEM;
dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
vm_unmap_ram(vaddr, mock->npages);
vm_unmap_ram(map->vaddr, mock->npages);
}
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)


@@ -1750,12 +1750,8 @@ static struct drm_driver driver = {
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,


@@ -85,9 +85,6 @@ static struct drm_driver mock_driver = {
.name = "mock",
.driver_features = DRIVER_GEM,
.release = mock_device_release,
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
};
static void release_dev(struct device *dev)


@@ -3,6 +3,7 @@
* Copyright 2019 NXP.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <linux/platform_device.h>
@@ -77,8 +78,10 @@ static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
@@ -111,8 +114,10 @@ static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
