UAPI Changes:
 
 Cross-subsystem Changes:
 
 Core Changes:
   - mst: Improve topology logging
   - edid: Rework and improvements for displayid
 
 Driver Changes:
   - anx7625: Regulators support
   - bridge: Support for the Chipone ICN6211, Lontium LT8912B
   - lt9611: Fix 4k panels handling
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRcEzekXsqa64kGDp7j7w1vZxhRxQUCYGWaewAKCRDj7w1vZxhR
 xXhNAP94jdJwM8G7U9dNAXbkkYcqo/RusREsTIp3V3p4nJTDLQEA7tvre1tXQKVb
 vNl5yzexsaWe9+LsPR5Dm9ZDPk1VFQc=
 =ZVM6
 -----END PGP SIGNATURE-----

Merge tag 'drm-misc-next-2021-04-01' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.13:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - mst: Improve topology logging
  - edid: Rework and improvements for displayid

Driver Changes:
  - anx7625: Regulators support
  - bridge: Support for the Chipone ICN6211, Lontium LT8912B
  - lt9611: Fix 4k panels handling

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20210401110552.2b3yetlgsjtlotcn@gilmour
This commit is contained in:
Dave Airlie 2021-04-07 17:32:05 +10:00
Родитель fb457e02f0 6c74498300
Коммит 1539f71602
90 изменённых файлов: 4725 добавлений и 578 удалений

Просмотреть файл

@ -34,6 +34,15 @@ properties:
description: used for reset chip control, RESET_N pin B7.
maxItems: 1
vdd10-supply:
description: Regulator that provides the supply 1.0V power.
vdd18-supply:
description: Regulator that provides the supply 1.8V power.
vdd33-supply:
description: Regulator that provides the supply 3.3V power.
ports:
$ref: /schemas/graph.yaml#/properties/ports
@ -55,6 +64,9 @@ properties:
required:
- compatible
- reg
- vdd10-supply
- vdd18-supply
- vdd33-supply
- ports
additionalProperties: false
@ -72,6 +84,9 @@ examples:
reg = <0x58>;
enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>;
vdd10-supply = <&pp1000_mipibrdg>;
vdd18-supply = <&pp1800_mipibrdg>;
vdd33-supply = <&pp3300_mipibrdg>;
ports {
#address-cells = <1>;

Просмотреть файл

@ -0,0 +1,99 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/chipone,icn6211.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Chipone ICN6211 MIPI-DSI to RGB Converter bridge
maintainers:
- Jagan Teki <jagan@amarulasolutions.com>
description: |
ICN6211 is a MIPI-DSI to RGB converter bridge from Chipone.
It has a flexible configuration of MIPI DSI signal input and
produces RGB565, RGB666 and RGB888 output formats.
properties:
compatible:
enum:
- chipone,icn6211
reg:
maxItems: 1
description: virtual channel number of a DSI peripheral
enable-gpios:
description: Bridge EN pin, chip is reset when EN is low.
vdd1-supply:
description: A 1.8V/2.5V/3.3V supply that powers the MIPI RX.
vdd2-supply:
description: A 1.8V/2.5V/3.3V supply that powers the PLL.
vdd3-supply:
description: A 1.8V/2.5V/3.3V supply that powers the RGB output.
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Video port for MIPI DSI input
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
Video port for MIPI DPI output (panel or connector).
required:
- port@0
- port@1
required:
- compatible
- reg
- enable-gpios
- ports
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
dsi {
#address-cells = <1>;
#size-cells = <0>;
bridge@0 {
compatible = "chipone,icn6211";
reg = <0>;
enable-gpios = <&r_pio 0 5 GPIO_ACTIVE_HIGH>; /* LCD-RST: PL5 */
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
bridge_in_dsi: endpoint {
remote-endpoint = <&dsi_out_bridge>;
};
};
port@1 {
reg = <1>;
bridge_out_panel: endpoint {
remote-endpoint = <&panel_out_bridge>;
};
};
};
};
};

Просмотреть файл

@ -0,0 +1,102 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/lontium,lt8912b.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lontium LT8912B MIPI to HDMI Bridge
maintainers:
- Adrien Grassein <adrien.grassein@gmail.com>
description: |
The LT8912B is a bridge device which converts DSI to HDMI
properties:
compatible:
enum:
- lontium,lt8912b
reg:
maxItems: 1
reset-gpios:
maxItems: 1
description: GPIO connected to active high RESET pin.
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
Primary MIPI port for MIPI input
properties:
endpoint:
$ref: /schemas/media/video-interfaces.yaml#
unevaluatedProperties: false
properties:
data-lanes: true
required:
- data-lanes
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: |
HDMI port, should be connected to a node compatible with the
hdmi-connector binding.
required:
- port@0
- port@1
required:
- compatible
- reg
- reset-gpios
- ports
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
i2c4 {
#address-cells = <1>;
#size-cells = <0>;
hdmi-bridge@48 {
compatible = "lontium,lt8912b";
reg = <0x48>;
reset-gpios = <&max7323 0 GPIO_ACTIVE_LOW>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
hdmi_out_in: endpoint {
data-lanes = <0 1 2 3>;
remote-endpoint = <&mipi_dsi_out>;
};
};
port@1 {
reg = <1>;
endpoint {
remote-endpoint = <&hdmi_in>;
};
};
};
};
};
...

Просмотреть файл

@ -161,6 +161,8 @@ properties:
# Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
- innolux,g121x1-l03
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bca-ea1
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bge
# InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
- innolux,n125hce-gn1

Просмотреть файл

@ -257,3 +257,79 @@ fences in the kernel. This means:
userspace is allowed to use userspace fencing or long running compute
workloads. This also means no implicit fencing for shared buffers in these
cases.
Recoverable Hardware Page Faults Implications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Modern hardware supports recoverable page faults, which has a lot of
implications for DMA fences.
First, a pending page fault obviously holds up the work that's running on the
accelerator and a memory allocation is usually required to resolve the fault.
But memory allocations are not allowed to gate completion of DMA fences, which
means any workload using recoverable page faults cannot use DMA fences for
synchronization. Synchronization fences controlled by userspace must be used
instead.
On GPUs this poses a problem, because current desktop compositor protocols on
Linux rely on DMA fences, which means without an entirely new userspace stack
built on top of userspace fences, they cannot benefit from recoverable page
faults. Specifically this means implicit synchronization will not be possible.
The exception is when page faults are only used as migration hints and never to
on-demand fill a memory request. For now this means recoverable page
faults on GPUs are limited to pure compute workloads.
Furthermore GPUs usually have shared resources between the 3D rendering and
compute side, like compute units or command submission engines. If both a 3D
job with a DMA fence and a compute workload using recoverable page faults are
pending they could deadlock:
- The 3D workload might need to wait for the compute job to finish and release
hardware resources first.
- The compute workload might be stuck in a page fault, because the memory
allocation is waiting for the DMA fence of the 3D workload to complete.
There are a few options to prevent this problem, one of which drivers need to
ensure:
- Compute workloads can always be preempted, even when a page fault is pending
and not yet repaired. Not all hardware supports this.
- DMA fence workloads and workloads which need page fault handling have
independent hardware resources to guarantee forward progress. This could be
achieved through e.g. through dedicated engines and minimal compute unit
reservations for DMA fence workloads.
- The reservation approach could be further refined by only reserving the
hardware resources for DMA fence workloads when they are in-flight. This must
cover the time from when the DMA fence is visible to other threads up to
moment when fence is completed through dma_fence_signal().
- As a last resort, if the hardware provides no useful reservation mechanics,
all workloads must be flushed from the GPU when switching between jobs
requiring DMA fences or jobs requiring page fault handling: This means all DMA
fences must complete before a compute job with page fault handling can be
inserted into the scheduler queue. And vice versa, before a DMA fence can be
made visible anywhere in the system, all compute workloads must be preempted
to guarantee all pending GPU page faults are flushed.
- Only a fairly theoretical option would be to untangle these dependencies when
allocating memory to repair hardware page faults, either through separate
memory blocks or runtime tracking of the full dependency graph of all DMA
fences. This results in a very wide impact on the kernel, since resolving the page
on the CPU side can itself involve a page fault. It is much more feasible and
robust to limit the impact of handling hardware page faults to the specific
driver.
Note that workloads that run on independent hardware like copy engines or other
GPUs do not have any impact. This allows us to keep using DMA fences internally
in the kernel even for resolving hardware page faults, e.g. by using copy
engines to clear or copy memory needed to resolve the page fault.
In some ways this page fault problem is a special case of the `Infinite DMA
Fences` discussions: Infinite fences from compute workloads are allowed to
depend on DMA fences, but not the other way around. And not even the page fault
problem is new, because some other CPU thread in userspace might
hit a page fault which holds up a userspace fence - supporting page faults on
GPUs doesn't add anything fundamentally new.

Просмотреть файл

@ -677,7 +677,7 @@ Outside DRM
Convert fbdev drivers to DRM
----------------------------
There are plenty of fbdev drivers for older hardware. Some hwardware has
There are plenty of fbdev drivers for older hardware. Some hardware has
become obsolete, but some still provides good(-enough) framebuffers. The
drivers that are still useful should be converted to DRM and afterwards
removed from fbdev.

Просмотреть файл

@ -5568,6 +5568,12 @@ S: Maintained
F: Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
F: drivers/gpu/drm/panel/panel-boe-himax8279d.c
DRM DRIVER FOR CHIPONE ICN6211 MIPI-DSI to RGB CONVERTER BRIDGE
M: Jagan Teki <jagan@amarulasolutions.com>
S: Maintained
F: Documentation/devicetree/bindings/display/bridge/chipone,icn6211.yaml
F: drivers/gpu/drm/bridge/chipone-icn6211.c
DRM DRIVER FOR FARADAY TVE200 TV ENCODER
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
@ -5586,6 +5592,14 @@ S: Maintained
F: Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml
F: drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
DRM DRIVER FOR GENERIC USB DISPLAY
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
W: https://github.com/notro/gud/wiki
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/gud/
F: include/drm/gud.h
DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
M: Hans de Goede <hdegoede@redhat.com>
S: Maintained
@ -5894,6 +5908,7 @@ F: drivers/gpu/drm/atmel-hlcdc/
DRM DRIVERS FOR BRIDGE CHIPS
M: Andrzej Hajda <a.hajda@samsung.com>
M: Neil Armstrong <narmstrong@baylibre.com>
M: Robert Foss <robert.foss@linaro.org>
R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
R: Jonas Karlman <jonas@kwiboo.se>
R: Jernej Skrabec <jernej.skrabec@siol.net>
@ -10460,6 +10475,12 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
F: drivers/hid/hid-lg-g15.c
LONTIUM LT8912B MIPI TO HDMI BRIDGE
M: Adrien Grassein <adrien.grassein@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
F: drivers/gpu/drm/bridge/lontium-lt8912b.c
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Sathya Prakash <sathya.prakash@broadcom.com>
M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>

Просмотреть файл

@ -384,6 +384,8 @@ source "drivers/gpu/drm/tidss/Kconfig"
source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
# Keep legacy drivers last
menuconfig DRM_LEGACY

Просмотреть файл

@ -7,7 +7,7 @@ drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_drv.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \
drm_encoder_slave.o \
drm_trace_points.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
@ -125,3 +125,4 @@ obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/

Просмотреть файл

@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
spin_lock(&ttm_glob.lru_lock);
spin_lock(&adev->mman.bdev.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&adev->mman.bdev.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
spin_lock(&ttm_glob.lru_lock);
spin_lock(&adev->mman.bdev.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
&bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&adev->mman.bdev.lru_lock);
vm->bulk_moveable = true;
}

Просмотреть файл

@ -27,6 +27,19 @@ config DRM_CDNS_DSI
Support Cadence DPI to DSI bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
config DRM_CHIPONE_ICN6211
tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge"
depends on OF
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
help
ICN6211 is a MIPI-DSI/RGB converter bridge from Chipone.
It has a flexible configuration of MIPI DSI signal input
and produces RGB565, RGB666 and RGB888 output formats.
If in doubt, say "N".
config DRM_CHRONTEL_CH7033
tristate "Chrontel CH7033 Video Encoder"
depends on OF
@ -48,6 +61,20 @@ config DRM_DISPLAY_CONNECTOR
on ARM-based platforms. Saying Y here when this driver is not needed
will not cause any issue.
config DRM_LONTIUM_LT8912B
tristate "Lontium LT8912B DSI/HDMI bridge"
depends on OF
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select REGMAP_I2C
help
Driver for Lontium LT8912B DSI to HDMI bridge
chip driver.
Please say Y if you have such hardware.
Say M here if you want to support this hardware as a module.
The module will be named "lontium-lt8912b".
config DRM_LONTIUM_LT9611
tristate "Lontium LT9611 DSI/HDMI bridge"
select SND_SOC_HDMI_CODEC if SND_SOC

Просмотреть файл

@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o

Просмотреть файл

@ -550,28 +550,38 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
DRM_MODE_CONNECTOR_eDP);
if (err) {
DRM_ERROR("Failed to initialize connector: %d\n", err);
return err;
goto aux_unregister;
}
drm_connector_helper_add(&anx6345->connector,
&anx6345_connector_helper_funcs);
err = drm_connector_register(&anx6345->connector);
if (err) {
DRM_ERROR("Failed to register connector: %d\n", err);
return err;
}
anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx6345->connector,
bridge->encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
return err;
goto connector_cleanup;
}
err = drm_connector_register(&anx6345->connector);
if (err) {
DRM_ERROR("Failed to register connector: %d\n", err);
goto connector_cleanup;
}
return 0;
connector_cleanup:
drm_connector_cleanup(&anx6345->connector);
aux_unregister:
drm_dp_aux_unregister(&anx6345->aux);
return err;
}
static void anx6345_bridge_detach(struct drm_bridge *bridge)
{
drm_dp_aux_unregister(&bridge_to_anx6345(bridge)->aux);
}
static enum drm_mode_status
@ -624,6 +634,7 @@ static void anx6345_bridge_enable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs anx6345_bridge_funcs = {
.attach = anx6345_bridge_attach,
.detach = anx6345_bridge_detach,
.mode_valid = anx6345_bridge_mode_valid,
.disable = anx6345_bridge_disable,
.enable = anx6345_bridge_enable,

Просмотреть файл

@ -918,28 +918,38 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
DRM_MODE_CONNECTOR_DisplayPort);
if (err) {
DRM_ERROR("Failed to initialize connector: %d\n", err);
return err;
goto aux_unregister;
}
drm_connector_helper_add(&anx78xx->connector,
&anx78xx_connector_helper_funcs);
err = drm_connector_register(&anx78xx->connector);
if (err) {
DRM_ERROR("Failed to register connector: %d\n", err);
return err;
}
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx78xx->connector,
bridge->encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
return err;
goto connector_cleanup;
}
err = drm_connector_register(&anx78xx->connector);
if (err) {
DRM_ERROR("Failed to register connector: %d\n", err);
goto connector_cleanup;
}
return 0;
connector_cleanup:
drm_connector_cleanup(&anx78xx->connector);
aux_unregister:
drm_dp_aux_unregister(&anx78xx->aux);
return err;
}
static void anx78xx_bridge_detach(struct drm_bridge *bridge)
{
drm_dp_aux_unregister(&bridge_to_anx78xx(bridge)->aux);
}
static enum drm_mode_status
@ -1013,6 +1023,7 @@ static void anx78xx_bridge_enable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs anx78xx_bridge_funcs = {
.attach = anx78xx_bridge_attach,
.detach = anx78xx_bridge_detach,
.mode_valid = anx78xx_bridge_mode_valid,
.disable = anx78xx_bridge_disable,
.mode_set = anx78xx_bridge_mode_set,

Просмотреть файл

@ -1782,6 +1782,7 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
err_disable_pm_runtime:
pm_runtime_disable(dp->dev);
drm_dp_aux_unregister(&dp->aux);
return ret;
}

Просмотреть файл

@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@ -875,12 +876,25 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
static void anx7625_power_on(struct anx7625_data *ctx)
{
struct device *dev = &ctx->client->dev;
int ret, i;
if (!ctx->pdata.low_power_mode) {
DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
return;
}
for (i = 0; i < ARRAY_SIZE(ctx->pdata.supplies); i++) {
ret = regulator_enable(ctx->pdata.supplies[i].consumer);
if (ret < 0) {
DRM_DEV_DEBUG_DRIVER(dev, "cannot enable supply %d: %d\n",
i, ret);
goto reg_err;
}
usleep_range(2000, 2100);
}
usleep_range(4000, 4100);
/* Power on pin enable */
gpiod_set_value(ctx->pdata.gpio_p_on, 1);
usleep_range(10000, 11000);
@ -889,11 +903,16 @@ static void anx7625_power_on(struct anx7625_data *ctx)
usleep_range(10000, 11000);
DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
return;
reg_err:
for (--i; i >= 0; i--)
regulator_disable(ctx->pdata.supplies[i].consumer);
}
static void anx7625_power_standby(struct anx7625_data *ctx)
{
struct device *dev = &ctx->client->dev;
int ret;
if (!ctx->pdata.low_power_mode) {
DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
@ -904,6 +923,12 @@ static void anx7625_power_standby(struct anx7625_data *ctx)
usleep_range(1000, 1100);
gpiod_set_value(ctx->pdata.gpio_p_on, 0);
usleep_range(1000, 1100);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->pdata.supplies),
ctx->pdata.supplies);
if (ret < 0)
DRM_DEV_DEBUG_DRIVER(dev, "cannot disable supplies %d\n", ret);
DRM_DEV_DEBUG_DRIVER(dev, "power down\n");
}
@ -1742,6 +1767,15 @@ static int anx7625_i2c_probe(struct i2c_client *client,
platform->client = client;
i2c_set_clientdata(client, platform);
pdata->supplies[0].supply = "vdd10";
pdata->supplies[1].supply = "vdd18";
pdata->supplies[2].supply = "vdd33";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pdata->supplies),
pdata->supplies);
if (ret) {
DRM_DEV_ERROR(dev, "fail to get power supplies: %d\n", ret);
return ret;
}
anx7625_init_gpio(platform);
atomic_set(&platform->power_status, 0);

Просмотреть файл

@ -350,6 +350,7 @@ struct s_edid_data {
struct anx7625_platform_data {
struct gpio_desc *gpio_p_on;
struct gpio_desc *gpio_reset;
struct regulator_bulk_data supplies[3];
struct drm_bridge *panel_bridge;
int intp_irq;
u32 low_power_mode;

Просмотреть файл

@ -0,0 +1,293 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2020 Amarula Solutions(India)
* Author: Jagan Teki <jagan@amarulasolutions.com>
*/
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_mipi_dsi.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#define HACTIVE_LI 0x20
#define VACTIVE_LI 0x21
#define VACTIVE_HACTIVE_HI 0x22
#define HFP_LI 0x23
#define HSYNC_LI 0x24
#define HBP_LI 0x25
#define HFP_HSW_HBP_HI 0x26
#define VFP 0x27
#define VSYNC 0x28
#define VBP 0x29
/* Per-device state for the Chipone ICN6211 MIPI-DSI to RGB bridge. */
struct chipone {
struct device *dev; /* the underlying MIPI-DSI device (see chipone_dsi_write()) */
struct drm_bridge bridge; /* this driver's bridge, registered in probe */
struct drm_bridge *panel_bridge; /* downstream panel wrapped as a bridge */
struct gpio_desc *enable_gpio; /* EN pin; chip is reset while low */
struct regulator *vdd1; /* optional supplies; NULL when absent in DT */
struct regulator *vdd2;
struct regulator *vdd3;
};
/* Recover the struct chipone that embeds the given bridge. */
static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
{
return container_of(bridge, struct chipone, bridge);
}
/*
 * Fetch the adjusted mode programmed on the CRTC feeding this bridge.
 * NOTE(review): dereferences bridge->encoder->crtc->state, so this is only
 * safe while the pipeline is fully bound (enable time) -- confirm no caller
 * uses it outside the enable path.
 */
static struct drm_display_mode *bridge_to_mode(struct drm_bridge *bridge)
{
return &bridge->encoder->crtc->state->adjusted_mode;
}
/*
 * Send a generic (non-DCS) DSI write of @len bytes from @seq to the chip.
 * icn->dev is the MIPI-DSI device set up in chipone_probe().
 */
static inline int chipone_dsi_write(struct chipone *icn, const void *seq,
size_t len)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(icn->dev);
return mipi_dsi_generic_write(dsi, seq, len);
}
/*
 * Emit a DSI write built from the byte list @seq.
 * NOTE(review): the return value of chipone_dsi_write() is discarded, so a
 * failed transfer in the enable sequence goes unnoticed.
 */
#define ICN6211_DSI(icn, seq...) \
{ \
const u8 d[] = { seq }; \
chipone_dsi_write(icn, d, ARRAY_SIZE(d)); \
}
/*
 * Program the ICN6211 timing registers from the current CRTC mode and run
 * the vendor init sequence.  Register order follows the vendor reference;
 * several writes use undocumented register numbers (0x7a, 0xb5, ...).
 */
static void chipone_enable(struct drm_bridge *bridge)
{
struct chipone *icn = bridge_to_chipone(bridge);
struct drm_display_mode *mode = bridge_to_mode(bridge);
/* Undocumented vendor init write. */
ICN6211_DSI(icn, 0x7a, 0xc1);
/* Low bytes of the active area; the high nibbles go in one register below. */
ICN6211_DSI(icn, HACTIVE_LI, mode->hdisplay & 0xff);
ICN6211_DSI(icn, VACTIVE_LI, mode->vdisplay & 0xff);
/*
 * lsb nibble: 2nd nibble of hdisplay
 * msb nibble: 2nd nibble of vdisplay
 */
ICN6211_DSI(icn, VACTIVE_HACTIVE_HI,
((mode->hdisplay >> 8) & 0xf) |
(((mode->vdisplay >> 8) & 0xf) << 4));
/* Horizontal blanking: front porch, sync width, back porch. */
ICN6211_DSI(icn, HFP_LI, mode->hsync_start - mode->hdisplay);
ICN6211_DSI(icn, HSYNC_LI, mode->hsync_end - mode->hsync_start);
ICN6211_DSI(icn, HBP_LI, mode->htotal - mode->hsync_end);
ICN6211_DSI(icn, HFP_HSW_HBP_HI, 0x00);
/* Vertical blanking. */
ICN6211_DSI(icn, VFP, mode->vsync_start - mode->vdisplay);
ICN6211_DSI(icn, VSYNC, mode->vsync_end - mode->vsync_start);
ICN6211_DSI(icn, VBP, mode->vtotal - mode->vsync_end);
/* dsi specific sequence */
ICN6211_DSI(icn, MIPI_DCS_SET_TEAR_OFF, 0x80);
ICN6211_DSI(icn, MIPI_DCS_SET_ADDRESS_MODE, 0x28);
ICN6211_DSI(icn, 0xb5, 0xa0);
ICN6211_DSI(icn, 0x5c, 0xff);
ICN6211_DSI(icn, MIPI_DCS_SET_COLUMN_ADDRESS, 0x01);
ICN6211_DSI(icn, MIPI_DCS_GET_POWER_SAVE, 0x92);
ICN6211_DSI(icn, 0x6b, 0x71);
ICN6211_DSI(icn, 0x69, 0x2b);
ICN6211_DSI(icn, MIPI_DCS_ENTER_SLEEP_MODE, 0x40);
ICN6211_DSI(icn, MIPI_DCS_EXIT_SLEEP_MODE, 0x98);
/* icn6211 specific sequence */
ICN6211_DSI(icn, 0xb6, 0x20);
ICN6211_DSI(icn, 0x51, 0x20);
ICN6211_DSI(icn, 0x09, 0x10);
/* Let the chip settle before pixels start flowing. */
usleep_range(10000, 11000);
}
/*
 * Power the chip up: enable whichever of the three optional supplies were
 * found in DT, then release the EN pin.  Regulator failures are logged but
 * not fatal -- the hook returns void, so we continue and hope the board
 * wiring keeps the rail up.
 */
static void chipone_pre_enable(struct drm_bridge *bridge)
{
struct chipone *icn = bridge_to_chipone(bridge);
int ret;
if (icn->vdd1) {
ret = regulator_enable(icn->vdd1);
if (ret)
DRM_DEV_ERROR(icn->dev,
"failed to enable VDD1 regulator: %d\n", ret);
}
if (icn->vdd2) {
ret = regulator_enable(icn->vdd2);
if (ret)
DRM_DEV_ERROR(icn->dev,
"failed to enable VDD2 regulator: %d\n", ret);
}
if (icn->vdd3) {
ret = regulator_enable(icn->vdd3);
if (ret)
DRM_DEV_ERROR(icn->dev,
"failed to enable VDD3 regulator: %d\n", ret);
}
/* EN high takes the chip out of reset; give it time to come up. */
gpiod_set_value(icn->enable_gpio, 1);
usleep_range(10000, 11000);
}
/*
 * Power the chip down: drop the optional supplies, then assert reset by
 * pulling EN low (mirror of chipone_pre_enable()).
 */
static void chipone_post_disable(struct drm_bridge *bridge)
{
struct chipone *icn = bridge_to_chipone(bridge);
if (icn->vdd1)
regulator_disable(icn->vdd1);
if (icn->vdd2)
regulator_disable(icn->vdd2);
if (icn->vdd3)
regulator_disable(icn->vdd3);
gpiod_set_value(icn->enable_gpio, 0);
}
/* Chain the downstream panel bridge onto our encoder when we are attached. */
static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
}
static const struct drm_bridge_funcs chipone_bridge_funcs = {
.attach = chipone_attach,
.post_disable = chipone_post_disable,
.pre_enable = chipone_pre_enable,
.enable = chipone_enable,
};
/*
 * Parse the DT node: three optional vddN supplies, the mandatory enable
 * GPIO, and the panel on port 1 (wrapped as a panel bridge).
 *
 * Returns 0 on success, -EPROBE_DEFER when a resource is not ready yet, or
 * another negative errno.  A missing optional regulator is demoted to NULL
 * and only logged at debug level.
 */
static int chipone_parse_dt(struct chipone *icn)
{
struct device *dev = icn->dev;
struct drm_panel *panel;
int ret;
icn->vdd1 = devm_regulator_get_optional(dev, "vdd1");
if (IS_ERR(icn->vdd1)) {
ret = PTR_ERR(icn->vdd1);
if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
icn->vdd1 = NULL;
DRM_DEV_DEBUG(dev, "failed to get VDD1 regulator: %d\n", ret);
}
icn->vdd2 = devm_regulator_get_optional(dev, "vdd2");
if (IS_ERR(icn->vdd2)) {
ret = PTR_ERR(icn->vdd2);
if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
icn->vdd2 = NULL;
DRM_DEV_DEBUG(dev, "failed to get VDD2 regulator: %d\n", ret);
}
icn->vdd3 = devm_regulator_get_optional(dev, "vdd3");
if (IS_ERR(icn->vdd3)) {
ret = PTR_ERR(icn->vdd3);
if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
icn->vdd3 = NULL;
DRM_DEV_DEBUG(dev, "failed to get VDD3 regulator: %d\n", ret);
}
/* EN is requested low so the chip stays in reset until pre_enable. */
icn->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(icn->enable_gpio)) {
DRM_DEV_ERROR(dev, "failed to get enable GPIO\n");
return PTR_ERR(icn->enable_gpio);
}
/* Port 1 is the DPI output (panel or connector) per the binding. */
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
if (ret)
return ret;
icn->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(icn->panel_bridge))
return PTR_ERR(icn->panel_bridge);
return 0;
}
/*
 * MIPI-DSI probe: allocate state, parse DT, register the bridge, then
 * attach to the DSI host with the fixed 4-lane RGB888 video configuration.
 * The bridge is added before mipi_dsi_attach() and removed again on attach
 * failure so no half-registered bridge is left behind.
 */
static int chipone_probe(struct mipi_dsi_device *dsi)
{
struct device *dev = &dsi->dev;
struct chipone *icn;
int ret;
icn = devm_kzalloc(dev, sizeof(struct chipone), GFP_KERNEL);
if (!icn)
return -ENOMEM;
mipi_dsi_set_drvdata(dsi, icn);
icn->dev = dev;
ret = chipone_parse_dt(icn);
if (ret)
return ret;
icn->bridge.funcs = &chipone_bridge_funcs;
icn->bridge.type = DRM_MODE_CONNECTOR_DPI;
icn->bridge.of_node = dev->of_node;
drm_bridge_add(&icn->bridge);
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
drm_bridge_remove(&icn->bridge);
dev_err(dev, "failed to attach dsi\n");
}
return ret;
}
/* Tear down in reverse probe order: detach from the DSI host, drop the bridge. */
static int chipone_remove(struct mipi_dsi_device *dsi)
{
struct chipone *icn = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_bridge_remove(&icn->bridge);
return 0;
}
static const struct of_device_id chipone_of_match[] = {
{ .compatible = "chipone,icn6211", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, chipone_of_match);
static struct mipi_dsi_driver chipone_driver = {
.probe = chipone_probe,
.remove = chipone_remove,
.driver = {
.name = "chipone-icn6211",
.owner = THIS_MODULE,
.of_match_table = chipone_of_match,
},
};
module_mipi_dsi_driver(chipone_driver);
MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
MODULE_DESCRIPTION("Chipone ICN6211 MIPI-DSI to RGB Converter Bridge");
MODULE_LICENSE("GPL");

Просмотреть файл

@ -0,0 +1,765 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <video/videomode.h>
#define I2C_MAIN 0
#define I2C_ADDR_MAIN 0x48
#define I2C_CEC_DSI 1
#define I2C_ADDR_CEC_DSI 0x49
#define I2C_MAX_IDX 2
/* Per-device state for the Lontium LT8912B DSI to HDMI bridge. */
struct lt8912 {
struct device *dev;
struct drm_bridge bridge; /* this driver's bridge */
struct drm_connector connector; /* created when attach asks for one */
struct i2c_client *i2c_client[I2C_MAX_IDX]; /* [0]=main, [1]=CEC/DSI page */
struct regmap *regmap[I2C_MAX_IDX]; /* one regmap per I2C page */
struct device_node *host_node; /* DSI host DT node */
struct drm_bridge *hdmi_port; /* next bridge towards the HDMI connector */
struct mipi_dsi_device *dsi;
struct gpio_desc *gp_reset; /* active-high RESET pin */
struct videomode mode; /* timings programmed by lt8912_video_setup() */
u8 data_lanes; /* DSI lane count from the DT endpoint */
bool is_power_on;
bool is_attached;
};
/*
 * Write the one-time analog/clock init sequence to the main I2C page.
 * Register values come from the vendor reference configuration.
 * Returns 0 or a negative errno from regmap_multi_reg_write().
 */
static int lt8912_write_init_config(struct lt8912 *lt)
{
const struct reg_sequence seq[] = {
/* Digital clock enable */
{0x08, 0xff},
{0x09, 0xff},
{0x0a, 0xff},
{0x0b, 0x7c},
{0x0c, 0xff},
{0x42, 0x04},
/* Tx analog */
{0x31, 0xb1},
{0x32, 0xb1},
{0x33, 0x0e},
{0x37, 0x00},
{0x38, 0x22},
{0x60, 0x82},
/* Cbus analog */
{0x39, 0x45},
{0x3a, 0x00},
{0x3b, 0x00},
/* HDMI PLL analog */
{0x44, 0x31},
{0x55, 0x44},
{0x57, 0x01},
{0x5a, 0x02},
/* MIPI analog */
{0x3e, 0xd6},
{0x3f, 0xd4},
{0x41, 0x3c},
{0xB2, 0x00},
};
return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq));
}
/*
 * Basic MIPI RX setup on the CEC/DSI I2C page (vendor reference values).
 * Returns 0 or a negative errno from regmap_multi_reg_write().
 */
static int lt8912_write_mipi_basic_config(struct lt8912 *lt)
{
const struct reg_sequence seq[] = {
{0x12, 0x04},
{0x14, 0x00},
{0x15, 0x00},
{0x1a, 0x03},
{0x1b, 0x03},
};
return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq));
};
/*
 * Program the DDS (clock synthesis) block on the CEC/DSI I2C page.
 * Opaque vendor table; the meaning of individual registers is not
 * documented publicly.  Returns 0 or a negative errno.
 */
static int lt8912_write_dds_config(struct lt8912 *lt)
{
const struct reg_sequence seq[] = {
{0x4e, 0xff},
{0x4f, 0x56},
{0x50, 0x69},
{0x51, 0x80},
{0x1f, 0x5e},
{0x20, 0x01},
{0x21, 0x2c},
{0x22, 0x01},
{0x23, 0xfa},
{0x24, 0x00},
{0x25, 0xc8},
{0x26, 0x00},
{0x27, 0x5e},
{0x28, 0x01},
{0x29, 0x2c},
{0x2a, 0x01},
{0x2b, 0xfa},
{0x2c, 0x00},
{0x2d, 0xc8},
{0x2e, 0x00},
{0x42, 0x64},
{0x43, 0x00},
{0x44, 0x04},
{0x45, 0x00},
{0x46, 0x59},
{0x47, 0x00},
{0x48, 0xf2},
{0x49, 0x06},
{0x4a, 0x00},
{0x4b, 0x72},
{0x4c, 0x45},
{0x4d, 0x00},
{0x52, 0x08},
{0x53, 0x00},
{0x54, 0xb2},
{0x55, 0x00},
{0x56, 0xe4},
{0x57, 0x0d},
{0x58, 0x00},
{0x59, 0xe4},
{0x5a, 0x8a},
{0x5b, 0x00},
{0x5c, 0x34},
{0x1e, 0x4f},
{0x51, 0x00},
};
return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq));
}
/*
 * Pulse the RX logic reset: clear the reset bits in main register 0x03,
 * wait for the logic to settle, then set them again.
 *
 * Returns 0 on success or a negative errno from regmap_write().  The
 * previous code OR-ed the two return values together ("ret |= ..."),
 * which mangles negative errnos into a meaningless bit pattern; propagate
 * the first failure instead.
 */
static int lt8912_write_rxlogicres_config(struct lt8912 *lt)
{
	int ret;

	ret = regmap_write(lt->regmap[I2C_MAIN], 0x03, 0x7f);
	if (ret)
		return ret;

	usleep_range(10000, 20000);

	return regmap_write(lt->regmap[I2C_MAIN], 0x03, 0xff);
}
/*
 * Program the LVDS output block on the CEC/DSI I2C page (vendor reference
 * sequence; several registers are written twice to toggle enable bits).
 * Returns 0 or a negative errno from regmap_multi_reg_write().
 */
static int lt8912_write_lvds_config(struct lt8912 *lt)
{
const struct reg_sequence seq[] = {
{0x44, 0x30},
{0x51, 0x05},
{0x50, 0x24},
{0x51, 0x2d},
{0x52, 0x04},
{0x69, 0x0e},
{0x69, 0x8e},
{0x6a, 0x00},
{0x6c, 0xb8},
{0x6b, 0x51},
{0x04, 0xfb},
{0x04, 0xff},
{0x7f, 0x00},
{0xa8, 0x13},
{0x02, 0xf7},
{0x02, 0xff},
{0x03, 0xcf},
{0x03, 0xff},
};
return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq));
};
/* Recover the struct lt8912 that embeds the given bridge. */
static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b)
{
return container_of(b, struct lt8912, bridge);
}
/* Recover the struct lt8912 that embeds the given connector. */
static inline struct lt8912 *connector_to_lt8912(struct drm_connector *c)
{
return container_of(c, struct lt8912, connector);
}
/* Both I2C pages use 8-bit register addresses and 8-bit values. */
static const struct regmap_config lt8912_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
};
/*
 * Create the secondary I2C client(s) and a regmap for each page.
 * Index 0 reuses an existing client; only indexes > 0 get a dummy device.
 * NOTE(review): lt->i2c_client[0] is read here but never assigned in this
 * function -- presumably the caller stores the probe's @client there first;
 * confirm against the probe path.
 * Returns 0 or a negative errno.
 */
static int lt8912_init_i2c(struct lt8912 *lt, struct i2c_client *client)
{
unsigned int i;
/*
 * At this time we only initialize 2 chips, but the lt8912 provides
 * a third interface for the audio over HDMI configuration.
 */
struct i2c_board_info info[] = {
{ I2C_BOARD_INFO("lt8912p0", I2C_ADDR_MAIN), },
{ I2C_BOARD_INFO("lt8912p1", I2C_ADDR_CEC_DSI), },
};
if (!lt)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(info); i++) {
if (i > 0) {
lt->i2c_client[i] = i2c_new_dummy_device(client->adapter,
info[i].addr);
if (IS_ERR(lt->i2c_client[i]))
return PTR_ERR(lt->i2c_client[i]);
}
lt->regmap[i] = devm_regmap_init_i2c(lt->i2c_client[i],
&lt8912_regmap_config);
if (IS_ERR(lt->regmap[i]))
return PTR_ERR(lt->regmap[i]);
}
return 0;
}
/*
 * Unregister the dummy I2C devices created in lt8912_init_i2c().
 * Index 0 belongs to the I2C core and is deliberately left alone.
 */
static int lt8912_free_i2c(struct lt8912 *lt)
{
	int idx;

	for (idx = I2C_MAX_IDX - 1; idx >= 1; idx--)
		i2c_unregister_device(lt->i2c_client[idx]);

	return 0;
}
/*
 * Release the reset line and give the chip time to come up.
 * NOTE(review): gp_reset is requested with GPIOD_OUT_HIGH in
 * lt8912_parse_dt(), so writing 0 here de-asserts reset — confirm
 * against the board's reset polarity.
 */
static int lt8912_hard_power_on(struct lt8912 *lt)
{
	gpiod_set_value_cansleep(lt->gp_reset, 0);
	msleep(20);

	return 0;
}
/*
 * Assert the reset line and mark the chip powered down so that the next
 * lt8912_soft_power_on() call re-runs the init register sequence.
 */
static void lt8912_hard_power_off(struct lt8912 *lt)
{
	gpiod_set_value_cansleep(lt->gp_reset, 1);
	msleep(20);
	lt->is_power_on = false;
}
/*
 * Program the DSI receiver timing registers from the videomode captured
 * in lt8912_bridge_mode_set().
 *
 * Returns 0 on success; on failure the individual regmap_write() results
 * are OR-ed together, so only "a write failed" is reported.
 */
static int lt8912_video_setup(struct lt8912 *lt)
{
	u32 hactive, h_total, hpw, hfp, hbp;
	u32 vactive, v_total, vpw, vfp, vbp;
	u8 settle = 0x08;	/* default MIPI settle value; units per datasheet — TODO confirm */
	int ret;

	if (!lt)
		return -EINVAL;

	/* Horizontal timings, in pixels. */
	hactive = lt->mode.hactive;
	hfp = lt->mode.hfront_porch;
	hpw = lt->mode.hsync_len;
	hbp = lt->mode.hback_porch;
	h_total = hactive + hfp + hpw + hbp;

	/* Vertical timings, in lines. */
	vactive = lt->mode.vactive;
	vfp = lt->mode.vfront_porch;
	vpw = lt->mode.vsync_len;
	vbp = lt->mode.vback_porch;
	v_total = vactive + vfp + vpw + vbp;

	/* Shorter settle for low resolutions, longer for 1080-line modes. */
	if (vactive <= 600)
		settle = 0x04;
	else if (vactive == 1080)
		settle = 0x0a;

	/* CEC/DSI-page timing registers; 16-bit values are split low/high. */
	ret = regmap_write(lt->regmap[I2C_CEC_DSI], 0x10, 0x01);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x11, settle);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x18, hpw);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x19, vpw);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x1c, hactive & 0xff);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x1d, hactive >> 8);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x2f, 0x0c);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x34, h_total & 0xff);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x35, h_total >> 8);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x36, v_total & 0xff);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x37, v_total >> 8);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x38, vbp & 0xff);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x39, vbp >> 8);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3a, vfp & 0xff);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3b, vfp >> 8);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3c, hbp & 0xff);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3d, hbp >> 8);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff);
	ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8);

	return ret;
}
/*
 * Run the one-time init + MIPI configuration sequence, exactly once per
 * hard power cycle (lt8912_hard_power_off() clears is_power_on).
 * The low two bits of register 0x13 select the DSI lane count.
 */
static int lt8912_soft_power_on(struct lt8912 *lt)
{
	u32 lanes;

	if (lt->is_power_on)
		return 0;

	lanes = lt->data_lanes;

	lt8912_write_init_config(lt);
	regmap_write(lt->regmap[I2C_CEC_DSI], 0x13, lanes & 3);

	lt8912_write_mipi_basic_config(lt);

	lt->is_power_on = true;

	return 0;
}
/*
 * Bring up the video path: timings, DDS clock, RX logic reset, then the
 * LVDS output recipe. Stops at the first failing step and returns its
 * error code; returns the last step's result on success.
 */
static int lt8912_video_on(struct lt8912 *lt)
{
	int ret;

	ret = lt8912_video_setup(lt);
	if (ret < 0)
		return ret;

	ret = lt8912_write_dds_config(lt);
	if (ret < 0)
		return ret;

	ret = lt8912_write_rxlogicres_config(lt);
	if (ret < 0)
		return ret;

	return lt8912_write_lvds_config(lt);
}
/*
 * Poll the chip's HPD status: bit 7 of main-page register 0xC1 is set
 * while a sink is connected. An I2C failure maps to "unknown".
 */
static enum drm_connector_status lt8912_check_cable_status(struct lt8912 *lt)
{
	unsigned int hpd;

	if (regmap_read(lt->regmap[I2C_MAIN], 0xC1, &hpd))
		return connector_status_unknown;

	return (hpd & BIT(7)) ? connector_status_connected
			      : connector_status_disconnected;
}
/*
 * Connector .detect hook: defer to the downstream HDMI bridge when it
 * implements detection itself, otherwise poll the LT8912 HPD register.
 * Mirrors lt8912_bridge_detect().
 */
static enum drm_connector_status
lt8912_connector_detect(struct drm_connector *connector, bool force)
{
	struct lt8912 *lt = connector_to_lt8912(connector);

	if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
		return drm_bridge_detect(lt->hdmi_port);

	return lt8912_check_cable_status(lt);
}
/* Connector ops; atomic state handling uses the stock DRM helpers. */
static const struct drm_connector_funcs lt8912_connector_funcs = {
	.detect = lt8912_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static enum drm_mode_status
lt8912_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->clock > 150000)
return MODE_CLOCK_HIGH;
if (mode->hdisplay > 1920)
return MODE_BAD_HVALUE;
if (mode->vdisplay > 1080)
return MODE_BAD_VVALUE;
return MODE_OK;
}
/*
 * Connector .get_modes hook: fetch the EDID through the downstream HDMI
 * bridge, publish it on the connector and add its modes.
 *
 * The DRM contract for .get_modes() is to return the number of modes
 * added — never a negative error code — so failures return 0 instead of
 * the previous bogus -1 / negative ret values.
 */
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
	struct lt8912 *lt = connector_to_lt8912(connector);
	u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
	struct edid *edid;
	int num;
	int ret;

	edid = drm_bridge_get_edid(lt->hdmi_port, connector);
	if (!edid)
		return 0;

	drm_connector_update_edid_property(connector, edid);
	num = drm_add_edid_modes(connector, edid);

	ret = drm_display_info_set_bus_formats(&connector->display_info,
					       &bus_format, 1);
	if (ret)
		num = 0;

	kfree(edid);

	return num;
}
/* Probe helpers: EDID-based mode list plus the chip's mode limits. */
static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = {
	.get_modes = lt8912_connector_get_modes,
	.mode_valid = lt8912_connector_mode_valid,
};
/*
 * Capture the adjusted mode as a videomode; the timings are programmed
 * later by lt8912_video_setup() when the bridge is enabled.
 */
static void lt8912_bridge_mode_set(struct drm_bridge *bridge,
				   const struct drm_display_mode *mode,
				   const struct drm_display_mode *adj)
{
	struct lt8912 *lt = bridge_to_lt8912(bridge);

	drm_display_mode_to_videomode(adj, &lt->mode);
}
/* Bridge .enable hook: start the video path (errors are not propagated
 * because the hook returns void). */
static void lt8912_bridge_enable(struct drm_bridge *bridge)
{
	struct lt8912 *lt = bridge_to_lt8912(bridge);

	lt8912_video_on(lt);
}
/*
 * Register ourselves as a DSI peripheral on the host found via the
 * input-port OF node and attach to it.
 *
 * Returns 0 on success, -EPROBE_DEFER when the DSI host has not been
 * registered yet, or a negative error code.
 */
static int lt8912_attach_dsi(struct lt8912 *lt)
{
	struct device *dev = lt->dev;
	struct mipi_dsi_host *host;
	struct mipi_dsi_device *dsi;
	int ret = -1;
	const struct mipi_dsi_device_info info = { .type = "lt8912",
						   .channel = 0,
						   .node = NULL,
						 };

	host = of_find_mipi_dsi_host_by_node(lt->host_node);
	if (!host) {
		dev_err(dev, "failed to find dsi host\n");
		return -EPROBE_DEFER;
	}

	dsi = mipi_dsi_device_register_full(host, &info);
	if (IS_ERR(dsi)) {
		ret = PTR_ERR(dsi);
		dev_err(dev, "failed to create dsi device (%d)\n", ret);
		goto err_dsi_device;
	}

	lt->dsi = dsi;

	/* Lane count comes from the DT "data-lanes" property. */
	dsi->lanes = lt->data_lanes;
	dsi->format = MIPI_DSI_FMT_RGB888;
	/*
	 * NOTE(review): MIPI_DSI_MODE_EOT_PACKET historically meant
	 * "disable the EOT packet" despite its name — confirm the
	 * intended polarity for this chip.
	 */
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
			  MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM |
			  MIPI_DSI_MODE_EOT_PACKET;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "failed to attach dsi to host\n");
		goto err_dsi_attach;
	}

	return 0;

err_dsi_attach:
	mipi_dsi_device_unregister(dsi);
err_dsi_device:
	return ret;
}
/* Undo lt8912_attach_dsi(): detach from the host, then unregister. */
static void lt8912_detach_dsi(struct lt8912 *lt)
{
	mipi_dsi_detach(lt->dsi);
	mipi_dsi_device_unregister(lt->dsi);
}
/*
 * Create and wire up our own connector (used only when the bridge is
 * attached without DRM_BRIDGE_ATTACH_NO_CONNECTOR). The connector type
 * is inherited from the downstream HDMI bridge.
 */
static int lt8912_bridge_connector_init(struct drm_bridge *bridge)
{
	struct lt8912 *lt = bridge_to_lt8912(bridge);
	struct drm_connector *connector = &lt->connector;
	int ret;

	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;

	ret = drm_connector_init(bridge->dev, connector,
				 &lt8912_connector_funcs,
				 lt->hdmi_port->type);
	if (ret)
		return ret;

	drm_connector_helper_add(connector, &lt8912_connector_helper_funcs);

	connector->dpms = DRM_MODE_DPMS_OFF;
	drm_connector_attach_encoder(connector, bridge->encoder);

	return 0;
}
/*
 * Bridge .attach hook: optionally create our connector, power the chip
 * up (hard reset release + init sequence) and attach to the DSI host.
 * On any failure after power-up the chip is powered back off.
 */
static int lt8912_bridge_attach(struct drm_bridge *bridge,
				enum drm_bridge_attach_flags flags)
{
	struct lt8912 *lt = bridge_to_lt8912(bridge);
	int ret;

	/* With NO_CONNECTOR the display driver creates the connector itself. */
	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = lt8912_bridge_connector_init(bridge);
		if (ret) {
			dev_err(lt->dev, "Failed to init bridge ! (%d)\n", ret);
			return ret;
		}
	}

	ret = lt8912_hard_power_on(lt);
	if (ret)
		return ret;

	ret = lt8912_soft_power_on(lt);
	if (ret)
		goto error;

	ret = lt8912_attach_dsi(lt);
	if (ret)
		goto error;

	/* Gates the teardown work in lt8912_bridge_detach(). */
	lt->is_attached = true;

	return 0;

error:
	lt8912_hard_power_off(lt);
	return ret;
}
/*
 * Bridge .detach hook: undo lt8912_bridge_attach().
 *
 * The connector is only initialized when the bridge was attached
 * WITHOUT DRM_BRIDGE_ATTACH_NO_CONNECTOR; unconditionally cleaning it
 * up would operate on an uninitialized drm_connector. Guard on
 * connector.dev, which drm_connector_init() sets.
 */
static void lt8912_bridge_detach(struct drm_bridge *bridge)
{
	struct lt8912 *lt = bridge_to_lt8912(bridge);

	if (lt->is_attached) {
		lt8912_detach_dsi(lt);
		lt8912_hard_power_off(lt);

		if (lt->connector.dev) {
			drm_connector_unregister(&lt->connector);
			drm_connector_cleanup(&lt->connector);
		}
	}
}
/*
 * Bridge .detect hook: defer to the downstream HDMI bridge when it
 * implements detection itself, otherwise poll the LT8912 HPD register.
 * Mirrors lt8912_connector_detect().
 */
static enum drm_connector_status
lt8912_bridge_detect(struct drm_bridge *bridge)
{
	struct lt8912 *lt = bridge_to_lt8912(bridge);

	if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT)
		return drm_bridge_detect(lt->hdmi_port);

	return lt8912_check_cable_status(lt);
}
/*
 * Bridge .get_edid hook: forward the request to the downstream HDMI
 * bridge, which owns the DDC bus. Returns NULL when that bridge cannot
 * read EDIDs. (Also fixes the grammar of the warning message.)
 */
static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge,
					   struct drm_connector *connector)
{
	struct lt8912 *lt = bridge_to_lt8912(bridge);

	/*
	 * edid must be read through the ddc bus but it must be
	 * given to the hdmi connector node.
	 */
	if (lt->hdmi_port->ops & DRM_BRIDGE_OP_EDID)
		return drm_bridge_get_edid(lt->hdmi_port, connector);

	dev_warn(lt->dev, "The connected bridge does not support DRM_BRIDGE_OP_EDID\n");
	return NULL;
}
/* Bridge ops advertised to DRM core (ops flags are set in probe). */
static const struct drm_bridge_funcs lt8912_bridge_funcs = {
	.attach = lt8912_bridge_attach,
	.detach = lt8912_bridge_detach,
	.mode_set = lt8912_bridge_mode_set,
	.enable = lt8912_bridge_enable,
	.detect = lt8912_bridge_detect,
	.get_edid = lt8912_bridge_get_edid,
};
/*
 * Parse the device tree: optional reset GPIO, DSI lane count from the
 * input endpoint, the DSI host node (port 0) and the downstream HDMI
 * bridge (port 1).
 *
 * Fixes over the previous version:
 *  - of_graph_get_endpoint_by_regs() and of_drm_find_bridge() return
 *    NULL on failure, never an ERR_PTR, so the old IS_ERR() checks were
 *    dead code; a missing bridge used to fall through with ret == 0 and
 *    later NULL-deref lt->hdmi_port.
 *  - a missing bridge now defers probe, since it may simply not have
 *    registered yet.
 *  - port_node is no longer leaked on the bridge-lookup error path.
 *
 * Returns 0 on success or a negative error code.
 */
static int lt8912_parse_dt(struct lt8912 *lt)
{
	struct gpio_desc *gp_reset;
	struct device *dev = lt->dev;
	struct device_node *port_node;
	struct device_node *endpoint;
	int ret = 0;

	gp_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(gp_reset)) {
		ret = PTR_ERR(gp_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset gpio: %d\n", ret);
		return ret;
	}
	lt->gp_reset = gp_reset;

	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
	if (!endpoint)
		return -ENODEV;

	lt->data_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
	of_node_put(endpoint);

	lt->host_node = of_graph_get_remote_node(dev->of_node, 0, -1);
	if (!lt->host_node) {
		dev_err(lt->dev, "%s: Failed to get remote port\n", __func__);
		return -ENODEV;
	}

	port_node = of_graph_get_remote_node(dev->of_node, 1, -1);
	if (!port_node) {
		dev_err(lt->dev, "%s: Failed to get connector port\n", __func__);
		ret = -ENODEV;
		goto err_free_host_node;
	}

	/* The bridge may not have registered yet: defer rather than fail. */
	lt->hdmi_port = of_drm_find_bridge(port_node);
	if (!lt->hdmi_port) {
		dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
		ret = -EPROBE_DEFER;
		goto err_free_port_node;
	}

	if (!of_device_is_compatible(port_node, "hdmi-connector")) {
		dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
		ret = -EINVAL;
	}

	of_node_put(port_node);
	return ret;

err_free_port_node:
	of_node_put(port_node);
err_free_host_node:
	of_node_put(lt->host_node);
	return ret;
}
/* Drop the DT node reference taken in lt8912_parse_dt(). */
static int lt8912_put_dt(struct lt8912 *lt)
{
	of_node_put(lt->host_node);
	return 0;
}
/*
 * I2C probe: allocate the per-device context, parse the DT, set up the
 * secondary I2C devices/regmaps and register the DRM bridge.
 *
 * The context pointer must NOT be 'static': a static pointer would be
 * clobbered if a second LT8912 device is probed. It is devm-allocated,
 * so no explicit free is needed on the error paths.
 */
static int lt8912_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct lt8912 *lt;
	int ret = 0;

	lt = devm_kzalloc(dev, sizeof(struct lt8912), GFP_KERNEL);
	if (!lt)
		return -ENOMEM;

	lt->dev = dev;
	/* Index 0 is the main page, addressed by the client itself. */
	lt->i2c_client[0] = client;

	ret = lt8912_parse_dt(lt);
	if (ret)
		goto err_dt_parse;

	ret = lt8912_init_i2c(lt, client);
	if (ret)
		goto err_i2c;

	i2c_set_clientdata(client, lt);

	lt->bridge.funcs = &lt8912_bridge_funcs;
	lt->bridge.of_node = dev->of_node;
	lt->bridge.ops = (DRM_BRIDGE_OP_EDID |
			  DRM_BRIDGE_OP_DETECT);

	drm_bridge_add(&lt->bridge);

	return 0;

err_i2c:
	lt8912_put_dt(lt);
err_dt_parse:
	return ret;
}
/*
 * I2C remove: tear everything down in reverse probe order. The bridge
 * detach also powers the chip off when it was attached.
 */
static int lt8912_remove(struct i2c_client *client)
{
	struct lt8912 *lt = i2c_get_clientdata(client);

	lt8912_bridge_detach(&lt->bridge);
	drm_bridge_remove(&lt->bridge);
	lt8912_free_i2c(lt);
	lt8912_put_dt(lt);
	return 0;
}
/* Device-tree and I2C ID tables for driver matching / autoloading. */
static const struct of_device_id lt8912_dt_match[] = {
	{.compatible = "lontium,lt8912b"},
	{}
};
MODULE_DEVICE_TABLE(of, lt8912_dt_match);

static const struct i2c_device_id lt8912_id[] = {
	{"lt8912", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, lt8912_id);
/*
 * Driver registration. The explicit ".owner = THIS_MODULE" is dropped:
 * i2c_add_driver() (via module_i2c_driver()) already sets the owner.
 */
static struct i2c_driver lt8912_i2c_driver = {
	.driver = {
		.name = "lt8912",
		.of_match_table = lt8912_dt_match,
	},
	.probe = lt8912_probe,
	.remove = lt8912_remove,
	.id_table = lt8912_id,
};
module_i2c_driver(lt8912_i2c_driver);

MODULE_AUTHOR("Adrien Grassein <adrien.grassein@gmail.com>");
MODULE_DESCRIPTION("lt8912 drm driver");
MODULE_LICENSE("GPL v2");

Просмотреть файл

@ -867,8 +867,14 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
return lt9611_mode ? MODE_OK : MODE_BAD;
if (!lt9611_mode)
return MODE_BAD;
else if (lt9611_mode->intfs > 1 && !lt9611->dsi1)
return MODE_PANEL;
else
return MODE_OK;
}
static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)

Просмотреть файл

@ -855,7 +855,7 @@ static ssize_t lt9611uxc_firmware_show(struct device *dev, struct device_attribu
{
struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%02x\n", lt9611uxc->fw_version);
return sysfs_emit(buf, "%02x\n", lt9611uxc->fw_version);
}
static DEVICE_ATTR_RW(lt9611uxc_firmware);

Просмотреть файл

@ -87,6 +87,18 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
static void panel_bridge_detach(struct drm_bridge *bridge)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
/*
* Cleanup the connector if we know it was initialized.
*
* FIXME: This wouldn't be needed if the panel_bridge structure was
* allocated with drmm_kzalloc(). This might be tricky since the
* drm_device pointer can only be retrieved when the bridge is attached.
*/
if (connector->dev)
drm_connector_cleanup(connector);
}
static void panel_bridge_pre_enable(struct drm_bridge *bridge)

Просмотреть файл

@ -1414,11 +1414,15 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
ret = drm_dp_aux_register(&tc->aux);
if (ret < 0)
return ret;
/* Create DP/eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, tc->bridge.type);
if (ret)
return ret;
goto aux_unregister;
/* Don't poll if don't have HPD connected */
if (tc->hpd_pin >= 0) {
@ -1438,10 +1442,19 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
aux_unregister:
drm_dp_aux_unregister(&tc->aux);
return ret;
}
static void tc_bridge_detach(struct drm_bridge *bridge)
{
drm_dp_aux_unregister(&bridge_to_tc(bridge)->aux);
}
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
.detach = tc_bridge_detach,
.mode_valid = tc_mode_valid,
.mode_set = tc_bridge_mode_set,
.enable = tc_bridge_enable,
@ -1680,9 +1693,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->aux.name = "TC358767 AUX i2c adapter";
tc->aux.dev = tc->dev;
tc->aux.transfer = tc_aux_transfer;
ret = drm_dp_aux_register(&tc->aux);
if (ret)
return ret;
drm_dp_aux_init(&tc->aux);
tc->bridge.funcs = &tc_bridge_funcs;
if (tc->hpd_pin >= 0)
@ -1702,7 +1713,6 @@ static int tc_remove(struct i2c_client *client)
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
drm_dp_aux_unregister(&tc->aux);
return 0;
}

Просмотреть файл

@ -362,12 +362,18 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
ret = drm_dp_aux_register(&pdata->aux);
if (ret < 0) {
drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret);
return ret;
}
ret = drm_connector_init(bridge->dev, &pdata->connector,
&ti_sn_bridge_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
return ret;
goto err_conn_init;
}
drm_connector_helper_add(&pdata->connector,
@ -424,9 +430,16 @@ err_dsi_attach:
mipi_dsi_device_unregister(dsi);
err_dsi_host:
drm_connector_cleanup(&pdata->connector);
err_conn_init:
drm_dp_aux_unregister(&pdata->aux);
return ret;
}
static void ti_sn_bridge_detach(struct drm_bridge *bridge)
{
drm_dp_aux_unregister(&bridge_to_ti_sn_bridge(bridge)->aux);
}
static void ti_sn_bridge_disable(struct drm_bridge *bridge)
{
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
@ -863,6 +876,7 @@ static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
.pre_enable = ti_sn_bridge_pre_enable,
.enable = ti_sn_bridge_enable,
.disable = ti_sn_bridge_disable,
@ -1287,7 +1301,7 @@ static int ti_sn_bridge_probe(struct i2c_client *client,
pdata->aux.name = "ti-sn65dsi86-aux";
pdata->aux.dev = pdata->dev;
pdata->aux.transfer = ti_sn_aux_transfer;
drm_dp_aux_register(&pdata->aux);
drm_dp_aux_init(&pdata->aux);
pdata->bridge.funcs = &ti_sn_bridge_funcs;
pdata->bridge.of_node = client->dev.of_node;

Просмотреть файл

@ -61,9 +61,9 @@
*
* This library also provides implementations for all the legacy driver
* interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
* drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the
* various functions to implement set_property callbacks. New drivers must not
* implement these functions themselves but must use the provided helpers.
* drm_atomic_helper_disable_plane(), and the various functions to implement
* set_property callbacks. New drivers must not implement these functions
* themselves but must use the provided helpers.
*
* The atomic helper uses the same function table structures as all other
* modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
@ -592,11 +592,10 @@ mode_valid(struct drm_atomic_state *state)
*
* Drivers which set &drm_crtc_state.mode_changed (e.g. in their
* &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
* without a full modeset) _must_ call this function afterwards after that
* change. It is permitted to call this function multiple times for the same
* update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
* upon the adjusted dotclock for fifo space allocation and watermark
* computation.
* without a full modeset) _must_ call this function after that change. It is
* permitted to call this function multiple times for the same update, e.g.
* when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
* adjusted dotclock for fifo space allocation and watermark computation.
*
* RETURNS:
* Zero for success or -errno

Просмотреть файл

@ -94,6 +94,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
{ DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
{ DRM_MODE_CONNECTOR_SPI, "SPI" },
{ DRM_MODE_CONNECTOR_USB, "USB" },
};
void drm_connector_ida_init(void)

Просмотреть файл

@ -0,0 +1,132 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <drm/drm_displayid.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
static int validate_displayid(const u8 *displayid, int length, int idx)
{
int i, dispid_length;
u8 csum = 0;
const struct displayid_header *base;
base = (const struct displayid_header *)&displayid[idx];
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
return -EINVAL;
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
return -EINVAL;
}
return 0;
}
static const u8 *drm_find_displayid_extension(const struct edid *edid,
int *length, int *idx,
int *ext_index)
{
const u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
int ret;
if (!displayid)
return NULL;
/* EDID extensions block checksum isn't for us */
*length = EDID_LENGTH - 1;
*idx = 1;
ret = validate_displayid(displayid, *length, *idx);
if (ret)
return NULL;
base = (const struct displayid_header *)&displayid[*idx];
*length = *idx + sizeof(*base) + base->bytes;
return displayid;
}
void displayid_iter_edid_begin(const struct edid *edid,
struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
iter->edid = edid;
}
static const struct displayid_block *
displayid_iter_block(const struct displayid_iter *iter)
{
const struct displayid_block *block;
if (!iter->section)
return NULL;
block = (const struct displayid_block *)&iter->section[iter->idx];
if (iter->idx + sizeof(*block) <= iter->length &&
iter->idx + sizeof(*block) + block->num_bytes <= iter->length)
return block;
return NULL;
}
const struct displayid_block *
__displayid_iter_next(struct displayid_iter *iter)
{
const struct displayid_block *block;
if (!iter->edid)
return NULL;
if (iter->section) {
/* current block should always be valid */
block = displayid_iter_block(iter);
if (WARN_ON(!block)) {
iter->section = NULL;
iter->edid = NULL;
return NULL;
}
/* next block in section */
iter->idx += sizeof(*block) + block->num_bytes;
block = displayid_iter_block(iter);
if (block)
return block;
}
for (;;) {
iter->section = drm_find_displayid_extension(iter->edid,
&iter->length,
&iter->idx,
&iter->ext_index);
if (!iter->section) {
iter->edid = NULL;
return NULL;
}
iter->idx += sizeof(struct displayid_header);
block = displayid_iter_block(iter);
if (block)
return block;
}
}
void displayid_iter_end(struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
}

Просмотреть файл

@ -4727,6 +4727,28 @@ static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
queue_work(system_long_wq, &mgr->tx_work);
}
/*
* Helper function for parsing DP device types into convenient strings
* for use with dp_mst_topology
*/
static const char *pdt_to_string(u8 pdt)
{
switch (pdt) {
case DP_PEER_DEVICE_NONE:
return "NONE";
case DP_PEER_DEVICE_SOURCE_OR_SST:
return "SOURCE OR SST";
case DP_PEER_DEVICE_MST_BRANCHING:
return "MST BRANCHING";
case DP_PEER_DEVICE_SST_SINK:
return "SST SINK";
case DP_PEER_DEVICE_DP_LEGACY_CONV:
return "DP LEGACY CONV";
default:
return "ERR";
}
}
static void drm_dp_mst_dump_mstb(struct seq_file *m,
struct drm_dp_mst_branch *mstb)
{
@ -4739,9 +4761,20 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
prefix[i] = '\t';
prefix[i] = '\0';
seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
list_for_each_entry(port, &mstb->ports, next) {
seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
prefix,
port->port_num,
port,
port->input ? "input" : "output",
pdt_to_string(port->pdt),
port->ddps,
port->ldps,
port->num_sdp_streams,
port->num_sdp_stream_sinks,
port->fec_capable ? "true" : "false",
port->connector);
if (port->mstb)
drm_dp_mst_dump_mstb(m, port->mstb);
}
@ -4794,33 +4827,37 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
mutex_unlock(&mgr->lock);
mutex_lock(&mgr->payload_lock);
seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
mgr->max_payloads);
seq_printf(m, "\n*** VCPI Info ***\n");
seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads);
seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n");
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->proposed_vcpis[i]) {
char name[14];
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
fetch_monitor_name(mgr, port, name, sizeof(name));
seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
port->port_num, port->vcpi.vcpi,
seq_printf(m, "%10d%10d%10d%10d%20s\n",
i,
port->port_num,
port->vcpi.vcpi,
port->vcpi.num_slots,
(*name != 0) ? name : "Unknown");
(*name != 0) ? name : "Unknown");
} else
seq_printf(m, "vcpi %d:unused\n", i);
seq_printf(m, "%6d - Unused\n", i);
}
seq_printf(m, "\n*** Payload Info ***\n");
seq_printf(m, "| idx | state | start slot | # slots |\n");
for (i = 0; i < mgr->max_payloads; i++) {
seq_printf(m, "payload %d: %d, %d, %d\n",
seq_printf(m, "%10d%10d%15d%10d\n",
i,
mgr->payloads[i].payload_state,
mgr->payloads[i].start_slot,
mgr->payloads[i].num_slots);
}
mutex_unlock(&mgr->payload_lock);
seq_printf(m, "\n*** DPCD Info ***\n");
mutex_lock(&mgr->lock);
if (mgr->mst_primary) {
u8 buf[DP_PAYLOAD_TABLE_SIZE];

Просмотреть файл

@ -1585,8 +1585,6 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
static int validate_displayid(u8 *displayid, int length, int idx);
static int drm_edid_block_checksum(const u8 *raw_edid)
{
int i;
@ -3241,10 +3239,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/*
* Search EDID for CEA extension block.
*/
static u8 *drm_find_edid_extension(const struct edid *edid,
int ext_id, int *ext_index)
const u8 *drm_find_edid_extension(const struct edid *edid,
int ext_id, int *ext_index)
{
u8 *edid_ext = NULL;
const u8 *edid_ext = NULL;
int i;
/* No EDID or EDID extensions */
@ -3253,7 +3251,7 @@ static u8 *drm_find_edid_extension(const struct edid *edid,
/* Find CEA extension */
for (i = *ext_index; i < edid->extensions; i++) {
edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
edid_ext = (const u8 *)edid + EDID_LENGTH * (i + 1);
if (edid_ext[0] == ext_id)
break;
}
@ -3266,63 +3264,30 @@ static u8 *drm_find_edid_extension(const struct edid *edid,
return edid_ext;
}
static u8 *drm_find_displayid_extension(const struct edid *edid,
int *length, int *idx,
int *ext_index)
static const u8 *drm_find_cea_extension(const struct edid *edid)
{
u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT, ext_index);
struct displayid_hdr *base;
int ret;
if (!displayid)
return NULL;
/* EDID extensions block checksum isn't for us */
*length = EDID_LENGTH - 1;
*idx = 1;
ret = validate_displayid(displayid, *length, *idx);
if (ret)
return NULL;
base = (struct displayid_hdr *)&displayid[*idx];
*length = *idx + sizeof(*base) + base->bytes;
return displayid;
}
static u8 *drm_find_cea_extension(const struct edid *edid)
{
int length, idx;
struct displayid_block *block;
u8 *cea;
u8 *displayid;
int ext_index;
const struct displayid_block *block;
struct displayid_iter iter;
const u8 *cea;
int ext_index = 0;
/* Look for a top level CEA extension block */
/* FIXME: make callers iterate through multiple CEA ext blocks? */
ext_index = 0;
cea = drm_find_edid_extension(edid, CEA_EXT, &ext_index);
if (cea)
return cea;
/* CEA blocks can also be found embedded in a DisplayID block */
ext_index = 0;
for (;;) {
displayid = drm_find_displayid_extension(edid, &length, &idx,
&ext_index);
if (!displayid)
return NULL;
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
if (block->tag == DATA_BLOCK_CTA)
return (u8 *)block;
displayid_iter_edid_begin(edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_CTA) {
cea = (const u8 *)block;
break;
}
}
displayid_iter_end(&iter);
return NULL;
return cea;
}
static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
@ -4503,8 +4468,8 @@ static void clear_eld(struct drm_connector *connector)
static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
uint8_t *eld = connector->eld;
u8 *cea;
u8 *db;
const u8 *cea;
const u8 *db;
int total_sad_count = 0;
int mnl;
int dbl;
@ -4600,7 +4565,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
{
int count = 0;
int i, start, end, dbl;
u8 *cea;
const u8 *cea;
cea = drm_find_cea_extension(edid);
if (!cea) {
@ -4619,7 +4584,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
}
for_each_cea_db(cea, i, start, end) {
u8 *db = &cea[i];
const u8 *db = &cea[i];
if (cea_db_tag(db) == AUDIO_BLOCK) {
int j;
@ -4631,7 +4596,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
if (!*sads)
return -ENOMEM;
for (j = 0; j < count; j++) {
u8 *sad = &db[1 + j * 3];
const u8 *sad = &db[1 + j * 3];
(*sads)[j].format = (sad[0] & 0x78) >> 3;
(*sads)[j].channels = sad[0] & 0x7;
@ -4755,7 +4720,7 @@ EXPORT_SYMBOL(drm_av_sync_delay);
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
{
u8 *edid_ext;
const u8 *edid_ext;
int i;
int start_offset, end_offset;
@ -4793,7 +4758,7 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
*/
bool drm_detect_monitor_audio(struct edid *edid)
{
u8 *edid_ext;
const u8 *edid_ext;
int i, j;
bool has_audio = false;
int start_offset, end_offset;
@ -5287,32 +5252,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
return quirks;
}
static int validate_displayid(u8 *displayid, int length, int idx)
{
int i, dispid_length;
u8 csum = 0;
struct displayid_hdr *base;
base = (struct displayid_hdr *)&displayid[idx];
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
return -EINVAL;
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
return -EINVAL;
}
return 0;
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
struct displayid_detailed_timings_1 *timings)
{
@ -5359,7 +5298,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
}
static int add_displayid_detailed_1_modes(struct drm_connector *connector,
struct displayid_block *block)
const struct displayid_block *block)
{
struct displayid_detailed_timing_block *det = (struct displayid_detailed_timing_block *)block;
int i;
@ -5387,27 +5326,16 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
static int add_displayid_detailed_modes(struct drm_connector *connector,
struct edid *edid)
{
u8 *displayid;
int length, idx;
struct displayid_block *block;
const struct displayid_block *block;
struct displayid_iter iter;
int num_modes = 0;
int ext_index = 0;
for (;;) {
displayid = drm_find_displayid_extension(edid, &length, &idx,
&ext_index);
if (!displayid)
break;
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
switch (block->tag) {
case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
num_modes += add_displayid_detailed_1_modes(connector, block);
break;
}
}
displayid_iter_edid_begin(edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING)
num_modes += add_displayid_detailed_1_modes(connector, block);
}
displayid_iter_end(&iter);
return num_modes;
}
@ -6041,43 +5969,20 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
}
}
static void drm_displayid_parse_tiled(struct drm_connector *connector,
const u8 *displayid, int length, int idx)
{
const struct displayid_block *block;
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
block->tag, block->rev, block->num_bytes);
switch (block->tag) {
case DATA_BLOCK_TILED_DISPLAY:
drm_parse_tiled_block(connector, block);
break;
default:
DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
break;
}
}
}
void drm_update_tile_info(struct drm_connector *connector,
const struct edid *edid)
{
const void *displayid = NULL;
int ext_index = 0;
int length, idx;
const struct displayid_block *block;
struct displayid_iter iter;
connector->has_tile = false;
for (;;) {
displayid = drm_find_displayid_extension(edid, &length, &idx,
&ext_index);
if (!displayid)
break;
drm_displayid_parse_tiled(connector, displayid, length, idx);
displayid_iter_edid_begin(edid, &iter);
displayid_iter_for_each(block, &iter) {
if (block->tag == DATA_BLOCK_TILED_DISPLAY)
drm_parse_tiled_block(connector, block);
}
displayid_iter_end(&iter);
if (!connector->has_tile && connector->tile_group) {
drm_mode_put_tile_group(connector->dev, connector->tile_group);

Просмотреть файл

@ -624,6 +624,7 @@ static void output_poll_execute(struct work_struct *work)
struct drm_connector_list_iter conn_iter;
enum drm_connector_status old_status;
bool repoll = false, changed;
u64 old_epoch_counter;
if (!dev->mode_config.poll_enabled)
return;
@ -660,8 +661,9 @@ static void output_poll_execute(struct work_struct *work)
repoll = true;
old_epoch_counter = connector->epoch_counter;
connector->status = drm_helper_probe_detect(connector, NULL, false);
if (old_status != connector->status) {
if (old_epoch_counter != connector->epoch_counter) {
const char *old, *new;
/*
@ -690,6 +692,9 @@ static void output_poll_execute(struct work_struct *work)
connector->base.id,
connector->name,
old, new);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
connector->base.id, connector->name,
old_epoch_counter, connector->epoch_counter);
changed = true;
}

Просмотреть файл

@ -43,7 +43,7 @@
* property types and ranges.
*
* Properties don't store the current value directly, but need to be
* instatiated by attaching them to a &drm_mode_object with
* instantiated by attaching them to a &drm_mode_object with
* drm_object_attach_property().
*
* Property values are only 64bit. To support bigger piles of data (like gamma
@ -644,7 +644,7 @@ EXPORT_SYMBOL(drm_property_blob_get);
* @id: id of the blob property
*
* If successful, this takes an additional reference to the blob property.
* callers need to make sure to eventually unreference the returned property
* callers need to make sure to eventually unreferenced the returned property
* again, using drm_property_blob_put().
*
* Return:

Просмотреть файл

@ -156,8 +156,8 @@ static ssize_t status_show(struct device *device,
status = READ_ONCE(connector->status);
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
return sysfs_emit(buf, "%s\n",
drm_get_connector_status_name(status));
}
static ssize_t dpms_show(struct device *device,
@ -169,8 +169,7 @@ static ssize_t dpms_show(struct device *device,
dpms = READ_ONCE(connector->dpms);
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_dpms_name(dpms));
return sysfs_emit(buf, "%s\n", drm_get_dpms_name(dpms));
}
static ssize_t enabled_show(struct device *device,
@ -182,7 +181,7 @@ static ssize_t enabled_show(struct device *device,
enabled = READ_ONCE(connector->encoder);
return snprintf(buf, PAGE_SIZE, enabled ? "enabled\n" : "disabled\n");
return sysfs_emit(buf, enabled ? "enabled\n" : "disabled\n");
}
static ssize_t edid_show(struct file *filp, struct kobject *kobj,

Просмотреть файл

@ -333,7 +333,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
ret = -ENOMEM;
dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, NULL);
if (!dev_priv->mmu)
goto out_err;

Просмотреть файл

@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_GUD
tristate "GUD USB Display"
depends on DRM && USB
select LZ4_COMPRESS
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select BACKLIGHT_CLASS_DEVICE
help
This is a DRM display driver for GUD USB Displays or display
adapters.
If M is selected the module will be called gud.

Просмотреть файл

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
gud-y := gud_drv.o gud_pipe.o gud_connector.o
obj-$(CONFIG_DRM_GUD) += gud.o

Просмотреть файл

@ -0,0 +1,729 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2020 Noralf Trønnes
*/
#include <linux/backlight.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
struct gud_connector {
struct drm_connector connector;
struct drm_encoder encoder;
struct backlight_device *backlight;
struct work_struct backlight_work;
/* Supported properties */
u16 *properties;
unsigned int num_properties;
/* Initial gadget tv state if applicable, applied on state reset */
struct drm_tv_connector_state initial_tv_state;
/*
* Initial gadget backlight brightness if applicable, applied on state reset.
* The value -ENODEV is used to signal no backlight.
*/
int initial_brightness;
};
static inline struct gud_connector *to_gud_connector(struct drm_connector *connector)
{
return container_of(connector, struct gud_connector, connector);
}
static void gud_conn_err(struct drm_connector *connector, const char *msg, int ret)
{
dev_err(connector->dev->dev, "%s: %s (ret=%d)\n", connector->name, msg, ret);
}
/*
* Use a worker to avoid taking kms locks inside the backlight lock.
* Other display drivers use backlight within their kms locks.
* This avoids inconsistent locking rules, which would upset lockdep.
*/
static void gud_connector_backlight_update_status_work(struct work_struct *work)
{
struct gud_connector *gconn = container_of(work, struct gud_connector, backlight_work);
struct drm_connector *connector = &gconn->connector;
struct drm_connector_state *connector_state;
struct drm_device *drm = connector->dev;
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
int idx, ret;
if (!drm_dev_enter(drm, &idx))
return;
state = drm_atomic_state_alloc(drm);
if (!state) {
ret = -ENOMEM;
goto exit;
}
drm_modeset_acquire_init(&ctx, 0);
state->acquire_ctx = &ctx;
retry:
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
ret = PTR_ERR(connector_state);
goto out;
}
/* Reuse tv.brightness to avoid having to subclass */
connector_state->tv.brightness = gconn->backlight->props.brightness;
ret = drm_atomic_commit(state);
out:
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
exit:
drm_dev_exit(idx);
if (ret)
dev_err(drm->dev, "Failed to update backlight, err=%d\n", ret);
}
static int gud_connector_backlight_update_status(struct backlight_device *bd)
{
struct drm_connector *connector = bl_get_data(bd);
struct gud_connector *gconn = to_gud_connector(connector);
/* The USB timeout is 5 seconds so use system_long_wq for worst case scenario */
queue_work(system_long_wq, &gconn->backlight_work);
return 0;
}
static const struct backlight_ops gud_connector_backlight_ops = {
.update_status = gud_connector_backlight_update_status,
};
static int gud_connector_backlight_register(struct gud_connector *gconn)
{
struct drm_connector *connector = &gconn->connector;
struct backlight_device *bd;
const char *name;
const struct backlight_properties props = {
.type = BACKLIGHT_RAW,
.scale = BACKLIGHT_SCALE_NON_LINEAR,
.max_brightness = 100,
.brightness = gconn->initial_brightness,
};
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
connector->dev->primary->index, connector->name);
if (!name)
return -ENOMEM;
bd = backlight_device_register(name, connector->kdev, connector,
&gud_connector_backlight_ops, &props);
kfree(name);
if (IS_ERR(bd))
return PTR_ERR(bd);
gconn->backlight = bd;
return 0;
}
static int gud_connector_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct gud_device *gdrm = to_gud_device(connector->dev);
int idx, ret;
u8 status;
if (!drm_dev_enter(connector->dev, &idx))
return connector_status_disconnected;
if (force) {
ret = gud_usb_set(gdrm, GUD_REQ_SET_CONNECTOR_FORCE_DETECT,
connector->index, NULL, 0);
if (ret) {
ret = connector_status_unknown;
goto exit;
}
}
ret = gud_usb_get_u8(gdrm, GUD_REQ_GET_CONNECTOR_STATUS, connector->index, &status);
if (ret) {
ret = connector_status_unknown;
goto exit;
}
switch (status & GUD_CONNECTOR_STATUS_CONNECTED_MASK) {
case GUD_CONNECTOR_STATUS_DISCONNECTED:
ret = connector_status_disconnected;
break;
case GUD_CONNECTOR_STATUS_CONNECTED:
ret = connector_status_connected;
break;
default:
ret = connector_status_unknown;
break;
}
if (status & GUD_CONNECTOR_STATUS_CHANGED)
connector->epoch_counter += 1;
exit:
drm_dev_exit(idx);
return ret;
}
struct gud_connector_get_edid_ctx {
void *buf;
size_t len;
bool edid_override;
};
static int gud_connector_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
struct gud_connector_get_edid_ctx *ctx = data;
size_t start = block * EDID_LENGTH;
ctx->edid_override = false;
if (start + len > ctx->len)
return -1;
memcpy(buf, ctx->buf + start, len);
return 0;
}
static int gud_connector_get_modes(struct drm_connector *connector)
{
struct gud_device *gdrm = to_gud_device(connector->dev);
struct gud_display_mode_req *reqmodes = NULL;
struct gud_connector_get_edid_ctx edid_ctx;
unsigned int i, num_modes = 0;
struct edid *edid = NULL;
int idx, ret;
if (!drm_dev_enter(connector->dev, &idx))
return 0;
edid_ctx.edid_override = true;
edid_ctx.buf = kmalloc(GUD_CONNECTOR_MAX_EDID_LEN, GFP_KERNEL);
if (!edid_ctx.buf)
goto out;
ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTOR_EDID, connector->index,
edid_ctx.buf, GUD_CONNECTOR_MAX_EDID_LEN);
if (ret > 0 && ret % EDID_LENGTH) {
gud_conn_err(connector, "Invalid EDID size", ret);
} else if (ret > 0) {
edid_ctx.len = ret;
edid = drm_do_get_edid(connector, gud_connector_get_edid_block, &edid_ctx);
}
kfree(edid_ctx.buf);
drm_connector_update_edid_property(connector, edid);
if (edid && edid_ctx.edid_override)
goto out;
reqmodes = kmalloc_array(GUD_CONNECTOR_MAX_NUM_MODES, sizeof(*reqmodes), GFP_KERNEL);
if (!reqmodes)
goto out;
ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTOR_MODES, connector->index,
reqmodes, GUD_CONNECTOR_MAX_NUM_MODES * sizeof(*reqmodes));
if (ret <= 0)
goto out;
if (ret % sizeof(*reqmodes)) {
gud_conn_err(connector, "Invalid display mode array size", ret);
goto out;
}
num_modes = ret / sizeof(*reqmodes);
for (i = 0; i < num_modes; i++) {
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
if (!mode) {
num_modes = i;
goto out;
}
gud_to_display_mode(mode, &reqmodes[i]);
drm_mode_probed_add(connector, mode);
}
out:
if (!num_modes)
num_modes = drm_add_edid_modes(connector, edid);
kfree(reqmodes);
kfree(edid);
drm_dev_exit(idx);
return num_modes;
}
static int gud_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *new_state;
struct drm_crtc_state *new_crtc_state;
struct drm_connector_state *old_state;
new_state = drm_atomic_get_new_connector_state(state, connector);
if (!new_state->crtc)
return 0;
old_state = drm_atomic_get_old_connector_state(state, connector);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (old_state->tv.margins.left != new_state->tv.margins.left ||
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
old_state->tv.margins.bottom != new_state->tv.margins.bottom ||
old_state->tv.mode != new_state->tv.mode ||
old_state->tv.brightness != new_state->tv.brightness ||
old_state->tv.contrast != new_state->tv.contrast ||
old_state->tv.flicker_reduction != new_state->tv.flicker_reduction ||
old_state->tv.overscan != new_state->tv.overscan ||
old_state->tv.saturation != new_state->tv.saturation ||
old_state->tv.hue != new_state->tv.hue)
new_crtc_state->connectors_changed = true;
return 0;
}
static const struct drm_connector_helper_funcs gud_connector_helper_funcs = {
.detect_ctx = gud_connector_detect,
.get_modes = gud_connector_get_modes,
.atomic_check = gud_connector_atomic_check,
};
static int gud_connector_late_register(struct drm_connector *connector)
{
struct gud_connector *gconn = to_gud_connector(connector);
if (gconn->initial_brightness < 0)
return 0;
return gud_connector_backlight_register(gconn);
}
static void gud_connector_early_unregister(struct drm_connector *connector)
{
struct gud_connector *gconn = to_gud_connector(connector);
backlight_device_unregister(gconn->backlight);
cancel_work_sync(&gconn->backlight_work);
}
static void gud_connector_destroy(struct drm_connector *connector)
{
struct gud_connector *gconn = to_gud_connector(connector);
drm_connector_cleanup(connector);
kfree(gconn->properties);
kfree(gconn);
}
static void gud_connector_reset(struct drm_connector *connector)
{
struct gud_connector *gconn = to_gud_connector(connector);
drm_atomic_helper_connector_reset(connector);
connector->state->tv = gconn->initial_tv_state;
/* Set margins from command line */
drm_atomic_helper_connector_tv_reset(connector);
if (gconn->initial_brightness >= 0)
connector->state->tv.brightness = gconn->initial_brightness;
}
static const struct drm_connector_funcs gud_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = gud_connector_late_register,
.early_unregister = gud_connector_early_unregister,
.destroy = gud_connector_destroy,
.reset = gud_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
/*
* The tv.mode property is shared among the connectors and its enum names are
* driver specific. This means that if more than one connector uses tv.mode,
* the enum names has to be the same.
*/
static int gud_connector_add_tv_mode(struct gud_device *gdrm, struct drm_connector *connector)
{
size_t buf_len = GUD_CONNECTOR_TV_MODE_MAX_NUM * GUD_CONNECTOR_TV_MODE_NAME_LEN;
const char *modes[GUD_CONNECTOR_TV_MODE_MAX_NUM];
unsigned int i, num_modes;
char *buf;
int ret;
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTOR_TV_MODE_VALUES,
connector->index, buf, buf_len);
if (ret < 0)
goto free;
if (!ret || ret % GUD_CONNECTOR_TV_MODE_NAME_LEN) {
ret = -EIO;
goto free;
}
num_modes = ret / GUD_CONNECTOR_TV_MODE_NAME_LEN;
for (i = 0; i < num_modes; i++)
modes[i] = &buf[i * GUD_CONNECTOR_TV_MODE_NAME_LEN];
ret = drm_mode_create_tv_properties(connector->dev, num_modes, modes);
free:
kfree(buf);
if (ret < 0)
gud_conn_err(connector, "Failed to add TV modes", ret);
return ret;
}
static struct drm_property *
gud_connector_property_lookup(struct drm_connector *connector, u16 prop)
{
struct drm_mode_config *config = &connector->dev->mode_config;
switch (prop) {
case GUD_PROPERTY_TV_LEFT_MARGIN:
return config->tv_left_margin_property;
case GUD_PROPERTY_TV_RIGHT_MARGIN:
return config->tv_right_margin_property;
case GUD_PROPERTY_TV_TOP_MARGIN:
return config->tv_top_margin_property;
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
return config->tv_bottom_margin_property;
case GUD_PROPERTY_TV_MODE:
return config->tv_mode_property;
case GUD_PROPERTY_TV_BRIGHTNESS:
return config->tv_brightness_property;
case GUD_PROPERTY_TV_CONTRAST:
return config->tv_contrast_property;
case GUD_PROPERTY_TV_FLICKER_REDUCTION:
return config->tv_flicker_reduction_property;
case GUD_PROPERTY_TV_OVERSCAN:
return config->tv_overscan_property;
case GUD_PROPERTY_TV_SATURATION:
return config->tv_saturation_property;
case GUD_PROPERTY_TV_HUE:
return config->tv_hue_property;
default:
return ERR_PTR(-EINVAL);
}
}
static unsigned int *gud_connector_tv_state_val(u16 prop, struct drm_tv_connector_state *state)
{
switch (prop) {
case GUD_PROPERTY_TV_LEFT_MARGIN:
return &state->margins.left;
case GUD_PROPERTY_TV_RIGHT_MARGIN:
return &state->margins.right;
case GUD_PROPERTY_TV_TOP_MARGIN:
return &state->margins.top;
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
return &state->margins.bottom;
case GUD_PROPERTY_TV_MODE:
return &state->mode;
case GUD_PROPERTY_TV_BRIGHTNESS:
return &state->brightness;
case GUD_PROPERTY_TV_CONTRAST:
return &state->contrast;
case GUD_PROPERTY_TV_FLICKER_REDUCTION:
return &state->flicker_reduction;
case GUD_PROPERTY_TV_OVERSCAN:
return &state->overscan;
case GUD_PROPERTY_TV_SATURATION:
return &state->saturation;
case GUD_PROPERTY_TV_HUE:
return &state->hue;
default:
return ERR_PTR(-EINVAL);
}
}
static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_connector *gconn)
{
struct drm_connector *connector = &gconn->connector;
struct drm_device *drm = &gdrm->drm;
struct gud_property_req *properties;
unsigned int i, num_properties;
int ret;
properties = kcalloc(GUD_CONNECTOR_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL);
if (!properties)
return -ENOMEM;
ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTOR_PROPERTIES, connector->index,
properties, GUD_CONNECTOR_PROPERTIES_MAX_NUM * sizeof(*properties));
if (ret <= 0)
goto out;
if (ret % sizeof(*properties)) {
ret = -EIO;
goto out;
}
num_properties = ret / sizeof(*properties);
ret = 0;
gconn->properties = kcalloc(num_properties, sizeof(*gconn->properties), GFP_KERNEL);
if (!gconn->properties) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < num_properties; i++) {
u16 prop = le16_to_cpu(properties[i].prop);
u64 val = le64_to_cpu(properties[i].val);
struct drm_property *property;
unsigned int *state_val;
drm_dbg(drm, "property: %u = %llu(0x%llx)\n", prop, val, val);
switch (prop) {
case GUD_PROPERTY_TV_LEFT_MARGIN:
fallthrough;
case GUD_PROPERTY_TV_RIGHT_MARGIN:
fallthrough;
case GUD_PROPERTY_TV_TOP_MARGIN:
fallthrough;
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
ret = drm_mode_create_tv_margin_properties(drm);
if (ret)
goto out;
break;
case GUD_PROPERTY_TV_MODE:
ret = gud_connector_add_tv_mode(gdrm, connector);
if (ret)
goto out;
break;
case GUD_PROPERTY_TV_BRIGHTNESS:
fallthrough;
case GUD_PROPERTY_TV_CONTRAST:
fallthrough;
case GUD_PROPERTY_TV_FLICKER_REDUCTION:
fallthrough;
case GUD_PROPERTY_TV_OVERSCAN:
fallthrough;
case GUD_PROPERTY_TV_SATURATION:
fallthrough;
case GUD_PROPERTY_TV_HUE:
/* This is a no-op if already added. */
ret = drm_mode_create_tv_properties(drm, 0, NULL);
if (ret)
goto out;
break;
case GUD_PROPERTY_BACKLIGHT_BRIGHTNESS:
if (val > 100) {
ret = -EINVAL;
goto out;
}
gconn->initial_brightness = val;
break;
default:
/* New ones might show up in future devices, skip those we don't know. */
drm_dbg(drm, "Ignoring unknown property: %u\n", prop);
continue;
}
gconn->properties[gconn->num_properties++] = prop;
if (prop == GUD_PROPERTY_BACKLIGHT_BRIGHTNESS)
continue; /* not a DRM property */
property = gud_connector_property_lookup(connector, prop);
if (WARN_ON(IS_ERR(property)))
continue;
state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state);
if (WARN_ON(IS_ERR(state_val)))
continue;
*state_val = val;
drm_object_attach_property(&connector->base, property, 0);
}
out:
kfree(properties);
return ret;
}
int gud_connector_fill_properties(struct drm_connector_state *connector_state,
struct gud_property_req *properties)
{
struct gud_connector *gconn = to_gud_connector(connector_state->connector);
unsigned int i;
for (i = 0; i < gconn->num_properties; i++) {
u16 prop = gconn->properties[i];
u64 val;
if (prop == GUD_PROPERTY_BACKLIGHT_BRIGHTNESS) {
val = connector_state->tv.brightness;
} else {
unsigned int *state_val;
state_val = gud_connector_tv_state_val(prop, &connector_state->tv);
if (WARN_ON_ONCE(IS_ERR(state_val)))
return PTR_ERR(state_val);
val = *state_val;
}
properties[i].prop = cpu_to_le16(prop);
properties[i].val = cpu_to_le64(val);
}
return gconn->num_properties;
}
static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
struct gud_connector_descriptor_req *desc)
{
struct drm_device *drm = &gdrm->drm;
struct gud_connector *gconn;
struct drm_connector *connector;
struct drm_encoder *encoder;
int ret, connector_type;
u32 flags;
gconn = kzalloc(sizeof(*gconn), GFP_KERNEL);
if (!gconn)
return -ENOMEM;
INIT_WORK(&gconn->backlight_work, gud_connector_backlight_update_status_work);
gconn->initial_brightness = -ENODEV;
flags = le32_to_cpu(desc->flags);
connector = &gconn->connector;
drm_dbg(drm, "Connector: index=%u type=%u flags=0x%x\n", index, desc->connector_type, flags);
switch (desc->connector_type) {
case GUD_CONNECTOR_TYPE_PANEL:
connector_type = DRM_MODE_CONNECTOR_USB;
break;
case GUD_CONNECTOR_TYPE_VGA:
connector_type = DRM_MODE_CONNECTOR_VGA;
break;
case GUD_CONNECTOR_TYPE_DVI:
connector_type = DRM_MODE_CONNECTOR_DVID;
break;
case GUD_CONNECTOR_TYPE_COMPOSITE:
connector_type = DRM_MODE_CONNECTOR_Composite;
break;
case GUD_CONNECTOR_TYPE_SVIDEO:
connector_type = DRM_MODE_CONNECTOR_SVIDEO;
break;
case GUD_CONNECTOR_TYPE_COMPONENT:
connector_type = DRM_MODE_CONNECTOR_Component;
break;
case GUD_CONNECTOR_TYPE_DISPLAYPORT:
connector_type = DRM_MODE_CONNECTOR_DisplayPort;
break;
case GUD_CONNECTOR_TYPE_HDMI:
connector_type = DRM_MODE_CONNECTOR_HDMIA;
break;
default: /* future types */
connector_type = DRM_MODE_CONNECTOR_USB;
break;
}
drm_connector_helper_add(connector, &gud_connector_helper_funcs);
ret = drm_connector_init(drm, connector, &gud_connector_funcs, connector_type);
if (ret) {
kfree(connector);
return ret;
}
if (WARN_ON(connector->index != index))
return -EINVAL;
if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS)
connector->polled = (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT);
if (flags & GUD_CONNECTOR_FLAGS_INTERLACE)
connector->interlace_allowed = true;
if (flags & GUD_CONNECTOR_FLAGS_DOUBLESCAN)
connector->doublescan_allowed = true;
ret = gud_connector_add_properties(gdrm, gconn);
if (ret) {
gud_conn_err(connector, "Failed to add properties", ret);
return ret;
}
/* The first connector is attached to the existing simple pipe encoder */
if (!connector->index) {
encoder = &gdrm->pipe.encoder;
} else {
encoder = &gconn->encoder;
ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
if (ret)
return ret;
encoder->possible_crtcs = 1;
}
return drm_connector_attach_encoder(connector, encoder);
}
int gud_get_connectors(struct gud_device *gdrm)
{
struct gud_connector_descriptor_req *descs;
unsigned int i, num_connectors;
int ret;
descs = kmalloc_array(GUD_CONNECTORS_MAX_NUM, sizeof(*descs), GFP_KERNEL);
if (!descs)
return -ENOMEM;
ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTORS, 0,
descs, GUD_CONNECTORS_MAX_NUM * sizeof(*descs));
if (ret < 0)
goto free;
if (!ret || ret % sizeof(*descs)) {
ret = -EIO;
goto free;
}
num_connectors = ret / sizeof(*descs);
for (i = 0; i < num_connectors; i++) {
ret = gud_connector_create(gdrm, i, &descs[i]);
if (ret)
goto free;
}
free:
kfree(descs);
return ret;
}

Просмотреть файл

@ -0,0 +1,661 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2020 Noralf Trønnes
*/
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/lz4.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string_helpers.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
/* Only used internally */
static const struct drm_format_info gud_drm_format_r1 = {
.format = GUD_DRM_FORMAT_R1,
.num_planes = 1,
.char_per_block = { 1, 0, 0 },
.block_w = { 8, 0, 0 },
.block_h = { 1, 0, 0 },
.hsub = 1,
.vsub = 1,
};
static const struct drm_format_info gud_drm_format_xrgb1111 = {
.format = GUD_DRM_FORMAT_XRGB1111,
.num_planes = 1,
.char_per_block = { 1, 0, 0 },
.block_w = { 2, 0, 0 },
.block_h = { 1, 0, 0 },
.hsub = 1,
.vsub = 1,
};
static int gud_usb_control_msg(struct usb_interface *intf, bool in,
u8 request, u16 value, void *buf, size_t len)
{
u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
struct usb_device *usb = interface_to_usbdev(intf);
unsigned int pipe;
if (len && !buf)
return -EINVAL;
if (in) {
pipe = usb_rcvctrlpipe(usb, 0);
requesttype |= USB_DIR_IN;
} else {
pipe = usb_sndctrlpipe(usb, 0);
requesttype |= USB_DIR_OUT;
}
return usb_control_msg(usb, pipe, request, requesttype, value,
ifnum, buf, len, USB_CTRL_GET_TIMEOUT);
}
static int gud_get_display_descriptor(struct usb_interface *intf,
struct gud_display_descriptor_req *desc)
{
void *buf;
int ret;
buf = kmalloc(sizeof(*desc), GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc));
memcpy(desc, buf, sizeof(*desc));
kfree(buf);
if (ret < 0)
return ret;
if (ret != sizeof(*desc))
return -EIO;
if (desc->magic != le32_to_cpu(GUD_DISPLAY_MAGIC))
return -ENODATA;
DRM_DEV_DEBUG_DRIVER(&intf->dev,
"version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n",
desc->version, le32_to_cpu(desc->flags), desc->compression,
le32_to_cpu(desc->max_buffer_size));
if (!desc->version || !desc->max_width || !desc->max_height ||
le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) ||
le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height))
return -EINVAL;
return 0;
}
static int gud_status_to_errno(u8 status)
{
switch (status) {
case GUD_STATUS_OK:
return 0;
case GUD_STATUS_BUSY:
return -EBUSY;
case GUD_STATUS_REQUEST_NOT_SUPPORTED:
return -EOPNOTSUPP;
case GUD_STATUS_PROTOCOL_ERROR:
return -EPROTO;
case GUD_STATUS_INVALID_PARAMETER:
return -EINVAL;
case GUD_STATUS_ERROR:
return -EREMOTEIO;
default:
return -EREMOTEIO;
}
}
static int gud_usb_get_status(struct usb_interface *intf)
{
int ret, status = -EIO;
u8 *buf;
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf));
if (ret == sizeof(*buf))
status = gud_status_to_errno(*buf);
kfree(buf);
if (ret < 0)
return ret;
return status;
}
static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index,
void *buf, size_t len)
{
struct usb_interface *intf = to_usb_interface(gdrm->drm.dev);
int idx, ret;
drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n",
in ? "get" : "set", request, index, len);
if (!drm_dev_enter(&gdrm->drm, &idx))
return -ENODEV;
mutex_lock(&gdrm->ctrl_lock);
ret = gud_usb_control_msg(intf, in, request, index, buf, len);
if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) {
int status;
status = gud_usb_get_status(intf);
if (status < 0) {
ret = status;
} else if (ret < 0) {
dev_err_once(gdrm->drm.dev,
"Unexpected status OK for failed transfer\n");
ret = -EPIPE;
}
}
if (ret < 0) {
drm_dbg(&gdrm->drm, "ret=%d\n", ret);
gdrm->stats_num_errors++;
}
mutex_unlock(&gdrm->ctrl_lock);
drm_dev_exit(idx);
return ret;
}
/*
* @buf cannot be allocated on the stack.
* Returns number of bytes received or negative error code on failure.
*/
int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len)
{
return gud_usb_transfer(gdrm, true, request, index, buf, max_len);
}
/*
* @buf can be allocated on the stack or NULL.
* Returns zero on success or negative error code on failure.
*/
int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len)
{
void *trbuf = NULL;
int ret;
if (buf && len) {
trbuf = kmemdup(buf, len, GFP_KERNEL);
if (!trbuf)
return -ENOMEM;
}
ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len);
kfree(trbuf);
if (ret < 0)
return ret;
return ret != len ? -EIO : 0;
}
/*
* @val can be allocated on the stack.
* Returns zero on success or negative error code on failure.
*/
int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val)
{
u8 *buf;
int ret;
buf = kmalloc(sizeof(*val), GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val));
*val = *buf;
kfree(buf);
if (ret < 0)
return ret;
return ret != sizeof(*val) ? -EIO : 0;
}
/* Returns zero on success or negative error code on failure. */
int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val)
{
return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
}
static int gud_get_properties(struct gud_device *gdrm)
{
struct gud_property_req *properties;
unsigned int i, num_properties;
int ret;
properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL);
if (!properties)
return -ENOMEM;
ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0,
properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties));
if (ret <= 0)
goto out;
if (ret % sizeof(*properties)) {
ret = -EIO;
goto out;
}
num_properties = ret / sizeof(*properties);
ret = 0;
gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties),
GFP_KERNEL);
if (!gdrm->properties) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < num_properties; i++) {
u16 prop = le16_to_cpu(properties[i].prop);
u64 val = le64_to_cpu(properties[i].val);
switch (prop) {
case GUD_PROPERTY_ROTATION:
/*
* DRM UAPI matches the protocol so use the value directly,
* but mask out any additions on future devices.
*/
val &= GUD_ROTATION_MASK;
ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
DRM_MODE_ROTATE_0, val);
break;
default:
/* New ones might show up in future devices, skip those we don't know. */
drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop);
continue;
}
if (ret)
goto out;
gdrm->properties[gdrm->num_properties++] = prop;
}
out:
kfree(properties);
return ret;
}
/*
* FIXME: Dma-buf sharing requires DMA support by the importing device.
* This function is a workaround to make USB devices work as well.
* See todo.rst for how to fix the issue in the dma-buf framework.
*/
static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
{
struct gud_device *gdrm = to_gud_device(drm);
if (!gdrm->dmadev)
return ERR_PTR(-ENODEV);
return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
}
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct gud_device *gdrm = to_gud_device(node->minor->dev);
char buf[10];
string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
seq_printf(m, "Max buffer size: %s\n", buf);
seq_printf(m, "Number of errors: %u\n", gdrm->stats_num_errors);
seq_puts(m, "Compression: ");
if (gdrm->compression & GUD_COMPRESSION_LZ4)
seq_puts(m, " lz4");
if (!gdrm->compression)
seq_puts(m, " none");
seq_puts(m, "\n");
if (gdrm->compression) {
u64 remainder;
u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length,
&remainder);
u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length);
seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac);
}
return 0;
}
static const struct drm_info_list gud_debugfs_list[] = {
{ "stats", gud_stats_debugfs, 0, NULL },
};
static void gud_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(gud_debugfs_list, ARRAY_SIZE(gud_debugfs_list),
minor->debugfs_root, minor);
}
static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
.check = gud_pipe_check,
.update = gud_pipe_update,
.prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const u64 gud_pipe_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
DEFINE_DRM_GEM_FOPS(gud_fops);
static const struct drm_driver gud_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = gud_gem_prime_import,
.debugfs_init = gud_debugfs_init,
.name = "gud",
.desc = "Generic USB Display",
.date = "20200422",
.major = 1,
.minor = 0,
};
static void gud_free_buffers_and_mutex(struct drm_device *drm, void *unused)
{
struct gud_device *gdrm = to_gud_device(drm);
vfree(gdrm->compress_buf);
kfree(gdrm->bulk_buf);
mutex_destroy(&gdrm->ctrl_lock);
mutex_destroy(&gdrm->damage_lock);
}
/*
 * USB probe: validate the interface, read the display descriptor and the
 * device's pixel-format list, size and allocate the bulk transfer buffer,
 * then set up and register the DRM device.
 *
 * Returns 0 on success or a negative errno; -ENODEV when the interface is
 * not a GUD display or speaks an unsupported protocol version.
 */
static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	const struct drm_format_info *xrgb8888_emulation_format = NULL;
	bool rgb565_supported = false, xrgb8888_supported = false;
	unsigned int num_formats_dev, num_formats = 0;
	struct usb_endpoint_descriptor *bulk_out;
	struct gud_display_descriptor_req desc;
	struct device *dev = &intf->dev;
	size_t max_buffer_size = 0;
	struct gud_device *gdrm;
	struct drm_device *drm;
	u8 *formats_dev;
	u32 *formats;
	int ret, i;

	ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out);
	if (ret)
		return ret;

	ret = gud_get_display_descriptor(intf, &desc);
	if (ret) {
		DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret);
		return -ENODEV;
	}

	/* Only protocol version 1 is implemented */
	if (desc.version > 1) {
		dev_err(dev, "Protocol version %u is not supported\n", desc.version);
		return -ENODEV;
	}

	gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm);
	if (IS_ERR(gdrm))
		return PTR_ERR(gdrm);

	drm = &gdrm->drm;
	drm->mode_config.funcs = &gud_mode_config_funcs;
	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	gdrm->flags = le32_to_cpu(desc.flags);
	gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;

	/* Reject the unsupported combination of full updates and compression */
	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression)
		return -EINVAL;

	mutex_init(&gdrm->ctrl_lock);
	mutex_init(&gdrm->damage_lock);
	INIT_WORK(&gdrm->work, gud_flush_work);
	gud_clear_damage(gdrm);

	/* Ensure buffers/mutexes are released with the DRM device from here on */
	ret = drmm_add_action_or_reset(drm, gud_free_buffers_and_mutex, NULL);
	if (ret)
		return ret;

	drm->mode_config.min_width = le32_to_cpu(desc.min_width);
	drm->mode_config.max_width = le32_to_cpu(desc.max_width);
	drm->mode_config.min_height = le32_to_cpu(desc.min_height);
	drm->mode_config.max_height = le32_to_cpu(desc.max_height);

	formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
	/* Add room for emulated XRGB8888 */
	formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
	if (!formats_dev || !formats)
		return -ENOMEM;

	/* On success the return value is the number of formats received */
	ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM);
	if (ret < 0)
		return ret;

	num_formats_dev = ret;
	for (i = 0; i < num_formats_dev; i++) {
		const struct drm_format_info *info;
		size_t fmt_buf_size;
		u32 format;

		format = gud_to_fourcc(formats_dev[i]);
		if (!format) {
			drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]);
			continue;
		}

		/* R1/XRGB1111 are driver-internal and have their own format info */
		if (format == GUD_DRM_FORMAT_R1)
			info = &gud_drm_format_r1;
		else if (format == GUD_DRM_FORMAT_XRGB1111)
			info = &gud_drm_format_xrgb1111;
		else
			info = drm_format_info(format);

		switch (format) {
		case GUD_DRM_FORMAT_R1:
			fallthrough;
		case GUD_DRM_FORMAT_XRGB1111:
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_RGB565:
			rgb565_supported = true;
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_XRGB8888:
			xrgb8888_supported = true;
			break;
		}

		/* Track the worst-case (full screen) buffer size over all formats */
		fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) *
			       drm->mode_config.max_height;
		max_buffer_size = max(max_buffer_size, fmt_buf_size);

		if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111)
			continue; /* Internal not for userspace */

		formats[num_formats++] = format;
	}

	if (!num_formats && !xrgb8888_emulation_format) {
		dev_err(dev, "No supported pixel formats found\n");
		return -EINVAL;
	}

	/* Prefer speed over color depth */
	if (rgb565_supported)
		drm->mode_config.preferred_depth = 16;

	/* Expose XRGB8888 to userspace even when the device only emulates it */
	if (!xrgb8888_supported && xrgb8888_emulation_format) {
		gdrm->xrgb8888_emulation_format = xrgb8888_emulation_format;
		formats[num_formats++] = DRM_FORMAT_XRGB8888;
	}

	/* A device-supplied limit overrides the computed worst case */
	if (desc.max_buffer_size)
		max_buffer_size = le32_to_cpu(desc.max_buffer_size);
retry:
	/*
	 * Use plain kmalloc here since devm_kmalloc() places struct devres at the beginning
	 * of the buffer it allocates. This wastes a lot of memory when allocating big buffers.
	 * Asking for 2M would actually allocate 4M. This would also prevent getting the biggest
	 * possible buffer potentially leading to split transfers.
	 */
	gdrm->bulk_buf = kmalloc(max_buffer_size, GFP_KERNEL | __GFP_NOWARN);
	if (!gdrm->bulk_buf) {
		/* Halve the request (power-of-two rounded) until it fits or hits 512K */
		max_buffer_size = roundup_pow_of_two(max_buffer_size) / 2;
		if (max_buffer_size < SZ_512K)
			return -ENOMEM;
		goto retry;
	}

	gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out));
	gdrm->bulk_len = max_buffer_size;

	if (gdrm->compression & GUD_COMPRESSION_LZ4) {
		gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL);
		if (!gdrm->lz4_comp_mem)
			return -ENOMEM;

		/* Staging buffer for the uncompressed pixels before compression */
		gdrm->compress_buf = vmalloc(gdrm->bulk_len);
		if (!gdrm->compress_buf)
			return -ENOMEM;
	}

	ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
					   formats, num_formats,
					   gud_pipe_modifiers, NULL);
	if (ret)
		return ret;

	/* The format arrays are only needed for pipe init, release them early */
	devm_kfree(dev, formats);
	devm_kfree(dev, formats_dev);

	ret = gud_get_properties(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get properties (error=%d)\n", ret);
		return ret;
	}

	drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);

	ret = gud_get_connectors(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get connectors (error=%d)\n", ret);
		return ret;
	}

	drm_mode_config_reset(drm);

	usb_set_intfdata(intf, gdrm);

	/* May be NULL; PRIME buffer sharing is then unavailable but probe continues */
	gdrm->dmadev = usb_intf_get_dma_device(intf);
	if (!gdrm->dmadev)
		dev_warn(dev, "buffer sharing not supported");

	ret = drm_dev_register(drm, 0);
	if (ret) {
		put_device(gdrm->dmadev);
		return ret;
	}

	drm_kms_helper_poll_init(drm);
	drm_fbdev_generic_setup(drm, 0);

	return 0;
}
/*
 * USB disconnect: stop polling, mark the DRM device unplugged so new
 * hardware access is rejected, shut down the atomic state and drop the
 * DMA device reference taken in probe.
 */
static void gud_disconnect(struct usb_interface *interface)
{
	struct gud_device *gdrm = usb_get_intfdata(interface);
	struct drm_device *drm = &gdrm->drm;

	drm_dbg(drm, "%s:\n", __func__);

	drm_kms_helper_poll_fini(drm);
	drm_dev_unplug(drm);
	drm_atomic_helper_shutdown(drm);
	put_device(gdrm->dmadev);
	gdrm->dmadev = NULL;
}
/* USB suspend: disable the display via the mode-config suspend helper */
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	return drm_mode_config_helper_suspend(&gdrm->drm);
}
/* USB resume (also used for reset_resume): restore the suspended state */
static int gud_resume(struct usb_interface *intf)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	/* Helper return value intentionally ignored; resume always reports success */
	drm_mode_config_helper_resume(&gdrm->drm);

	return 0;
}
/* Match on VID/PID 1d50:614d plus the vendor-specific interface class */
static const struct usb_device_id gud_id_table[] = {
	{ USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) },
	{ }
};

MODULE_DEVICE_TABLE(usb, gud_id_table);
/* USB driver glue; reset_resume reuses the normal resume path */
static struct usb_driver gud_usb_driver = {
	.name		= "gud",
	.probe		= gud_probe,
	.disconnect	= gud_disconnect,
	.id_table	= gud_id_table,
	.suspend	= gud_suspend,
	.resume		= gud_resume,
	.reset_resume	= gud_resume,
};

module_usb_driver(gud_usb_driver);

MODULE_AUTHOR("Noralf Trønnes");
MODULE_LICENSE("Dual MIT/GPL");

Просмотреть файл

@ -0,0 +1,154 @@
/* SPDX-License-Identifier: MIT */
#ifndef __LINUX_GUD_INTERNAL_H
#define __LINUX_GUD_INTERNAL_H
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modes.h>
#include <drm/drm_simple_kms_helper.h>
/* Per-device driver state; the DRM device is embedded and used for lifetime */
struct gud_device {
	struct drm_device drm;
	struct drm_simple_display_pipe pipe;
	struct device *dmadev;		/* For buffer sharing, may be NULL */
	struct work_struct work;	/* Runs gud_flush_work() */
	u32 flags;			/* GUD_DISPLAY_FLAG_* from the display descriptor */
	const struct drm_format_info *xrgb8888_emulation_format; /* Set when XRGB8888 is emulated */

	u16 *properties;		/* Plane properties reported by the device */
	unsigned int num_properties;

	unsigned int bulk_pipe;		/* USB bulk-out pipe for pixel data */
	void *bulk_buf;			/* Bulk transfer buffer (kmalloc'ed) */
	size_t bulk_len;
	u8 compression;			/* Negotiated compression (GUD_COMPRESSION_LZ4 or 0) */
	void *lz4_comp_mem;		/* Workspace for LZ4_compress_default() */
	void *compress_buf;		/* Staging buffer when compressing (vmalloc'ed) */

	u64 stats_length;		/* Accumulated uncompressed bytes (debugfs) */
	u64 stats_actual_length;	/* Accumulated bytes actually sent (debugfs) */
	unsigned int stats_num_errors;	/* Flush error count (debugfs) */

	struct mutex ctrl_lock; /* Serialize get/set and status transfers */

	struct mutex damage_lock; /* Protects the following members: */
	struct drm_framebuffer *fb;	/* Framebuffer pending flush, NULL when none */
	struct drm_rect damage;		/* Accumulated damage, cleared to an empty rect */
	bool prev_flush_failed;
};
/* Upcast from the embedded DRM device to the driver structure */
static inline struct gud_device *to_gud_device(struct drm_device *drm)
{
	return container_of(drm, struct gud_device, drm);
}
/* The DRM device's parent is the USB interface; walk up to the USB device */
static inline struct usb_device *gud_to_usb_device(struct gud_device *gdrm)
{
	return interface_to_usbdev(to_usb_interface(gdrm->drm.dev));
}
/* USB control-transfer helpers */
int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len);
int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len);
int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val);
int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val);

/* Damage tracking and deferred flushing */
void gud_clear_damage(struct gud_device *gdrm);
void gud_flush_work(struct work_struct *work);

/* Simple display pipe callbacks */
int gud_pipe_check(struct drm_simple_display_pipe *pipe,
		   struct drm_plane_state *new_plane_state,
		   struct drm_crtc_state *new_crtc_state);
void gud_pipe_update(struct drm_simple_display_pipe *pipe,
		     struct drm_plane_state *old_state);

/* Connector support */
int gud_connector_fill_properties(struct drm_connector_state *connector_state,
				  struct gud_property_req *properties);
int gud_get_connectors(struct gud_device *gdrm);

/* Driver internal fourcc transfer formats (never exposed to userspace) */
#define GUD_DRM_FORMAT_R1		0x00000122
#define GUD_DRM_FORMAT_XRGB1111		0x03121722
/*
 * Map a DRM fourcc to the protocol's one-byte pixel format value.
 * Returns 0 for fourccs that have no protocol encoding.
 */
static inline u8 gud_from_fourcc(u32 fourcc)
{
	switch (fourcc) {
	case GUD_DRM_FORMAT_R1:
		return GUD_PIXEL_FORMAT_R1;
	case GUD_DRM_FORMAT_XRGB1111:
		return GUD_PIXEL_FORMAT_XRGB1111;
	case DRM_FORMAT_RGB565:
		return GUD_PIXEL_FORMAT_RGB565;
	case DRM_FORMAT_XRGB8888:
		return GUD_PIXEL_FORMAT_XRGB8888;
	case DRM_FORMAT_ARGB8888:
		return GUD_PIXEL_FORMAT_ARGB8888;
	}	/* stray ';' after the brace removed: it was an empty statement */

	return 0;
}
/*
 * Map a protocol one-byte pixel format to the corresponding DRM fourcc.
 * Inverse of gud_from_fourcc(); returns 0 for unknown values.
 */
static inline u32 gud_to_fourcc(u8 format)
{
	switch (format) {
	case GUD_PIXEL_FORMAT_R1:
		return GUD_DRM_FORMAT_R1;
	case GUD_PIXEL_FORMAT_XRGB1111:
		return GUD_DRM_FORMAT_XRGB1111;
	case GUD_PIXEL_FORMAT_RGB565:
		return DRM_FORMAT_RGB565;
	case GUD_PIXEL_FORMAT_XRGB8888:
		return DRM_FORMAT_XRGB8888;
	case GUD_PIXEL_FORMAT_ARGB8888:
		return DRM_FORMAT_ARGB8888;
	}	/* stray ';' after the brace removed: it was an empty statement */

	return 0;
}
/* Convert a DRM display mode to the little-endian wire representation */
static inline void gud_from_display_mode(struct gud_display_mode_req *dst,
					 const struct drm_display_mode *src)
{
	u32 flags = src->flags & GUD_DISPLAY_MODE_FLAG_USER_MASK;

	/* The preferred bit lives in mode->type on the DRM side */
	if (src->type & DRM_MODE_TYPE_PREFERRED)
		flags |= GUD_DISPLAY_MODE_FLAG_PREFERRED;

	dst->clock = cpu_to_le32(src->clock);
	dst->hdisplay = cpu_to_le16(src->hdisplay);
	dst->hsync_start = cpu_to_le16(src->hsync_start);
	dst->hsync_end = cpu_to_le16(src->hsync_end);
	dst->htotal = cpu_to_le16(src->htotal);
	dst->vdisplay = cpu_to_le16(src->vdisplay);
	dst->vsync_start = cpu_to_le16(src->vsync_start);
	dst->vsync_end = cpu_to_le16(src->vsync_end);
	dst->vtotal = cpu_to_le16(src->vtotal);
	dst->flags = cpu_to_le32(flags);
}
/*
 * Convert a wire-format mode to a DRM display mode. The destination is
 * fully reinitialized; only user-visible flag bits are carried over and
 * the preferred bit is mapped back into mode->type.
 */
static inline void gud_to_display_mode(struct drm_display_mode *dst,
				       const struct gud_display_mode_req *src)
{
	u32 flags = le32_to_cpu(src->flags);

	memset(dst, 0, sizeof(*dst));
	dst->clock = le32_to_cpu(src->clock);
	dst->hdisplay = le16_to_cpu(src->hdisplay);
	dst->hsync_start = le16_to_cpu(src->hsync_start);
	dst->hsync_end = le16_to_cpu(src->hsync_end);
	dst->htotal = le16_to_cpu(src->htotal);
	dst->vdisplay = le16_to_cpu(src->vdisplay);
	dst->vsync_start = le16_to_cpu(src->vsync_start);
	dst->vsync_end = le16_to_cpu(src->vsync_end);
	dst->vtotal = le16_to_cpu(src->vtotal);
	dst->flags = flags & GUD_DISPLAY_MODE_FLAG_USER_MASK;
	dst->type = DRM_MODE_TYPE_DRIVER;
	if (flags & GUD_DISPLAY_MODE_FLAG_PREFERRED)
		dst->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(dst);
}
#endif

Просмотреть файл

@ -0,0 +1,552 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2020 Noralf Trønnes
*/
#include <linux/dma-buf.h>
#include <linux/lz4.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
/*
* FIXME: The driver is probably broken on Big Endian machines.
* See discussion:
* https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/
*/
/*
 * Whether the host CPU is big endian. Callers use this to decide if pixel
 * data must be byte-swapped before transfer (see gud_prep_flush()).
 */
static bool gud_is_big_endian(void)
{
#if defined(__BIG_ENDIAN)
	return true;
#else
	return false;
#endif
}
/*
 * Convert an XRGB8888 rect to a packed 1/2/4 bits-per-pixel grayscale
 * buffer. Goes through an intermediate 8-bit grayscale buffer, then packs
 * pixels MSB-first into bytes. @rect->x1 may be moved left so rows start
 * on a byte boundary.
 *
 * Returns the number of bytes written to @dst, or 0 on allocation failure.
 */
static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
				   void *src, struct drm_framebuffer *fb,
				   struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;	/* pixels share one byte */
	unsigned int x, y, width, height;
	u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
	size_t len;
	void *buf;

	WARN_ON_ONCE(format->char_per_block[0] != 1);

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	height = drm_rect_height(rect);
	len = drm_format_info_min_pitch(format, 0, width) * height;

	/* One byte per pixel for the intermediate grayscale conversion */
	buf = kmalloc(width * height, GFP_KERNEL);
	if (!buf)
		return 0;

	drm_fb_xrgb8888_to_gray8(buf, src, fb, rect);
	pix8 = buf;

	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			/* First pixel of a byte: advance and clear the output byte */
			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			/* Keep only the top bits_per_pixel bits of the gray value */
			pix = (*pix8++) >> (8 - bits_per_pixel);
			*block |= pix << pixshift;
		}
	}

	kfree(buf);

	return len;
}
/*
 * Convert an XRGB8888 rect to a packed sub-byte color format (currently
 * only XRGB1111). Packs pixels MSB-first into bytes; @rect->x1 may be
 * moved left so rows start on a byte boundary.
 *
 * Returns the number of bytes the converted rect occupies in @dst.
 */
static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *format,
				    void *src, struct drm_framebuffer *fb,
				    struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
	unsigned int x, y, width;
	u32 *pix32;
	size_t len;

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);

	for (y = rect->y1; y < rect->y2; y++) {
		pix32 = src + (y * fb->pitches[0]);
		pix32 += rect->x1;

		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			/* First pixel of a byte: advance and clear the output byte */
			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			/* Extract 8-bit channels from the XRGB8888 word */
			r = *pix32 >> 16;
			g = *pix32 >> 8;
			b = *pix32++;

			switch (format->format) {
			case GUD_DRM_FORMAT_XRGB1111:
				/* Keep only the top bit of each channel */
				pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
				break;
			default:
				WARN_ON_ONCE(1);
				return len;
			}

			*block |= pix << pixshift;
		}
	}

	return len;
}
/*
 * Prepare a flush of @rect from @fb: vmap the GEM object, convert/copy the
 * pixels into the transfer buffer (format conversion, byte swapping or plain
 * copy as needed), optionally LZ4-compress, and fill in @req for the
 * GUD_REQ_SET_BUFFER request. Returns 0 on success or a negative errno.
 */
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct drm_format_info *format, struct drm_rect *rect,
			  struct gud_set_buffer_req *req)
{
	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
	u8 compression = gdrm->compression;
	struct dma_buf_map map;
	void *vaddr, *buf;
	size_t pitch, len;
	int ret = 0;

	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
	len = pitch * drm_rect_height(rect);
	if (len > gdrm->bulk_len)
		return -E2BIG;

	ret = drm_gem_shmem_vmap(fb->obj[0], &map);
	if (ret)
		return ret;

	vaddr = map.vaddr + fb->offsets[0];

	/* Imported dma-bufs need CPU access bracketing around the reads below */
	if (import_attach) {
		ret = dma_buf_begin_cpu_access(import_attach->dmabuf, DMA_FROM_DEVICE);
		if (ret)
			goto vunmap;
	}
retry:
	/* When compressing, stage pixels in compress_buf; bulk_buf gets the LZ4 output */
	if (compression)
		buf = gdrm->compress_buf;
	else
		buf = gdrm->bulk_buf;

	/*
	 * Imported buffers are assumed to be write-combined and thus uncached
	 * with slow reads (at least on ARM).
	 */
	if (format != fb->format) {
		/* Converting from emulated XRGB8888 to the device transfer format */
		if (format->format == GUD_DRM_FORMAT_R1) {
			len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
			if (!len) {
				ret = -ENOMEM;
				goto end_cpu_access;
			}
		} else if (format->format == DRM_FORMAT_RGB565) {
			drm_fb_xrgb8888_to_rgb565(buf, vaddr, fb, rect, gud_is_big_endian());
		} else {
			len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
		}
	} else if (gud_is_big_endian() && format->cpp[0] > 1) {
		drm_fb_swab(buf, vaddr, fb, rect, !import_attach);
	} else if (compression && !import_attach && pitch == fb->pitches[0]) {
		/* can compress directly from the framebuffer */
		buf = vaddr + rect->y1 * pitch;
	} else {
		drm_fb_memcpy(buf, vaddr, fb, rect);
	}

	memset(req, 0, sizeof(*req));
	req->x = cpu_to_le32(rect->x1);
	req->y = cpu_to_le32(rect->y1);
	req->width = cpu_to_le32(drm_rect_width(rect));
	req->height = cpu_to_le32(drm_rect_height(rect));
	req->length = cpu_to_le32(len);

	if (compression & GUD_COMPRESSION_LZ4) {
		int complen;

		complen = LZ4_compress_default(buf, gdrm->bulk_buf, len, len, gdrm->lz4_comp_mem);
		if (complen <= 0) {
			/* Incompressible: redo the conversion straight into bulk_buf */
			compression = 0;
			goto retry;
		}

		req->compression = GUD_COMPRESSION_LZ4;
		req->compressed_length = cpu_to_le32(complen);
	}

end_cpu_access:
	if (import_attach)
		dma_buf_end_cpu_access(import_attach->dmabuf, DMA_FROM_DEVICE);
vunmap:
	drm_gem_shmem_vunmap(fb->obj[0], &map);

	return ret;
}
/*
 * Flush one rect to the device: prepare the transfer buffer, update the
 * debugfs statistics, send the SET_BUFFER control request (unless the
 * device runs in full-update mode and the previous flush succeeded) and
 * finally the pixel data over the bulk pipe.
 */
static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct drm_format_info *format, struct drm_rect *rect)
{
	struct usb_device *usb = gud_to_usb_device(gdrm);
	struct gud_set_buffer_req req;
	int ret, actual_length;
	size_t len, trlen;

	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));

	ret = gud_prep_flush(gdrm, fb, format, rect, &req);
	if (ret)
		return ret;

	len = le32_to_cpu(req.length);

	if (req.compression)
		trlen = le32_to_cpu(req.compressed_length);
	else
		trlen = len;

	gdrm->stats_length += len;
	/* Did it wrap around? */
	if (gdrm->stats_length <= len && gdrm->stats_actual_length) {
		gdrm->stats_length = len;
		gdrm->stats_actual_length = 0;
	}
	gdrm->stats_actual_length += trlen;

	if (!(gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) || gdrm->prev_flush_failed) {
		ret = gud_usb_set(gdrm, GUD_REQ_SET_BUFFER, 0, &req, sizeof(req));
		if (ret)
			return ret;
	}

	ret = usb_bulk_msg(usb, gdrm->bulk_pipe, gdrm->bulk_buf, trlen,
			   &actual_length, msecs_to_jiffies(3000));
	/* A short transfer is also an error */
	if (!ret && trlen != actual_length)
		ret = -EIO;
	if (ret)
		gdrm->stats_num_errors++;

	return ret;
}
void gud_clear_damage(struct gud_device *gdrm)
{
gdrm->damage.x1 = INT_MAX;
gdrm->damage.y1 = INT_MAX;
gdrm->damage.x2 = 0;
gdrm->damage.y2 = 0;
}
/* Grow the accumulated damage rectangle to also cover @damage */
static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
{
	struct drm_rect *acc = &gdrm->damage;

	acc->x1 = min(acc->x1, damage->x1);
	acc->y1 = min(acc->y1, damage->y1);
	acc->x2 = max(acc->x2, damage->x2);
	acc->y2 = max(acc->y2, damage->y2);
}
/*
 * Re-queue the damage of a failed flush so the worker tries again.
 * Takes a framebuffer reference if the worker isn't already holding one.
 */
static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
				   struct drm_rect *damage)
{
	/*
	 * pipe_update waits for the worker when the display mode is going to change.
	 * This ensures that the width and height is still the same making it safe to
	 * add back the damage.
	 */

	mutex_lock(&gdrm->damage_lock);
	if (!gdrm->fb) {
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}
	gud_add_damage(gdrm, damage);
	mutex_unlock(&gdrm->damage_lock);

	/* Retry only once to avoid a possible storm in case of continuous errors. */
	if (!gdrm->prev_flush_failed)
		queue_work(system_long_wq, &gdrm->work);
	gdrm->prev_flush_failed = true;
}
/*
 * Worker that flushes accumulated damage to the device. Grabs the pending
 * framebuffer and damage rect under damage_lock, then splits the update
 * into horizontal strips that fit in the bulk transfer buffer.
 */
void gud_flush_work(struct work_struct *work)
{
	struct gud_device *gdrm = container_of(work, struct gud_device, work);
	const struct drm_format_info *format;
	struct drm_framebuffer *fb;
	struct drm_rect damage;
	unsigned int i, lines;
	int idx, ret = 0;
	size_t pitch;

	/* Bail out if the device has been unplugged */
	if (!drm_dev_enter(&gdrm->drm, &idx))
		return;

	/* Take ownership of the pending fb + damage and reset the pending state */
	mutex_lock(&gdrm->damage_lock);
	fb = gdrm->fb;
	gdrm->fb = NULL;
	damage = gdrm->damage;
	gud_clear_damage(gdrm);
	mutex_unlock(&gdrm->damage_lock);

	if (!fb)
		goto out;

	format = fb->format;
	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	/* Split update if it's too big */
	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
	lines = drm_rect_height(&damage);

	if (gdrm->bulk_len < lines * pitch)
		lines = gdrm->bulk_len / pitch;

	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
		struct drm_rect rect = damage;

		rect.y1 += i * lines;
		rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);

		ret = gud_flush_rect(gdrm, fb, format, &rect);
		if (ret) {
			/* Disconnect-type USB errors are final, don't retry those */
			if (ret != -ENODEV && ret != -ECONNRESET &&
			    ret != -ESHUTDOWN && ret != -EPROTO) {
				bool prev_flush_failed = gdrm->prev_flush_failed;

				gud_retry_failed_flush(gdrm, fb, &damage);
				/* Only log on the first failure of a streak */
				if (!prev_flush_failed)
					dev_err_ratelimited(fb->dev->dev,
							    "Failed to flush framebuffer: error=%d\n", ret);
			}
			break;
		}

		gdrm->prev_flush_failed = false;
	}

	drm_framebuffer_put(fb);
out:
	drm_dev_exit(idx);
}
/*
 * Record @damage against @fb and kick the flush worker. Swaps in the new
 * framebuffer (taking a reference) if it differs from the pending one;
 * the old reference is dropped outside the lock.
 */
static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
				struct drm_rect *damage)
{
	struct drm_framebuffer *old_fb = NULL;

	mutex_lock(&gdrm->damage_lock);

	if (fb != gdrm->fb) {
		old_fb = gdrm->fb;
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}

	gud_add_damage(gdrm, damage);

	mutex_unlock(&gdrm->damage_lock);

	queue_work(system_long_wq, &gdrm->work);

	if (old_fb)
		drm_framebuffer_put(old_fb);
}
/*
 * Atomic .check for the simple display pipe. Builds a GUD_REQ_SET_STATE_CHECK
 * request from the new state (mode, transfer format, connector index, plane
 * and connector properties) and asks the device to validate it.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: the original initialized 'format' from fb->format in the declaration,
 * dereferencing fb before the WARN_ON_ONCE(!fb) NULL check. The dereference
 * is now done after the check.
 */
int gud_pipe_check(struct drm_simple_display_pipe *pipe,
		   struct drm_plane_state *new_plane_state,
		   struct drm_crtc_state *new_crtc_state)
{
	struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
	struct drm_plane_state *old_plane_state = pipe->plane.state;
	const struct drm_display_mode *mode = &new_crtc_state->mode;
	struct drm_atomic_state *state = new_plane_state->state;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	struct drm_connector_state *connector_state = NULL;
	struct drm_framebuffer *fb = new_plane_state->fb;
	const struct drm_format_info *format;
	struct drm_connector *connector;
	unsigned int i, num_properties;
	struct gud_state_req *req;
	int idx, ret;
	size_t len;

	if (WARN_ON_ONCE(!fb))
		return -EINVAL;

	/* Safe to dereference only after the NULL check above */
	format = fb->format;

	/* Rotation and format changes require a full state check on the device */
	if (old_plane_state->rotation != new_plane_state->rotation)
		new_crtc_state->mode_changed = true;

	if (old_fb && old_fb->format != format)
		new_crtc_state->mode_changed = true;

	if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
		return 0;

	/* Only one connector is supported */
	if (hweight32(new_crtc_state->connector_mask) != 1)
		return -EINVAL;

	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc)
			break;
	}

	/*
	 * DRM_IOCTL_MODE_OBJ_SETPROPERTY on the rotation property will not have
	 * the connector included in the state.
	 */
	if (!connector_state) {
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->state->crtc) {
				connector_state = connector->state;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	if (WARN_ON_ONCE(!connector_state))
		return -ENOENT;

	/* Allocate for the worst-case number of properties */
	len = struct_size(req, properties,
			  GUD_PROPERTIES_MAX_NUM + GUD_CONNECTOR_PROPERTIES_MAX_NUM);
	req = kzalloc(len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	gud_from_display_mode(&req->mode, mode);

	req->format = gud_from_fourcc(format->format);
	if (WARN_ON_ONCE(!req->format)) {
		ret = -EINVAL;
		goto out;
	}

	req->connector = drm_connector_index(connector_state->connector);

	/* Connector properties first, then the device-reported plane properties */
	ret = gud_connector_fill_properties(connector_state, req->properties);
	if (ret < 0)
		goto out;

	num_properties = ret;
	for (i = 0; i < gdrm->num_properties; i++) {
		u16 prop = gdrm->properties[i];
		u64 val;

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/* DRM UAPI matches the protocol so use value directly */
			val = new_plane_state->rotation;
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		req->properties[num_properties + i].prop = cpu_to_le16(prop);
		req->properties[num_properties + i].val = cpu_to_le64(val);
		num_properties++;
	}

	if (drm_dev_enter(fb->dev, &idx)) {
		/* Shrink the request to the properties actually filled in */
		len = struct_size(req, properties, num_properties);

		ret = gud_usb_set(gdrm, GUD_REQ_SET_STATE_CHECK, 0, req, len);
		drm_dev_exit(idx);
	} else {
		ret = -ENODEV;
	}
out:
	kfree(req);

	return ret;
}
/*
 * Atomic .update for the simple display pipe. On a mode change or disable
 * the flush worker is drained and pending damage dropped, then the new
 * state is committed to the device and any plane damage queued for flush.
 */
void gud_pipe_update(struct drm_simple_display_pipe *pipe,
		     struct drm_plane_state *old_state)
{
	struct drm_device *drm = pipe->crtc.dev;
	struct gud_device *gdrm = to_gud_device(drm);
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_framebuffer *fb = state->fb;
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_rect damage;
	int idx;

	if (crtc->state->mode_changed || !crtc->state->enable) {
		/* Drain the worker and drop stale damage before switching modes */
		cancel_work_sync(&gdrm->work);
		mutex_lock(&gdrm->damage_lock);
		if (gdrm->fb) {
			drm_framebuffer_put(gdrm->fb);
			gdrm->fb = NULL;
		}
		gud_clear_damage(gdrm);
		mutex_unlock(&gdrm->damage_lock);
	}

	if (!drm_dev_enter(drm, &idx))
		return;

	/* First plane enable: turn the controller on */
	if (!old_state->fb)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);

	/* Commit the state previously validated in gud_pipe_check() */
	if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
		gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);

	if (crtc->state->active_changed)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);

	if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
		/* Full-update devices always get the whole framebuffer */
		if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
			drm_rect_init(&damage, 0, 0, fb->width, fb->height);
		gud_fb_queue_damage(gdrm, fb, &damage);
	}

	/* Plane disabled: turn the controller off */
	if (!crtc->state->enable)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);

	drm_dev_exit(idx);
}

Просмотреть файл

@ -24,6 +24,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
@ -37,7 +38,6 @@
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
struct ingenic_dma_hwdesc {
@ -561,7 +561,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
height = newstate->src_h >> 16;
cpp = newstate->fb->format->cpp[0];
if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
if (!priv->soc_info->has_osd || plane->type == DRM_PLANE_TYPE_OVERLAY)
hwdesc = &priv->dma_hwdescs->hwdesc_f0;
else
hwdesc = &priv->dma_hwdescs->hwdesc_f1;
@ -833,6 +833,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
const struct jz_soc_info *soc_info;
struct ingenic_drm *priv;
struct clk *parent_clk;
struct drm_plane *primary;
struct drm_bridge *bridge;
struct drm_panel *panel;
struct drm_encoder *encoder;
@ -947,9 +948,11 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
if (soc_info->has_osd)
priv->ipu_plane = drm_plane_from_index(drm, 0);
drm_plane_helper_add(&priv->f1, &ingenic_drm_plane_helper_funcs);
primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
ret = drm_universal_plane_init(drm, &priv->f1, 1,
drm_plane_helper_add(primary, &ingenic_drm_plane_helper_funcs);
ret = drm_universal_plane_init(drm, primary, 1,
&ingenic_drm_primary_plane_funcs,
priv->soc_info->formats_f1,
priv->soc_info->num_formats_f1,
@ -961,7 +964,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
NULL, &ingenic_drm_crtc_funcs, NULL);
if (ret) {
dev_err(dev, "Failed to init CRTC: %i\n", ret);
@ -1021,20 +1024,17 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
bridge = devm_drm_panel_bridge_add_typed(dev, panel,
DRM_MODE_CONNECTOR_DPI);
encoder = devm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
if (!encoder)
return -ENOMEM;
encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DPI, NULL);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
dev_err(dev, "Failed to init encoder: %d\n", ret);
return ret;
}
encoder->possible_crtcs = 1;
drm_encoder_helper_add(encoder, &ingenic_drm_encoder_helper_funcs);
ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DPI);
if (ret) {
dev_err(dev, "Failed to init encoder: %d\n", ret);
return ret;
}
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
dev_err(dev, "Unable to attach bridge\n");

Просмотреть файл

@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d)
DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
DSI_MCTL_MAIN_DATA_CTL_READ_EN |
DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);

Просмотреть файл

@ -45,7 +45,7 @@
* The ENCI is designed for PAl or NTSC encoding and can go through the VDAC
* directly for CVBS encoding or through the ENCI_DVI encoder for HDMI.
* The ENCP is designed for Progressive encoding but can also generate
* 1080i interlaced pixels, and was initialy desined to encode pixels for
* 1080i interlaced pixels, and was initially designed to encode pixels for
* VDAC to output RGB or YUV analog outputs.
* Its output is only used through the ENCP_DVI encoder for HDMI.
* The ENCL LVDS encoder is not implemented.

Просмотреть файл

@ -886,9 +886,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *new_reg)
static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
struct nouveau_bo *nvbo = nouveau_bo(bo);
@ -974,7 +973,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
return ret;
}
nouveau_bo_move_ntfy(bo, evict, new_reg);
nouveau_bo_move_ntfy(bo, new_reg);
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
goto out_ntfy;
@ -1039,9 +1038,7 @@ out:
}
out_ntfy:
if (ret) {
swap(*new_reg, bo->mem);
nouveau_bo_move_ntfy(bo, false, new_reg);
swap(*new_reg, bo->mem);
nouveau_bo_move_ntfy(bo, &bo->mem);
}
return ret;
}
@ -1315,7 +1312,7 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl
static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
nouveau_bo_move_ntfy(bo, false, NULL);
nouveau_bo_move_ntfy(bo, NULL);
}
struct ttm_device_funcs nouveau_bo_driver = {

Просмотреть файл

@ -2090,9 +2090,8 @@ static s32 pixinc(int pixels, u8 ps)
return 1 + (pixels - 1) * ps;
else if (pixels < 0)
return 1 - (-pixels + 1) * ps;
else
BUG();
return 0;
BUG();
}
static void calc_offset(u16 screen_width, u16 width,

Просмотреть файл

@ -4327,7 +4327,8 @@ static int omap_dsi_register_te_irq(struct dsi_data *dsi,
irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
IRQF_TRIGGER_RISING, "TE", dsi);
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"TE", dsi);
if (err) {
dev_err(dsi->dev, "request irq failed with %d\n", err);
gpiod_put(dsi->te_gpio);

Просмотреть файл

@ -48,16 +48,15 @@
#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29)
#define DISPC_IRQ_FRAMEDONE3 (1 << 30)
struct dss_device;
struct omap_drm_private;
struct omap_dss_device;
struct dispc_device;
struct drm_connector;
struct dss_device;
struct dss_lcd_mgr_config;
struct hdmi_avi_infoframe;
struct omap_drm_private;
struct omap_dss_device;
struct snd_aes_iec958;
struct snd_cea_861_aud_if;
struct hdmi_avi_infoframe;
struct drm_connector;
enum omap_display_type {
OMAP_DISPLAY_TYPE_NONE = 0,

Просмотреть файл

@ -898,8 +898,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
*/
dsi->hs_rate = 349440000;
dsi->lp_rate = 9600000;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_EOT_PACKET;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
/*
* Every new incarnation of this display must have a unique

Просмотреть файл

@ -184,9 +184,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
* As we only send commands we do not need to be continuously
* clocked.
*/
dsi->mode_flags =
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_EOT_PACKET;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
s6->supply = devm_regulator_get(dev, "vdd1");
if (IS_ERR(s6->supply))

Просмотреть файл

@ -97,7 +97,6 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
dsi->hs_rate = 349440000;
dsi->lp_rate = 9600000;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_EOT_PACKET |
MIPI_DSI_MODE_VIDEO_BURST;
ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,

Просмотреть файл

@ -376,12 +376,13 @@ static int panel_simple_get_hpd_gpio(struct device *dev,
return 0;
}
static int panel_simple_prepare(struct drm_panel *panel)
static int panel_simple_prepare_once(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
unsigned int delay;
int err;
int hpd_asserted;
unsigned long hpd_wait_us;
if (p->prepared_time != 0)
return 0;
@ -406,25 +407,63 @@ static int panel_simple_prepare(struct drm_panel *panel)
if (IS_ERR(p->hpd_gpio)) {
err = panel_simple_get_hpd_gpio(panel->dev, p, false);
if (err)
return err;
goto error;
}
if (p->desc->delay.hpd_absent_delay)
hpd_wait_us = p->desc->delay.hpd_absent_delay * 1000UL;
else
hpd_wait_us = 2000000;
err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
hpd_asserted, hpd_asserted,
1000, 2000000);
1000, hpd_wait_us);
if (hpd_asserted < 0)
err = hpd_asserted;
if (err) {
dev_err(panel->dev,
"error waiting for hpd GPIO: %d\n", err);
return err;
if (err != -ETIMEDOUT)
dev_err(panel->dev,
"error waiting for hpd GPIO: %d\n", err);
goto error;
}
}
p->prepared_time = ktime_get();
return 0;
error:
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
p->unprepared_time = ktime_get();
return err;
}
/*
* Some panels simply don't always come up and need to be power cycled to
* work properly. We'll allow for a handful of retries.
*/
#define MAX_PANEL_PREPARE_TRIES 5
static int panel_simple_prepare(struct drm_panel *panel)
{
int ret;
int try;
for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
ret = panel_simple_prepare_once(panel);
if (ret != -ETIMEDOUT)
break;
}
if (ret == -ETIMEDOUT)
dev_err(panel->dev, "Prepare timeout after %d tries\n", try);
else if (try)
dev_warn(panel->dev, "Prepare needed %d retries\n", try);
return ret;
}
static int panel_simple_enable(struct drm_panel *panel)
@ -1445,6 +1484,7 @@ static const struct panel_desc boe_nv110wtm_n61 = {
.delay = {
.hpd_absent_delay = 200,
.prepare_to_enable = 80,
.enable = 50,
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
@ -2368,6 +2408,36 @@ static const struct panel_desc innolux_g121x1_l03 = {
},
};
static const struct drm_display_mode innolux_n116bca_ea1_mode = {
.clock = 76420,
.hdisplay = 1366,
.hsync_start = 1366 + 136,
.hsync_end = 1366 + 136 + 30,
.htotal = 1366 + 136 + 30 + 60,
.vdisplay = 768,
.vsync_start = 768 + 8,
.vsync_end = 768 + 8 + 12,
.vtotal = 768 + 8 + 12 + 12,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc innolux_n116bca_ea1 = {
.modes = &innolux_n116bca_ea1_mode,
.num_modes = 1,
.bpc = 6,
.size = {
.width = 256,
.height = 144,
},
.delay = {
.hpd_absent_delay = 200,
.prepare_to_enable = 80,
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
.connector_type = DRM_MODE_CONNECTOR_eDP,
};
/*
* Datasheet specifies that at 60 Hz refresh rate:
* - total horizontal time: { 1506, 1592, 1716 }
@ -4283,6 +4353,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "innolux,g121x1-l03",
.data = &innolux_g121x1_l03,
}, {
.compatible = "innolux,n116bca-ea1",
.data = &innolux_n116bca_ea1,
}, {
.compatible = "innolux,n116bge",
.data = &innolux_n116bge,

Просмотреть файл

@ -449,8 +449,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_BURST;
else
dsi->mode_flags =
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_EOT_PACKET;
MIPI_DSI_CLOCK_NON_CONTINUOUS;
acx->supply = devm_regulator_get(dev, "vddi");
if (IS_ERR(acx->supply))

Просмотреть файл

@ -426,16 +426,13 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}

Просмотреть файл

@ -121,7 +121,6 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
}
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
{
struct qxl_bo *qbo;
@ -144,29 +143,22 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = &bo->mem;
int ret;
qxl_bo_move_notify(bo, evict, new_mem);
qxl_bo_move_notify(bo, new_mem);
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
goto out;
return ret;
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
out:
if (ret) {
swap(*new_mem, bo->mem);
qxl_bo_move_notify(bo, false, new_mem);
swap(*new_mem, bo->mem);
}
return ret;
return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
qxl_bo_move_notify(bo, false, NULL);
qxl_bo_move_notify(bo, NULL);
}
static struct ttm_device_funcs qxl_bo_driver = {

Просмотреть файл

@ -879,8 +879,6 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
static void vop_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc *crtc = new_state->crtc;

Просмотреть файл

@ -453,7 +453,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
return;
fence = READ_ONCE(entity->last_scheduled);
@ -467,8 +467,10 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
spin_unlock(&entity->rq_lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
}
/**

Просмотреть файл

@ -363,8 +363,7 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dsi->vdd_supply = devm_regulator_get(dev, "phy-dsi");
if (IS_ERR(dsi->vdd_supply)) {
ret = PTR_ERR(dsi->vdd_supply);
if (ret != -EPROBE_DEFER)
DRM_ERROR("Failed to request regulator: %d\n", ret);
dev_err_probe(dev, ret, "Failed to request regulator\n");
return ret;
}
@ -377,9 +376,7 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
if (ret != -EPROBE_DEFER)
DRM_ERROR("Unable to get pll reference clock: %d\n",
ret);
dev_err_probe(dev, ret, "Unable to get pll reference clock\n");
goto err_clk_get;
}
@ -419,7 +416,7 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dsi->dsi = dw_mipi_dsi_probe(pdev, &dw_mipi_dsi_stm_plat_data);
if (IS_ERR(dsi->dsi)) {
ret = PTR_ERR(dsi->dsi);
DRM_ERROR("Failed to initialize mipi dsi host: %d\n", ret);
dev_err_probe(dev, ret, "Failed to initialize mipi dsi host\n");
goto err_dsi_probe;
}

Просмотреть файл

@ -31,6 +31,7 @@
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <video/videomode.h>
@ -1054,14 +1055,6 @@ cleanup:
return ret;
}
/*
* DRM_ENCODER
*/
static const struct drm_encoder_funcs ltdc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static void ltdc_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *ddev = encoder->dev;
@ -1122,8 +1115,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
encoder->possible_crtcs = CRTC_MASK;
encoder->possible_clones = 0; /* No cloning support */
drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
drm_simple_encoder_init(ddev, encoder, DRM_MODE_ENCODER_DPI);
drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);

Просмотреть файл

@ -203,18 +203,19 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
unsigned long clk_rate, real_rate, req_rate;
unsigned long clk_rate, real_pclk_rate, pclk_rate;
unsigned int clkdiv;
int ret;
clkdiv = 2; /* first try using a standard divider of 2 */
/* mode.clock is in KHz, set_rate wants parameter in Hz */
req_rate = crtc->mode.clock * 1000;
pclk_rate = crtc->mode.clock * 1000;
ret = clk_set_rate(priv->clk, req_rate * clkdiv);
ret = clk_set_rate(priv->clk, pclk_rate * clkdiv);
clk_rate = clk_get_rate(priv->clk);
if (ret < 0 || tilcdc_pclk_diff(req_rate, clk_rate) > 5) {
real_pclk_rate = clk_rate / clkdiv;
if (ret < 0 || tilcdc_pclk_diff(pclk_rate, real_pclk_rate) > 5) {
/*
* If we fail to set the clock rate (some architectures don't
* use the common clock framework yet and may not implement
@ -229,7 +230,7 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
return;
}
clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);
clkdiv = DIV_ROUND_CLOSEST(clk_rate, pclk_rate);
/*
* Emit a warning if the real clock rate resulting from the
@ -238,12 +239,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
* 5% is an arbitrary value - LCDs are usually quite tolerant
* about pixel clock rates.
*/
real_rate = clkdiv * req_rate;
real_pclk_rate = clk_rate / clkdiv;
if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
if (tilcdc_pclk_diff(pclk_rate, real_pclk_rate) > 5) {
dev_warn(dev->dev,
"effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
clk_rate, real_rate);
"effective pixel clock rate (%luHz) differs from the requested rate (%luHz)\n",
real_pclk_rate, pclk_rate);
}
}

Просмотреть файл

@ -399,7 +399,6 @@ static struct platform_driver panel_driver = {
.probe = panel_probe,
.remove = panel_remove,
.driver = {
.owner = THIS_MODULE,
.name = "tilcdc-panel",
.of_match_table = panel_of_match,
},

Просмотреть файл

@ -73,7 +73,6 @@ static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
list_del_init(&bo->swap);
list_del_init(&bo->lru);
if (bdev->funcs->del_from_lru_notify)
@ -105,16 +104,6 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
man = ttm_manager_type(bdev, mem->mem_type);
list_move_tail(&bo->lru, &man->lru[bo->priority]);
if (man->use_tt && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
struct list_head *swap;
swap = &ttm_glob.swap_lru[bo->priority];
list_move_tail(&bo->swap, swap);
} else {
list_del_init(&bo->swap);
}
if (bdev->funcs->del_from_lru_notify)
bdev->funcs->del_from_lru_notify(bo);
@ -129,9 +118,6 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
break;
}
if (bo->ttm && !(bo->ttm->page_flags &
(TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
}
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
@ -169,20 +155,6 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
&pos->last->lru);
}
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
struct list_head *lru;
if (!pos->first)
continue;
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
lru = &ttm_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
@ -271,9 +243,9 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
* reference it any more. The only tricky case is the trylock on
* the resv object while holding the lru_lock.
*/
spin_lock(&ttm_glob.lru_lock);
spin_lock(&bo->bdev->lru_lock);
bo->base.resv = &bo->base._resv;
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
}
return r;
@ -332,7 +304,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
30 * HZ);
@ -342,7 +314,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;
spin_lock(&ttm_glob.lru_lock);
spin_lock(&bo->bdev->lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
@ -352,7 +324,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
return 0;
}
ret = 0;
@ -361,13 +333,13 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
return ret;
}
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
@ -384,13 +356,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
*/
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
{
struct ttm_global *glob = &ttm_glob;
struct list_head removed;
bool empty;
INIT_LIST_HEAD(&removed);
spin_lock(&glob->lru_lock);
spin_lock(&bdev->lru_lock);
while (!list_empty(&bdev->ddestroy)) {
struct ttm_buffer_object *bo;
@ -401,24 +372,24 @@ bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
continue;
if (remove_all || bo->base.resv != &bo->base._resv) {
spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->lru_lock);
dma_resv_lock(bo->base.resv, NULL);
spin_lock(&glob->lru_lock);
spin_lock(&bdev->lru_lock);
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
} else if (dma_resv_trylock(bo->base.resv)) {
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
} else {
spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->lru_lock);
}
ttm_bo_put(bo);
spin_lock(&glob->lru_lock);
spin_lock(&bdev->lru_lock);
}
list_splice_tail(&removed, &bdev->ddestroy);
empty = list_empty(&bdev->ddestroy);
spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->lru_lock);
return empty;
}
@ -453,7 +424,7 @@ static void ttm_bo_release(struct kref *kref)
ttm_bo_flush_all_fences(bo);
bo->deleted = true;
spin_lock(&ttm_glob.lru_lock);
spin_lock(&bo->bdev->lru_lock);
/*
* Make pinned bos immediately available to
@ -470,17 +441,17 @@ static void ttm_bo_release(struct kref *kref)
kref_init(&bo->kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
return;
}
spin_lock(&ttm_glob.lru_lock);
spin_lock(&bo->bdev->lru_lock);
ttm_bo_del_from_lru(bo);
list_del(&bo->ddestroy);
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
dma_resv_unlock(bo->base.resv);
@ -654,7 +625,7 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
unsigned i;
int ret;
spin_lock(&ttm_glob.lru_lock);
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
@ -691,7 +662,7 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
if (!bo) {
if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
busy_bo = NULL;
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bdev->lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
ttm_bo_put(busy_bo);
@ -705,7 +676,7 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
return ret;
}
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bdev->lru_lock);
ret = ttm_bo_evict(bo, ctx);
if (locked)
@ -805,10 +776,9 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = place->mem_type;
mem->placement = place->flags;
spin_lock(&ttm_glob.lru_lock);
spin_lock(&bo->bdev->lru_lock);
ttm_bo_move_to_lru_tail(bo, mem, NULL);
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
return 0;
}
@ -1065,7 +1035,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
kref_init(&bo->kref);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
bo->bdev = bdev;
bo->type = type;
bo->mem.mem_type = TTM_PL_SYSTEM;
@ -1193,56 +1162,34 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
/*
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
*/
int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
struct ttm_global *glob = &ttm_glob;
struct ttm_buffer_object *bo;
int ret = -EBUSY;
bool locked;
unsigned i;
int ret;
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
NULL))
continue;
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
return -EBUSY;
if (!ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
continue;
}
ret = 0;
break;
}
if (!ret)
break;
}
if (ret) {
spin_unlock(&glob->lru_lock);
return ret;
if (!ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
return -EBUSY;
}
if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, false, false, locked);
ttm_bo_cleanup_refs(bo, false, false, locked);
ttm_bo_put(bo);
return ret;
return 0;
}
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
/* TODO: Cleanup the locking */
spin_unlock(&bo->bdev->lru_lock);
/**
/*
* Move to system cached
*/
if (bo->mem.mem_type != TTM_PL_SYSTEM) {
struct ttm_operation_ctx ctx = { false, false };
struct ttm_resource evict_mem;
@ -1262,29 +1209,26 @@ int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
}
}
/**
/*
* Make sure BO is idle.
*/
ret = ttm_bo_wait(bo, false, false);
if (unlikely(ret != 0))
goto out;
ttm_bo_unmap_virtual(bo);
/**
/*
* Swap out. Buffer will be swapped in again as soon as
* anyone tries to access a ttm page.
*/
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:
/**
*
/*
* Unreserve without putting on LRU to avoid swapping out an
* already swapped buffer.
*/
@ -1293,7 +1237,6 @@ out:
ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{

Просмотреть файл

@ -303,7 +303,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
atomic_inc(&ttm_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);

Просмотреть файл

@ -53,7 +53,6 @@ static void ttm_global_release(void)
goto out;
ttm_pool_mgr_fini();
ttm_tt_mgr_fini();
__free_page(glob->dummy_read_page);
memset(glob, 0, sizeof(*glob));
@ -64,10 +63,9 @@ out:
static int ttm_global_init(void)
{
struct ttm_global *glob = &ttm_glob;
unsigned long num_pages;
unsigned long num_pages, num_dma32;
struct sysinfo si;
int ret = 0;
unsigned i;
mutex_lock(&ttm_global_mutex);
if (++ttm_glob_use_count > 1)
@ -79,10 +77,16 @@ static int ttm_global_init(void)
* system memory.
*/
num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
ttm_pool_mgr_init(num_pages * 50 / 100);
ttm_tt_mgr_init();
num_pages /= 2;
/* But for DMA32 we limit ourself to only use 2GiB maximum. */
num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
>> PAGE_SHIFT;
num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));
ttm_pool_mgr_init(num_pages);
ttm_tt_mgr_init(num_pages, num_dma32);
spin_lock_init(&glob->lru_lock);
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@ -90,8 +94,6 @@ static int ttm_global_init(void)
goto out;
}
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&glob->swap_lru[i]);
INIT_LIST_HEAD(&glob->device_list);
atomic_set(&glob->bo_count, 0);
@ -102,6 +104,67 @@ out:
return ret;
}
/**
* A buffer object shrink method that tries to swap out the first
* buffer object on the global::swap_lru list.
*/
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
struct ttm_global *glob = &ttm_glob;
struct ttm_device *bdev;
int ret = -EBUSY;
mutex_lock(&ttm_global_mutex);
list_for_each_entry(bdev, &glob->device_list, device_list) {
ret = ttm_device_swapout(bdev, ctx, gfp_flags);
if (ret > 0) {
list_move_tail(&bdev->device_list, &glob->device_list);
break;
}
}
mutex_unlock(&ttm_global_mutex);
return ret;
}
EXPORT_SYMBOL(ttm_global_swapout);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
struct ttm_resource_manager *man;
struct ttm_buffer_object *bo;
unsigned i, j;
int ret;
spin_lock(&bdev->lru_lock);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
list_for_each_entry(bo, &man->lru[j], lru) {
uint32_t num_pages;
if (!bo->ttm ||
bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
continue;
num_pages = bo->ttm->num_pages;
ret = ttm_bo_swapout(bo, ctx, gfp_flags);
/* ttm_bo_swapout has dropped the lru_lock */
if (!ret)
return num_pages;
if (ret != -EBUSY)
return ret;
}
}
}
spin_unlock(&bdev->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
static void ttm_init_sysman(struct ttm_device *bdev)
{
struct ttm_resource_manager *man = &bdev->sysman;
@ -164,6 +227,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
spin_lock_init(&bdev->lru_lock);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
@ -176,7 +240,6 @@ EXPORT_SYMBOL(ttm_device_init);
void ttm_device_fini(struct ttm_device *bdev)
{
struct ttm_global *glob = &ttm_glob;
struct ttm_resource_manager *man;
unsigned i;
@ -193,11 +256,11 @@ void ttm_device_fini(struct ttm_device *bdev)
if (ttm_bo_delayed_delete(bdev, true))
pr_debug("Delayed destroy list was clean\n");
spin_lock(&glob->lru_lock);
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&man->lru[0]))
pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->lru_lock);
ttm_pool_fini(&bdev->pool);
ttm_global_release();

Просмотреть файл

@ -51,14 +51,12 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
if (list_empty(list))
return;
spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
@ -154,7 +152,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
if (list_empty(list))
return;
spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
@ -162,10 +159,9 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}

Просмотреть файл

@ -91,7 +91,6 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
.no_wait_gpu = false,
.force_alloc = true
};
struct ttm_global *glob = &ttm_glob;
struct dma_fence *fence;
int ret;
unsigned i;
@ -100,18 +99,18 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->lru_lock);
ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
NULL);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
spin_lock(&bdev->lru_lock);
}
}
spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->lru_lock);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);

Просмотреть файл

@ -40,8 +40,18 @@
#include "ttm_module.h"
static struct shrinker mm_shrinker;
static atomic_long_t swapable_pages;
static unsigned long ttm_pages_limit;
MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);
static unsigned long ttm_dma32_pages_limit;
MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;
/*
* Allocates a ttm structure for the given BO.
@ -294,8 +304,6 @@ static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; ++i)
ttm->pages[i]->mapping = bdev->dev_mapping;
atomic_long_add(ttm->num_pages, &swapable_pages);
}
int ttm_tt_populate(struct ttm_device *bdev,
@ -309,12 +317,25 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ttm_tt_is_populated(ttm))
return 0;
atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_add(ttm->num_pages, &ttm_dma32_pages_allocated);
while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
atomic_long_read(&ttm_dma32_pages_allocated) >
ttm_dma32_pages_limit) {
ret = ttm_global_swapout(ctx, GFP_KERNEL);
if (ret)
goto error;
}
if (bdev->funcs->ttm_tt_populate)
ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
else
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
if (ret)
return ret;
goto error;
ttm_tt_add_mapping(bdev, ttm);
ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
@ -327,6 +348,12 @@ int ttm_tt_populate(struct ttm_device *bdev,
}
return 0;
error:
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages, &ttm_dma32_pages_allocated);
return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
@ -342,12 +369,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
(*page)->mapping = NULL;
(*page++)->index = 0;
}
atomic_long_sub(ttm->num_pages, &swapable_pages);
}
void ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
if (!ttm_tt_is_populated(ttm))
return;
@ -357,76 +381,24 @@ void ttm_tt_unpopulate(struct ttm_device *bdev,
bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_free(&bdev->pool, ttm);
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages, &ttm_dma32_pages_allocated);
ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}
/* As long as pages are available make sure to release at least one */
static unsigned long ttm_tt_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct ttm_operation_ctx ctx = {
.no_wait_gpu = false
};
int ret;
ret = ttm_bo_swapout(&ctx, GFP_NOFS);
return ret < 0 ? SHRINK_EMPTY : ret;
}
/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_tt_shrinker_count(struct shrinker *shrink,
struct shrink_control *sc)
{
unsigned long num_pages;
num_pages = atomic_long_read(&swapable_pages);
return num_pages ? num_pages : SHRINK_EMPTY;
}
#ifdef CONFIG_DEBUG_FS
/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
struct shrink_control sc = { .gfp_mask = GFP_KERNEL };
fs_reclaim_acquire(GFP_KERNEL);
seq_printf(m, "%lu/%lu\n", ttm_tt_shrinker_count(&mm_shrinker, &sc),
ttm_tt_shrinker_scan(&mm_shrinker, &sc));
fs_reclaim_release(GFP_KERNEL);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
#endif
/**
* ttm_tt_mgr_init - register with the MM shrinker
*
* Register with the MM shrinker for swapping out BOs.
*/
int ttm_tt_mgr_init(void)
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
&ttm_tt_debugfs_shrink_fops);
#endif
if (!ttm_pages_limit)
ttm_pages_limit = num_pages;
mm_shrinker.count_objects = ttm_tt_shrinker_count;
mm_shrinker.scan_objects = ttm_tt_shrinker_scan;
mm_shrinker.seeks = 1;
return register_shrinker(&mm_shrinker);
}
/**
* ttm_tt_mgr_fini - unregister our MM shrinker
*
* Unregisters the MM shrinker.
*/
void ttm_tt_mgr_fini(void)
{
unregister_shrinker(&mm_shrinker);
if (!ttm_dma32_pages_limit)
ttm_dma32_pages_limit = num_dma32_pages;
}

Просмотреть файл

@ -12,15 +12,13 @@
int vbox_mm_init(struct vbox_private *vbox)
{
struct drm_vram_mm *vmm;
int ret;
struct drm_device *dev = &vbox->ddev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
vbox->available_vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
if (ret) {
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
return ret;
}
@ -33,5 +31,4 @@ int vbox_mm_init(struct vbox_private *vbox)
void vbox_mm_fini(struct vbox_private *vbox)
{
arch_phys_wc_del(vbox->fb_mtrr);
drm_vram_helper_release_mm(&vbox->ddev);
}

Просмотреть файл

@ -174,7 +174,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if (!sync_file) {
dma_fence_put(&out_fence->f);
ret = -ENOMEM;
goto out_memdup;
goto out_unresv;
}
exbuf->fence_fd = out_fence_fd;

Просмотреть файл

@ -248,6 +248,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
if (ret != 0) {
virtio_gpu_array_put_free(objs);
virtio_gpu_free_object(&shmem_obj->base);
return ret;
}

Просмотреть файл

@ -22,7 +22,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
WARN_ON(ret_overrun != 1);
if (ret_overrun != 1)
pr_warn("%s: vblank timer overrun\n", __func__);
spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);

Просмотреть файл

@ -38,6 +38,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_device.h>
#include "ttm_memory.h"
@ -277,7 +278,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
ret = ttm_bo_swapout(ctx, GFP_KERNEL);
ret = ttm_global_swapout(ctx, GFP_KERNEL);
spin_lock(&glob->lock);
if (unlikely(ret < 0))
break;

Просмотреть файл

@ -1371,7 +1371,7 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
while (ttm_bo_swapout(&ctx, GFP_KERNEL) > 0);
while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
if (dev_priv->enable_fb)
vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {

Просмотреть файл

@ -775,7 +775,8 @@ extern void vmw_resource_unreserve(struct vmw_resource *res,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
struct ttm_resource *old_mem,
struct ttm_resource *new_mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);

Просмотреть файл

@ -847,13 +847,15 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
* vmw_query_move_notify - Read back cached query states
*
* @bo: The TTM buffer object about to move.
* @mem: The memory region @bo is moving to.
* @old_mem: The memory region @bo is moving from.
* @new_mem: The memory region @bo is moving to.
*
* Called before the query MOB is swapped out to read back cached query
* states from the device.
*/
void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem)
struct ttm_resource *old_mem,
struct ttm_resource *new_mem)
{
struct vmw_buffer_object *dx_query_mob;
struct ttm_device *bdev = bo->bdev;
@ -871,7 +873,8 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
}
/* If BO is being moved from MOB to system memory */
if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
if (new_mem->mem_type == TTM_PL_SYSTEM &&
old_mem->mem_type == VMW_PL_MOB) {
struct vmw_fence_obj *fence;
(void) vmw_query_readback_all(dx_query_mob);

Просмотреть файл

@ -691,21 +691,19 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *
* vmw_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
* @evict: Unused
* @mem: The struct ttm_resource indicating to what memory
* @old_mem: The old memory where we move from
* @new_mem: The struct ttm_resource indicating to what memory
* region the move is taking place.
*
* Calls move_notify for all subsystems needing it.
* (currently only resources).
*/
static void vmw_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *mem)
struct ttm_resource *old_mem,
struct ttm_resource *new_mem)
{
if (!mem)
return;
vmw_bo_move_notify(bo, mem);
vmw_query_move_notify(bo, mem);
vmw_bo_move_notify(bo, new_mem);
vmw_query_move_notify(bo, old_mem, new_mem);
}
@ -736,7 +734,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
return ret;
}
vmw_move_notify(bo, evict, new_mem);
vmw_move_notify(bo, &bo->mem, new_mem);
if (old_man->use_tt && new_man->use_tt) {
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
@ -758,18 +756,10 @@ static int vmw_move(struct ttm_buffer_object *bo,
}
return 0;
fail:
swap(*new_mem, bo->mem);
vmw_move_notify(bo, false, new_mem);
swap(*new_mem, bo->mem);
vmw_move_notify(bo, new_mem, &bo->mem);
return ret;
}
static void
vmw_delete_mem_notify(struct ttm_buffer_object *bo)
{
vmw_move_notify(bo, false, NULL);
}
struct ttm_device_funcs vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
@ -779,7 +769,6 @@ struct ttm_device_funcs vmw_bo_driver = {
.evict_flags = vmw_evict_flags,
.move = vmw_move,
.verify_access = vmw_verify_access,
.delete_mem_notify = vmw_delete_mem_notify,
.swap_notify = vmw_swap_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

Просмотреть файл

@ -52,13 +52,6 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
get_page(page);
if (vmf->vma->vm_file)
page->mapping = vmf->vma->vm_file->f_mapping;
else
printk(KERN_ERR "no mapping available\n");
BUG_ON(!page->mapping);
page->index = vmf->pgoff;
vmf->page = page;
@ -151,17 +144,6 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
.page_mkwrite = fb_deferred_io_mkwrite,
};
static int fb_deferred_io_set_page_dirty(struct page *page)
{
if (!PageDirty(page))
SetPageDirty(page);
return 0;
}
static const struct address_space_operations fb_deferred_io_aops = {
.set_page_dirty = fb_deferred_io_set_page_dirty,
};
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
@ -212,29 +194,12 @@ void fb_deferred_io_init(struct fb_info *info)
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file)
{
file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page;
int i;
BUG_ON(!fbdefio);
cancel_delayed_work_sync(&info->deferred_work);
/* clear out the mapping that we setup */
for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
page = fb_deferred_io_page(info, i);
page->mapping = NULL;
}
mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

Просмотреть файл

@ -1415,10 +1415,6 @@ __releases(&info->lock)
if (res)
module_put(info->fbops->owner);
}
#ifdef CONFIG_FB_DEFERRED_IO
if (info->fbdefio)
fb_deferred_io_open(info, inode, file);
#endif
out:
unlock_fb_info(info);
if (res)

Просмотреть файл

@ -29,7 +29,7 @@ extern bool omapfb_debug;
printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__); \
} while (0)
#else
#define DBG(format, ...)
#define DBG(format, ...) no_printk(format, ## __VA_ARGS__)
#endif
#define FB2OFB(fb_info) ((struct omapfb_info *)(fb_info->par))

Просмотреть файл

@ -22,6 +22,10 @@
#ifndef DRM_DISPLAYID_H
#define DRM_DISPLAYID_H
#include <linux/types.h>
struct edid;
#define DATA_BLOCK_PRODUCT_ID 0x00
#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
@ -52,7 +56,7 @@
#define PRODUCT_TYPE_REPEATER 5
#define PRODUCT_TYPE_DIRECT_DRIVE 6
struct displayid_hdr {
struct displayid_header {
u8 rev;
u8 bytes;
u8 prod_id;
@ -92,12 +96,22 @@ struct displayid_detailed_timing_block {
struct displayid_detailed_timings_1 timings[];
};
#define for_each_displayid_db(displayid, block, idx, length) \
for ((block) = (struct displayid_block *)&(displayid)[idx]; \
(idx) + sizeof(struct displayid_block) <= (length) && \
(idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
(block)->num_bytes > 0; \
(idx) += sizeof(struct displayid_block) + (block)->num_bytes, \
(block) = (struct displayid_block *)&(displayid)[idx])
/* DisplayID iteration */
struct displayid_iter {
const struct edid *edid;
const u8 *section;
int length;
int idx;
int ext_index;
};
void displayid_iter_edid_begin(const struct edid *edid,
struct displayid_iter *iter);
const struct displayid_block *
__displayid_iter_next(struct displayid_iter *iter);
#define displayid_iter_for_each(__block, __iter) \
while (((__block) = __displayid_iter_next(__iter)))
void displayid_iter_end(struct displayid_iter *iter);
#endif

Просмотреть файл

@ -74,7 +74,7 @@ enum drm_driver_feature {
* @DRIVER_ATOMIC:
*
* Driver supports the full atomic modesetting userspace API. Drivers
* which only use atomic internally, but do not the support the full
* which only use atomic internally, but do not support the full
* userspace API (e.g. not all properties converted to atomic, or
* multi-plane updates are not guaranteed to be tear-free) should not
* set this flag.

Просмотреть файл

@ -543,5 +543,8 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
struct drm_display_mode *
drm_display_mode_from_cea_vic(struct drm_device *dev,
u8 video_code);
const u8 *drm_find_edid_extension(const struct edid *edid,
int ext_id, int *ext_index);
#endif /* __DRM_EDID_H__ */

Просмотреть файл

@ -224,6 +224,24 @@ void *__drmm_encoder_alloc(struct drm_device *dev,
offsetof(type, member), funcs, \
encoder_type, name, ##__VA_ARGS__))
/**
* drmm_plain_encoder_alloc - Allocate and initialize an encoder
* @dev: drm device
* @funcs: callbacks for this encoder (optional)
* @encoder_type: user visible type of the encoder
* @name: printf style format string for the encoder name, or NULL for default name
*
* This is a simplified version of drmm_encoder_alloc(), which only allocates
* and returns a struct drm_encoder instance, with no subclassing.
*
* Returns:
* Pointer to the new drm_encoder struct, or ERR_PTR on failure.
*/
#define drmm_plain_encoder_alloc(dev, funcs, encoder_type, name, ...) \
((struct drm_encoder *) \
__drmm_encoder_alloc(dev, sizeof(struct drm_encoder), \
0, funcs, encoder_type, name, ##__VA_ARGS__))
/**
* drm_encoder_index - find the index of a registered encoder
* @encoder: encoder to find index for

333
include/drm/gud.h Normal file
Просмотреть файл

@ -0,0 +1,333 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2020 Noralf Trønnes
*/
#ifndef __LINUX_GUD_H
#define __LINUX_GUD_H
#include <linux/types.h>
/*
* struct gud_display_descriptor_req - Display descriptor
* @magic: Magic value GUD_DISPLAY_MAGIC
* @version: Protocol version
* @flags: Flags
* - STATUS_ON_SET: Always do a status request after a SET request.
* This is used by the Linux gadget driver since it has
* no way to control the status stage of a control OUT
* request that has a payload.
* - FULL_UPDATE: Always send the entire framebuffer when flushing changes.
* The GUD_REQ_SET_BUFFER request will not be sent
* before each bulk transfer, it will only be sent if the
* previous bulk transfer had failed. This gives the device
* a chance to reset its state machine if needed.
* This flag can not be used in combination with compression.
* @compression: Supported compression types
* - GUD_COMPRESSION_LZ4: LZ4 lossless compression.
* @max_buffer_size: Maximum buffer size the device can handle (optional).
* This is useful for devices that don't have a big enough
* buffer to decompress the entire framebuffer in one go.
* @min_width: Minimum pixel width the controller can handle
* @max_width: Maximum width
* @min_height: Minimum height
* @max_height: Maximum height
*
* Devices that have only one display mode will have min_width == max_width
* and min_height == max_height.
*/
struct gud_display_descriptor_req {
__le32 magic;
#define GUD_DISPLAY_MAGIC 0x1d50614d
__u8 version;
__le32 flags;
#define GUD_DISPLAY_FLAG_STATUS_ON_SET BIT(0)
#define GUD_DISPLAY_FLAG_FULL_UPDATE BIT(1)
__u8 compression;
#define GUD_COMPRESSION_LZ4 BIT(0)
__le32 max_buffer_size;
__le32 min_width;
__le32 max_width;
__le32 min_height;
__le32 max_height;
} __packed;
/*
* struct gud_property_req - Property
* @prop: Property
* @val: Value
*/
struct gud_property_req {
__le16 prop;
__le64 val;
} __packed;
/*
* struct gud_display_mode_req - Display mode
* @clock: Pixel clock in kHz
* @hdisplay: Horizontal display size
* @hsync_start: Horizontal sync start
* @hsync_end: Horizontal sync end
* @htotal: Horizontal total size
* @vdisplay: Vertical display size
* @vsync_start: Vertical sync start
* @vsync_end: Vertical sync end
* @vtotal: Vertical total size
* @flags: Bits 0-13 are the same as in the RandR protocol and also what DRM uses.
* The deprecated bits are reused for internal protocol flags leaving us
* free to follow DRM for the other bits in the future.
* - FLAG_PREFERRED: Set on the preferred display mode.
*/
struct gud_display_mode_req {
__le32 clock;
__le16 hdisplay;
__le16 hsync_start;
__le16 hsync_end;
__le16 htotal;
__le16 vdisplay;
__le16 vsync_start;
__le16 vsync_end;
__le16 vtotal;
__le32 flags;
#define GUD_DISPLAY_MODE_FLAG_PHSYNC BIT(0)
#define GUD_DISPLAY_MODE_FLAG_NHSYNC BIT(1)
#define GUD_DISPLAY_MODE_FLAG_PVSYNC BIT(2)
#define GUD_DISPLAY_MODE_FLAG_NVSYNC BIT(3)
#define GUD_DISPLAY_MODE_FLAG_INTERLACE BIT(4)
#define GUD_DISPLAY_MODE_FLAG_DBLSCAN BIT(5)
#define GUD_DISPLAY_MODE_FLAG_CSYNC BIT(6)
#define GUD_DISPLAY_MODE_FLAG_PCSYNC BIT(7)
#define GUD_DISPLAY_MODE_FLAG_NCSYNC BIT(8)
#define GUD_DISPLAY_MODE_FLAG_HSKEW BIT(9)
/* BCast and PixelMultiplex are deprecated */
#define GUD_DISPLAY_MODE_FLAG_DBLCLK BIT(12)
#define GUD_DISPLAY_MODE_FLAG_CLKDIV2 BIT(13)
#define GUD_DISPLAY_MODE_FLAG_USER_MASK \
(GUD_DISPLAY_MODE_FLAG_PHSYNC | GUD_DISPLAY_MODE_FLAG_NHSYNC | \
GUD_DISPLAY_MODE_FLAG_PVSYNC | GUD_DISPLAY_MODE_FLAG_NVSYNC | \
GUD_DISPLAY_MODE_FLAG_INTERLACE | GUD_DISPLAY_MODE_FLAG_DBLSCAN | \
GUD_DISPLAY_MODE_FLAG_CSYNC | GUD_DISPLAY_MODE_FLAG_PCSYNC | \
GUD_DISPLAY_MODE_FLAG_NCSYNC | GUD_DISPLAY_MODE_FLAG_HSKEW | \
GUD_DISPLAY_MODE_FLAG_DBLCLK | GUD_DISPLAY_MODE_FLAG_CLKDIV2)
/* Internal protocol flags */
#define GUD_DISPLAY_MODE_FLAG_PREFERRED BIT(10)
} __packed;
/*
* struct gud_connector_descriptor_req - Connector descriptor
* @connector_type: Connector type (GUD_CONNECTOR_TYPE_*).
* If the host doesn't support the type it should fall back to PANEL.
* @flags: Flags
* - POLL_STATUS: Connector status can change (polled every 10 seconds)
* - INTERLACE: Interlaced modes are supported
* - DOUBLESCAN: Doublescan modes are supported
*/
struct gud_connector_descriptor_req {
__u8 connector_type;
#define GUD_CONNECTOR_TYPE_PANEL 0
#define GUD_CONNECTOR_TYPE_VGA 1
#define GUD_CONNECTOR_TYPE_COMPOSITE 2
#define GUD_CONNECTOR_TYPE_SVIDEO 3
#define GUD_CONNECTOR_TYPE_COMPONENT 4
#define GUD_CONNECTOR_TYPE_DVI 5
#define GUD_CONNECTOR_TYPE_DISPLAYPORT 6
#define GUD_CONNECTOR_TYPE_HDMI 7
__le32 flags;
#define GUD_CONNECTOR_FLAGS_POLL_STATUS BIT(0)
#define GUD_CONNECTOR_FLAGS_INTERLACE BIT(1)
#define GUD_CONNECTOR_FLAGS_DOUBLESCAN BIT(2)
} __packed;
/*
* struct gud_set_buffer_req - Set buffer transfer info
* @x: X position of rectangle
* @y: Y position
* @width: Pixel width of rectangle
* @height: Pixel height
* @length: Buffer length in bytes
* @compression: Transfer compression
* @compressed_length: Compressed buffer length
*
* This request is issued right before the bulk transfer.
* @x, @y, @width and @height specifies the rectangle where the buffer should be
* placed inside the framebuffer.
*/
struct gud_set_buffer_req {
__le32 x;
__le32 y;
__le32 width;
__le32 height;
__le32 length;
__u8 compression;
__le32 compressed_length;
} __packed;
/*
* struct gud_state_req - Display state
* @mode: Display mode
* @format: Pixel format GUD_PIXEL_FORMAT_*
* @connector: Connector index
* @properties: Array of properties
*
* The entire state is transferred each time there's a change.
*/
struct gud_state_req {
struct gud_display_mode_req mode;
__u8 format;
__u8 connector;
struct gud_property_req properties[];
} __packed;
/* List of supported connector properties: */
/* Margins in pixels to deal with overscan, range 0-100 */
#define GUD_PROPERTY_TV_LEFT_MARGIN 1
#define GUD_PROPERTY_TV_RIGHT_MARGIN 2
#define GUD_PROPERTY_TV_TOP_MARGIN 3
#define GUD_PROPERTY_TV_BOTTOM_MARGIN 4
#define GUD_PROPERTY_TV_MODE 5
/* Brightness in percent, range 0-100 */
#define GUD_PROPERTY_TV_BRIGHTNESS 6
/* Contrast in percent, range 0-100 */
#define GUD_PROPERTY_TV_CONTRAST 7
/* Flicker reduction in percent, range 0-100 */
#define GUD_PROPERTY_TV_FLICKER_REDUCTION 8
/* Overscan in percent, range 0-100 */
#define GUD_PROPERTY_TV_OVERSCAN 9
/* Saturation in percent, range 0-100 */
#define GUD_PROPERTY_TV_SATURATION 10
/* Hue in percent, range 0-100 */
#define GUD_PROPERTY_TV_HUE 11
/*
* Backlight brightness is in the range 0-100 inclusive. The value represents the human perceptual
* brightness and not a linear PWM value. 0 is minimum brightness which should not turn the
* backlight completely off. The DPMS connector property should be used to control power which will
* trigger a GUD_REQ_SET_DISPLAY_ENABLE request.
*
* This does not map to a DRM property, it is used with the backlight device.
*/
#define GUD_PROPERTY_BACKLIGHT_BRIGHTNESS 12
/* List of supported properties that are not connector propeties: */
/*
* Plane rotation. Should return the supported bitmask on
* GUD_REQ_GET_PROPERTIES. GUD_ROTATION_0 is mandatory.
*
* Note: This is not display rotation so 90/270 will need scaling to make it fit (unless squared).
*/
#define GUD_PROPERTY_ROTATION 50
#define GUD_ROTATION_0 BIT(0)
#define GUD_ROTATION_90 BIT(1)
#define GUD_ROTATION_180 BIT(2)
#define GUD_ROTATION_270 BIT(3)
#define GUD_ROTATION_REFLECT_X BIT(4)
#define GUD_ROTATION_REFLECT_Y BIT(5)
#define GUD_ROTATION_MASK (GUD_ROTATION_0 | GUD_ROTATION_90 | \
GUD_ROTATION_180 | GUD_ROTATION_270 | \
GUD_ROTATION_REFLECT_X | GUD_ROTATION_REFLECT_Y)
/* USB Control requests: */
/* Get status from the last GET/SET control request. Value is u8. */
#define GUD_REQ_GET_STATUS 0x00
/* Status values: */
#define GUD_STATUS_OK 0x00
#define GUD_STATUS_BUSY 0x01
#define GUD_STATUS_REQUEST_NOT_SUPPORTED 0x02
#define GUD_STATUS_PROTOCOL_ERROR 0x03
#define GUD_STATUS_INVALID_PARAMETER 0x04
#define GUD_STATUS_ERROR 0x05
/* Get display descriptor as a &gud_display_descriptor_req */
#define GUD_REQ_GET_DESCRIPTOR 0x01
/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
#define GUD_REQ_GET_FORMATS 0x40
#define GUD_FORMATS_MAX_NUM 32
/* R1 is a 1-bit monochrome transfer format presented to userspace as XRGB8888 */
#define GUD_PIXEL_FORMAT_R1 0x01
#define GUD_PIXEL_FORMAT_XRGB1111 0x20
#define GUD_PIXEL_FORMAT_RGB565 0x40
#define GUD_PIXEL_FORMAT_XRGB8888 0x80
#define GUD_PIXEL_FORMAT_ARGB8888 0x81
/*
* Get supported properties that are not connector propeties as a &gud_property_req array.
* gud_property_req.val often contains the initial value for the property.
*/
#define GUD_REQ_GET_PROPERTIES 0x41
#define GUD_PROPERTIES_MAX_NUM 32
/* Connector requests have the connector index passed in the wValue field */
/* Get connector descriptors as an array of &gud_connector_descriptor_req */
#define GUD_REQ_GET_CONNECTORS 0x50
#define GUD_CONNECTORS_MAX_NUM 32
/*
* Get properties supported by the connector as a &gud_property_req array.
* gud_property_req.val often contains the initial value for the property.
*/
#define GUD_REQ_GET_CONNECTOR_PROPERTIES 0x51
#define GUD_CONNECTOR_PROPERTIES_MAX_NUM 32
/*
* Issued when there's a TV_MODE property present.
* Gets an array of the supported TV_MODE names each entry of length
* GUD_CONNECTOR_TV_MODE_NAME_LEN. Names must be NUL-terminated.
*/
#define GUD_REQ_GET_CONNECTOR_TV_MODE_VALUES 0x52
#define GUD_CONNECTOR_TV_MODE_NAME_LEN 16
#define GUD_CONNECTOR_TV_MODE_MAX_NUM 16
/* When userspace checks connector status, this is issued first, not used for poll requests. */
#define GUD_REQ_SET_CONNECTOR_FORCE_DETECT 0x53
/*
* Get connector status. Value is u8.
*
* Userspace will get a HOTPLUG uevent if one of the following is true:
* - Connection status has changed since last
* - CHANGED is set
*/
#define GUD_REQ_GET_CONNECTOR_STATUS 0x54
#define GUD_CONNECTOR_STATUS_DISCONNECTED 0x00
#define GUD_CONNECTOR_STATUS_CONNECTED 0x01
#define GUD_CONNECTOR_STATUS_UNKNOWN 0x02
#define GUD_CONNECTOR_STATUS_CONNECTED_MASK 0x03
#define GUD_CONNECTOR_STATUS_CHANGED BIT(7)
/*
* Display modes can be fetched as either EDID data or an array of &gud_display_mode_req.
*
* If GUD_REQ_GET_CONNECTOR_MODES returns zero, EDID is used to create display modes.
* If both display modes and EDID are returned, EDID is just passed on to userspace
* in the EDID connector property.
*/
/* Get &gud_display_mode_req array of supported display modes */
#define GUD_REQ_GET_CONNECTOR_MODES 0x55
#define GUD_CONNECTOR_MAX_NUM_MODES 128
/* Get Extended Display Identification Data */
#define GUD_REQ_GET_CONNECTOR_EDID 0x56
#define GUD_CONNECTOR_MAX_EDID_LEN 2048
/* Set buffer properties before bulk transfer as &gud_set_buffer_req */
#define GUD_REQ_SET_BUFFER 0x60
/* Check display configuration as &gud_state_req */
#define GUD_REQ_SET_STATE_CHECK 0x61
/* Apply the previous STATE_CHECK configuration */
#define GUD_REQ_SET_STATE_COMMIT 0x62
/* Enable/disable the display controller, value is u8: 0/1 */
#define GUD_REQ_SET_CONTROLLER_ENABLE 0x63
/* Enable/disable display/output (DPMS), value is u8: 0/1 */
#define GUD_REQ_SET_DISPLAY_ENABLE 0x64
#endif

Просмотреть файл

@ -144,7 +144,6 @@ struct ttm_buffer_object {
struct list_head lru;
struct list_head ddestroy;
struct list_head swap;
/**
* Members protected by a bo reservation.
@ -560,7 +559,8 @@ ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write);
int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the

Просмотреть файл

@ -69,7 +69,6 @@ struct ttm_lru_bulk_move_pos {
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
};
/*
@ -181,9 +180,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
spin_lock(&ttm_glob.lru_lock);
spin_lock(&bo->bdev->lru_lock);
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
spin_unlock(&ttm_glob.lru_lock);
spin_unlock(&bo->bdev->lru_lock);
}
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,

Просмотреть файл

@ -56,18 +56,12 @@ extern struct ttm_global {
*/
struct page *dummy_read_page;
spinlock_t lru_lock;
/**
* Protected by ttm_global_mutex.
*/
struct list_head device_list;
/**
* Protected by the lru_lock.
*/
struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
/**
* Internal protection.
*/
@ -282,8 +276,9 @@ struct ttm_device {
struct ttm_pool pool;
/*
* Protected by the global:lru lock.
* Protection for the per manager LRU and ddestroy lists.
*/
spinlock_t lru_lock;
struct list_head ddestroy;
/*
@ -297,6 +292,10 @@ struct ttm_device {
struct delayed_work wq;
};
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{

Просмотреть файл

@ -157,8 +157,7 @@ int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_oper
*/
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
int ttm_tt_mgr_init(void);
void ttm_tt_mgr_fini(void);
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>

Просмотреть файл

@ -659,9 +659,6 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
/* drivers/video/fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
extern void fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);

Просмотреть файл

@ -112,7 +112,9 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
#if defined(CONFIG_VGA_ARB)
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
#else
#define vga_put(pdev, rsrc)
static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc)
{
}
#endif

Просмотреть файл

@ -625,30 +625,147 @@ struct drm_gem_open {
__u64 size;
};
/**
* DRM_CAP_DUMB_BUFFER
*
* If set to 1, the driver supports creating dumb buffers via the
* &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
*/
#define DRM_CAP_DUMB_BUFFER 0x1
/**
* DRM_CAP_VBLANK_HIGH_CRTC
*
* If set to 1, the kernel supports specifying a CRTC index in the high bits of
* &drm_wait_vblank_request.type.
*
* Starting kernel version 2.6.39, this capability is always set to 1.
*/
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
/**
* DRM_CAP_DUMB_PREFERRED_DEPTH
*
* The preferred bit depth for dumb buffers.
*
* The bit depth is the number of bits used to indicate the color of a single
* pixel excluding any padding. This is different from the number of bits per
* pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
* pixel.
*
* Note that this preference only applies to dumb buffers, it's irrelevant for
* other types of buffers.
*/
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
/**
* DRM_CAP_DUMB_PREFER_SHADOW
*
* If set to 1, the driver prefers userspace to render to a shadow buffer
* instead of directly rendering to a dumb buffer. For best speed, userspace
* should do streaming ordered memory copies into the dumb buffer and never
* read from it.
*
* Note that this preference only applies to dumb buffers, it's irrelevant for
* other types of buffers.
*/
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
/**
* DRM_CAP_PRIME
*
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
* PRIME buffers are exposed as dma-buf file descriptors. See
* Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
*/
#define DRM_CAP_PRIME 0x5
/**
* DRM_PRIME_CAP_IMPORT
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
* DRM_PRIME_CAP_EXPORT
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
* DRM_CAP_TIMESTAMP_MONOTONIC
*
* If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
* struct drm_event_vblank. If set to 1, the kernel will report timestamps with
* ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
* clocks.
*
* Starting from kernel version 2.6.39, the default value for this capability
* is 1. Starting kernel version 4.15, this capability is always set to 1.
*/
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/*
* The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
* combination for the hardware cursor. The intention is that a hardware
* agnostic userspace can query a cursor plane size to use.
/**
* DRM_CAP_CURSOR_WIDTH
*
* The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
* width x height combination for the hardware cursor. The intention is that a
* hardware agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
/**
* DRM_CAP_CURSOR_HEIGHT
*
* See &DRM_CAP_CURSOR_WIDTH.
*/
#define DRM_CAP_CURSOR_HEIGHT 0x9
/**
* DRM_CAP_ADDFB2_MODIFIERS
*
* If set to 1, the driver supports supplying modifiers in the
* &DRM_IOCTL_MODE_ADDFB2 ioctl.
*/
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
/**
* DRM_CAP_PAGE_FLIP_TARGET
*
* If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
* &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
* &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
* ioctl.
*/
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
/**
* DRM_CAP_CRTC_IN_VBLANK_EVENT
*
* If set to 1, the kernel supports reporting the CRTC ID in
* &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
* &DRM_EVENT_FLIP_COMPLETE events.
*
* Starting kernel version 4.12, this capability is always set to 1.
*/
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
/**
* DRM_CAP_SYNCOBJ
*
* If set to 1, the driver supports sync objects. See
* Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
* Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/* DRM_IOCTL_GET_CAP ioctl argument type */

Просмотреть файл

@ -388,6 +388,7 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_DPI 17
#define DRM_MODE_CONNECTOR_WRITEBACK 18
#define DRM_MODE_CONNECTOR_SPI 19
#define DRM_MODE_CONNECTOR_USB 20
/**
* struct drm_mode_get_connector - Get connector metadata.