Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main git pull for the drm,

  I pretty much froze major pulls at -rc5/6 time, and haven't had much
  fallout, so will probably continue doing that.

  Lots of changes all over: a big internal header cleanup to make it clear
  which drm features are legacy things and which are things that modern
  KMS drivers should be using.  Also a big move to use the new generic fences
  in all the TTM drivers.

  core:
        atomic prep work,
        vblank rework changes, allows immediate vblank disables
        major header reworking and cleanups to better delineate legacy
        interfaces from what KMS drivers should be using.
        cursor planes locking fixes

  ttm:
        move to generic fences (affects all TTM drivers)
        ppc64 caching fixes

  radeon:
        userptr support,
        uvd for old asics,
        reset rework for fence changes
        better buffer placement changes,
        dpm feature enablement
        hdmi audio support fixes

  intel:
        Cherryview work,
        180 degree rotation,
        skylake prep work,
        execlist command submission
        full ppgtt prep work
        cursor improvements
        edid caching,
        vdd handling improvements

  nouveau:
        fence reworking
        kepler memory clock work
        gt21x clock work
        fan control improvements
        hdmi infoframe fixes
        DP audio

  ast:
        ppc64 fixes
        caching fix

  rcar:
        rcar-du DT support

  ipuv3:
        prep work for capture support

  msm:
        LVDS support for mdp4, new panel, gpu refactoring

  exynos:
        exynos3250 SoC support, drop bad mmap interface,
        mipi dsi changes, and component match support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (640 commits)
  drm/mst: rework payload table allocation to conform better.
  drm/ast: Fix HW cursor image
  drm/radeon/kv: add uvd/vce info to dpm debugfs output
  drm/radeon/ci: add uvd/vce info to dpm debugfs output
  drm/radeon: export reservation_object from dmabuf to ttm
  drm/radeon: cope with foreign fences inside the reservation object
  drm/radeon: cope with foreign fences inside display
  drm/core: use helper to check driver features
  drm/radeon/cik: write gfx ucode version to ucode addr reg
  drm/radeon/si: print full CS when we hit a packet 0
  drm/radeon: remove unecessary includes
  drm/radeon/combios: declare legacy_connector_convert as static
  drm/radeon/atombios: declare connector convert tables as static
  drm/radeon: drop btc_get_max_clock_from_voltage_dependency_table
  drm/radeon/dpm: drop clk/voltage dependency filters for BTC
  drm/radeon/dpm: drop clk/voltage dependency filters for CI
  drm/radeon/dpm: drop clk/voltage dependency filters for SI
  drm/radeon/dpm: drop clk/voltage dependency filters for NI
  drm/radeon: disable audio when we disable hdmi (v2)
  drm/radeon: split audio enable between eg and r600 (v2)
  ...
Linus Torvalds 2014-10-14 09:39:08 +02:00
Parent da92da3638 dfda0df342
Commit 2d65a9f48f
524 changed files: 24750 additions and 11271 deletions


@ -291,10 +291,9 @@ char *date;</synopsis>
<title>Device Registration</title>
<para>
A number of functions are provided to help with device registration.
The functions deal with PCI, USB and platform devices, respectively.
The functions deal with PCI and platform devices, respectively.
</para>
!Edrivers/gpu/drm/drm_pci.c
!Edrivers/gpu/drm/drm_usb.c
!Edrivers/gpu/drm/drm_platform.c
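For orientation, here is a minimal sketch of how a platform driver of this era
would use these registration helpers. This is not part of the patch; the foo_*
names are hypothetical and error handling is trimmed.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>

static const struct file_operations foo_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap           = drm_gem_mmap,
        .poll           = drm_poll,
        .read           = drm_read,
};

static struct drm_driver foo_drm_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET,
        /* .set_busid is the hook wired up for PCI/platform drivers in this series */
        .set_busid       = drm_platform_set_busid,
        .fops            = &foo_fops,
        .name            = "foo",
        .desc            = "hypothetical platform DRM driver",
        .date            = "20141014",
        .major           = 1,
        .minor           = 0,
};

static int foo_probe(struct platform_device *pdev)
{
        /* Allocates, initializes and registers a drm_device for pdev. */
        return drm_platform_init(&foo_drm_driver, pdev);
}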
<para>
New drivers that no longer rely on the services provided by the
@ -3386,6 +3385,13 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
by scheduling a timer. The delay is accessible through the vblankoffdelay
module parameter or the <varname>drm_vblank_offdelay</varname> global
variable and expressed in milliseconds. Its default value is 5000 ms.
Zero means never disable, and a negative value means disable immediately.
Drivers may override the behaviour by setting the
<structname>drm_device</structname>
<structfield>vblank_disable_immediate</structfield> flag, which when set
causes vblank interrupts to be disabled immediately regardless of the
drm_vblank_offdelay value. The flag should only be set if there's a
properly working hardware vblank counter present.
</para>
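As an illustration (a sketch only, not code from this series; foo_driver_load()
and FOO_NUM_CRTCS are made-up names), a driver whose hardware frame counter is
known to work correctly would opt in from its load callback:

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
        int ret;

        ret = drm_vblank_init(dev, FOO_NUM_CRTCS);
        if (ret)
                return ret;

        /*
         * Request immediate vblank disabling.  Only safe because the value
         * returned by our .get_vblank_counter hook comes from a properly
         * working hardware vblank counter.
         */
        dev->vblank_disable_immediate = true;

        return 0;
}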
<para>
When a vertical blanking interrupt occurs drivers only need to call the
@ -3400,6 +3406,7 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
<sect2>
<title>Vertical Blanking and Interrupt Handling Functions Reference</title>
!Edrivers/gpu/drm/drm_irq.c
!Finclude/drm/drmP.h drm_crtc_vblank_waitqueue
</sect2>
</sect1>
@ -3918,6 +3925,11 @@ int num_ioctls;</synopsis>
!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
!Idrivers/gpu/drm/i915/i915_cmd_parser.c
</sect2>
<sect2>
<title>Logical Rings, Logical Ring Contexts and Execlists</title>
!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
</sect1>
</chapter>
</part>


@ -18,6 +18,10 @@ Required properties:
Documentation/devicetree/bindings/video/display-timing.txt for display
timing binding details.
Optional properties:
- backlight: phandle of the backlight device attached to the panel
- enable-gpios: GPIO pin to enable or disable the panel
Recommended properties:
- pinctrl-names, pinctrl-0: the pincontrol settings to configure
muxing properly for pins that connect to TFP410 device
@ -29,6 +33,9 @@ Example:
compatible = "ti,tilcdc,panel";
pinctrl-names = "default";
pinctrl-0 = <&bone_lcd3_cape_lcd_pins>;
backlight = <&backlight>;
enable-gpios = <&gpio3 19 0>;
panel-info {
ac-bias = <255>;
ac-bias-intrpt = <0>;


@ -0,0 +1,7 @@
AU Optronics Corporation 10.1" WXGA TFT LCD panel
Required properties:
- compatible: should be "auo,b101xtn01"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -92,6 +92,7 @@ maxim Maxim Integrated Products
mediatek MediaTek Inc.
micrel Micrel Inc.
microchip Microchip Technology Inc.
mitsubishi Mitsubishi Electric Corporation
mosaixtech Mosaix Technologies, Inc.
moxa Moxa
mpl MPL AG
@ -144,6 +145,7 @@ st STMicroelectronics
ste ST-Ericsson
stericsson ST-Ericsson
synology Synology, Inc.
thine THine Electronics, Inc.
ti Texas Instruments
tlm Trusted Logic Mobility
toradex Toradex AG


@ -0,0 +1,50 @@
Analog Device ADV7123 Video DAC
-------------------------------
The ADV7123 is a digital-to-analog converter that outputs VGA signals from a
parallel video input.
Required properties:
- compatible: Should be "adi,adv7123"
Optional properties:
- psave-gpios: Power save control GPIO
Required nodes:
The ADV7123 has two video ports. Their connections are modeled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Video port 0 for DPI input
- Video port 1 for VGA output
Example
-------
adv7123: encoder@0 {
compatible = "adi,adv7123";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
adv7123_in: endpoint@0 {
remote-endpoint = <&dpi_out>;
};
};
port@1 {
reg = <1>;
adv7123_out: endpoint@0 {
remote-endpoint = <&vga_connector_in>;
};
};
};
};


@ -2,6 +2,7 @@ Exynos MIPI DSI Master
Required properties:
- compatible: value should be one of the following
"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
- reg: physical base address and length of the registers set for the device


@ -0,0 +1,84 @@
* Renesas R-Car Display Unit (DU)
Required Properties:
- compatible: must be one of the following.
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2) compatible DU
- reg: A list of base address and length of each memory resource, one for
each entry in the reg-names property.
- reg-names: Name of the memory resources. The DU requires one memory
resource for the DU core (named "du") and one memory resource for each
LVDS encoder (named "lvds.x" with "x" being the LVDS controller numerical
index).
- interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifiers for the DU interrupts.
- clocks: A list of phandles + clock-specifier pairs, one for each entry in
the clock-names property.
- clock-names: Name of the clocks. This property is model-dependent.
- R8A7779 uses a single functional clock. The clock doesn't need to be
named.
- R8A7790 and R8A7791 use one functional clock per channel and one clock
per LVDS encoder. The functional clocks must be named "du.x" with "x"
being the channel numerical index. The LVDS clocks must be named
"lvds.x" with "x" being the LVDS encoder numerical index.
Required nodes:
The connections to the DU output video ports are modeled using the OF graph
bindings specified in Documentation/devicetree/bindings/graph.txt.
The following table lists for each supported model the port number
corresponding to each DU output.
                Port 0          Port 1          Port 2
-----------------------------------------------------------------------------
 R8A7779 (H1)   DPAD 0          DPAD 1          -
 R8A7790 (H2)   DPAD            LVDS 0          LVDS 1
 R8A7791 (M2)   DPAD            LVDS 0          -
Example: R8A7790 (R-Car H2) DU
du: du@feb00000 {
compatible = "renesas,du-r8a7790";
reg = <0 0xfeb00000 0 0x70000>,
<0 0xfeb90000 0 0x1c>,
<0 0xfeb94000 0 0x1c>;
reg-names = "du", "lvds.0", "lvds.1";
interrupt-parent = <&gic>;
interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
<0 268 IRQ_TYPE_LEVEL_HIGH>,
<0 269 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp7_clks R8A7790_CLK_DU0>,
<&mstp7_clks R8A7790_CLK_DU1>,
<&mstp7_clks R8A7790_CLK_DU2>,
<&mstp7_clks R8A7790_CLK_LVDS0>,
<&mstp7_clks R8A7790_CLK_LVDS1>;
clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
du_out_rgb: endpoint {
};
};
port@1 {
reg = <1>;
du_out_lvds0: endpoint {
};
};
port@2 {
reg = <2>;
du_out_lvds1: endpoint {
};
};
};
};


@ -9,6 +9,7 @@ Required properties:
"samsung,s3c2443-fimd"; /* for S3C24XX SoCs */
"samsung,s3c6400-fimd"; /* for S3C64XX SoCs */
"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
"samsung,exynos5250-fimd"; /* for Exynos5 SoCs */


@ -0,0 +1,50 @@
THine Electronics THC63LVDM83D LVDS serializer
----------------------------------------------
The THC63LVDM83D is an LVDS serializer designed to support pixel data
transmission between a host and a flat panel.
Required properties:
- compatible: Should be "thine,thc63lvdm83d"
Optional properties:
- pwdn-gpios: Power down control GPIO
Required nodes:
The THC63LVDM83D has two video ports. Their connections are modeled using the
OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Video port 0 for CMOS/TTL input
- Video port 1 for LVDS output
Example
-------
lvds_enc: encoder@0 {
compatible = "thine,thc63lvdm83d";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
lvds_enc_in: endpoint@0 {
remote-endpoint = <&rgb_out>;
};
};
port@1 {
reg = <1>;
lvds_enc_out: endpoint@0 {
remote-endpoint = <&panel_in>;
};
};
};
};


@ -0,0 +1,36 @@
VGA Connector
=============
Required properties:
- compatible: "vga-connector"
Optional properties:
- label: a symbolic name for the connector corresponding to a hardware label
- ddc-i2c-bus: phandle to the I2C bus that is connected to VGA DDC
Required nodes:
The VGA connector internal connections are modeled using the OF graph bindings
specified in Documentation/devicetree/bindings/graph.txt.
The VGA connector has a single port that must be connected to a video source
port.
Example
-------
vga0: connector@0 {
compatible = "vga-connector";
label = "vga";
ddc-i2c-bus = <&i2c3>;
port {
vga_connector_in: endpoint {
remote-endpoint = <&adv7123_out>;
};
};
};


@ -3164,7 +3164,7 @@ F: include/drm/drm_panel.h
F: Documentation/devicetree/bindings/panel/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@ffwll.ch>
M: Daniel Vetter <daniel.vetter@intel.com>
M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org


@ -132,6 +132,12 @@
reg = <0x10020000 0x4000>;
};
mipi_phy: video-phy@10020710 {
compatible = "samsung,s5pv210-mipi-video-phy";
reg = <0x10020710 8>;
#phy-cells = <1>;
};
pd_cam: cam-power-domain@10023C00 {
compatible = "samsung,exynos4210-pd";
reg = <0x10023C00 0x20>;
@ -216,6 +222,33 @@
interrupts = <0 240 0>;
};
fimd: fimd@11c00000 {
compatible = "samsung,exynos3250-fimd";
reg = <0x11c00000 0x30000>;
interrupt-names = "fifo", "vsync", "lcd_sys";
interrupts = <0 84 0>, <0 85 0>, <0 86 0>;
clocks = <&cmu CLK_SCLK_FIMD0>, <&cmu CLK_FIMD0>;
clock-names = "sclk_fimd", "fimd";
samsung,power-domain = <&pd_lcd0>;
samsung,sysreg = <&sys_reg>;
status = "disabled";
};
dsi_0: dsi@11C80000 {
compatible = "samsung,exynos3250-mipi-dsi";
reg = <0x11C80000 0x10000>;
interrupts = <0 83 0>;
samsung,phy-type = <0>;
samsung,power-domain = <&pd_lcd0>;
phys = <&mipi_phy 1>;
phy-names = "dsim";
clocks = <&cmu CLK_DSIM0>, <&cmu CLK_SCLK_MIPI0>;
clock-names = "bus_clk", "pll_clk";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
mshc_0: mshc@12510000 {
compatible = "samsung,exynos5250-dw-mshc";
reg = <0x12510000 0x1000>;


@ -41,16 +41,15 @@ static struct rcar_du_encoder_data koelsch_du_encoders[] = {
.width_mm = 210,
.height_mm = 158,
.mode = {
.clock = 65000,
.hdisplay = 1024,
.hsync_start = 1048,
.hsync_end = 1184,
.htotal = 1344,
.vdisplay = 768,
.vsync_start = 771,
.vsync_end = 777,
.vtotal = 806,
.flags = 0,
.pixelclock = 65000000,
.hactive = 1024,
.hfront_porch = 20,
.hback_porch = 160,
.hsync_len = 136,
.vactive = 768,
.vfront_porch = 3,
.vback_porch = 29,
.vsync_len = 6,
},
},
},


@ -63,16 +63,15 @@ static struct rcar_du_encoder_data koelsch_du_encoders[] = {
.width_mm = 210,
.height_mm = 158,
.mode = {
.clock = 65000,
.hdisplay = 1024,
.hsync_start = 1048,
.hsync_end = 1184,
.htotal = 1344,
.vdisplay = 768,
.vsync_start = 771,
.vsync_end = 777,
.vtotal = 806,
.flags = 0,
.pixelclock = 65000000,
.hactive = 1024,
.hfront_porch = 20,
.hback_porch = 160,
.hsync_len = 136,
.vactive = 768,
.vfront_porch = 3,
.vback_porch = 29,
.vsync_len = 6,
},
},
},


@ -43,16 +43,15 @@ static struct rcar_du_encoder_data lager_du_encoders[] = {
.width_mm = 210,
.height_mm = 158,
.mode = {
.clock = 65000,
.hdisplay = 1024,
.hsync_start = 1048,
.hsync_end = 1184,
.htotal = 1344,
.vdisplay = 768,
.vsync_start = 771,
.vsync_end = 777,
.vtotal = 806,
.flags = 0,
.pixelclock = 65000000,
.hactive = 1024,
.hfront_porch = 20,
.hback_porch = 160,
.hsync_len = 136,
.vactive = 768,
.vfront_porch = 3,
.vback_porch = 29,
.vsync_len = 6,
},
},
},


@ -99,16 +99,15 @@ static struct rcar_du_encoder_data lager_du_encoders[] = {
.width_mm = 210,
.height_mm = 158,
.mode = {
.clock = 65000,
.hdisplay = 1024,
.hsync_start = 1048,
.hsync_end = 1184,
.htotal = 1344,
.vdisplay = 768,
.vsync_start = 771,
.vsync_end = 777,
.vtotal = 806,
.flags = 0,
.pixelclock = 65000000,
.hactive = 1024,
.hfront_porch = 20,
.hback_porch = 160,
.hsync_len = 136,
.vactive = 768,
.vfront_porch = 3,
.vback_porch = 29,
.vsync_len = 6,
},
},
},


@ -192,16 +192,15 @@ static struct rcar_du_encoder_data du_encoders[] = {
.width_mm = 210,
.height_mm = 158,
.mode = {
.clock = 65000,
.hdisplay = 1024,
.hsync_start = 1048,
.hsync_end = 1184,
.htotal = 1344,
.vdisplay = 768,
.vsync_start = 771,
.vsync_end = 777,
.vtotal = 806,
.flags = 0,
.pixelclock = 65000000,
.hactive = 1024,
.hfront_porch = 20,
.hback_porch = 160,
.hsync_len = 136,
.vactive = 768,
.vfront_porch = 3,
.vback_porch = 29,
.vsync_len = 6,
},
},
},


@ -8,6 +8,7 @@ menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
select HDMI
select FB_CMDLINE
select I2C
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
@ -24,12 +25,6 @@ config DRM_MIPI_DSI
bool
depends on DRM
config DRM_USB
tristate
depends on DRM
depends on USB_SUPPORT && USB_ARCH_HAS_HCD
select USB
config DRM_KMS_HELPER
tristate
depends on DRM
@ -115,6 +110,7 @@ config DRM_RADEON
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select MMU_NOTIFIER
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to


@ -4,7 +4,7 @@
ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
@ -22,8 +22,6 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
@ -36,7 +34,6 @@ CFLAGS_drm_trace_points.o := -I$(src)
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_USB) += drm_usb.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/


@ -308,6 +308,7 @@ static struct drm_driver armada_drm_driver = {
.postclose = NULL,
.lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
.set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,


@ -8,6 +8,8 @@
#ifndef ARMADA_GEM_H
#define ARMADA_GEM_H
#include <drm/drm_gem.h>
/* GEM */
struct armada_gem_object {
struct drm_gem_object obj;


@ -379,11 +379,39 @@ static bool ast_init_dvo(struct drm_device *dev)
return true;
}
static void ast_init_analog(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u32 data;
/*
* Set DAC source to VGA mode in SCU2C via the P2A
* bridge. First configure the P2U to target the SCU
* in case it isn't at this stage.
*/
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
/* Then unlock the SCU with the magic password */
ast_write32(ast, 0x12000, 0x1688a8a8);
ast_write32(ast, 0x12000, 0x1688a8a8);
ast_write32(ast, 0x12000, 0x1688a8a8);
/* Finally, clear bits [17:16] of SCU2c */
data = ast_read32(ast, 0x1202c);
data &= 0xfffcffff;
ast_write32(ast, 0, data);
/* Disable DVO */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x00);
}
void ast_init_3rdtx(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u8 jreg;
u32 data;
if (ast->chip == AST2300 || ast->chip == AST2400) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg & 0x0e) {
@ -399,12 +427,8 @@ void ast_init_3rdtx(struct drm_device *dev)
default:
if (ast->tx_chip_type == AST_TX_SIL164)
ast_init_dvo(dev);
else {
ast_write32(ast, 0x12000, 0x1688a8a8);
data = ast_read32(ast, 0x1202c);
data &= 0xfffcffff;
ast_write32(ast, 0, data);
}
else
ast_init_analog(dev);
}
}
}


@ -199,6 +199,7 @@ static struct drm_driver driver = {
.load = ast_driver_load,
.unload = ast_driver_unload,
.set_busid = drm_pci_set_busid,
.fops = &ast_fops,
.name = DRIVER_NAME,


@ -36,6 +36,8 @@
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drm_gem.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@ -125,8 +127,9 @@ struct ast_gem_object;
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
#define AST_IO_VGA_ENABLE_PORT (0x43)
#define AST_IO_SEQ_PORT (0x44)
#define AST_DAC_INDEX_READ (0x3c7)
#define AST_IO_DAC_INDEX_READ (0x47)
#define AST_IO_DAC_INDEX_WRITE (0x48)
#define AST_IO_DAC_DATA (0x49)
#define AST_IO_GR_PORT (0x4E)
@ -134,6 +137,8 @@ struct ast_gem_object;
#define AST_IO_INPUT_STATUS1_READ (0x5A)
#define AST_IO_MISC_PORT_READ (0x4C)
#define AST_IO_MM_OFFSET (0x380)
#define __ast_read(x) \
static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
u##x val = 0;\
@ -316,7 +321,7 @@ struct ast_bo {
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
struct drm_gem_object gem;
u32 placements[3];
struct ttm_place placements[3];
int pin_count;
};
#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
@ -381,6 +386,9 @@ int ast_bo_push_sysram(struct ast_bo *bo);
int ast_mmap(struct file *filp, struct vm_area_struct *vma);
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
bool ast_is_vga_enabled(struct drm_device *dev);
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_private *ast, u32 r);
void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);


@ -186,7 +186,8 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
static int astfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
struct ast_fbdev *afbdev =
container_of(helper, struct ast_fbdev, helper);
struct drm_device *dev = afbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_framebuffer *fb;


@ -63,7 +63,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
}
static int ast_detect_chip(struct drm_device *dev)
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = dev->dev_private;
uint32_t data, jreg;
@ -110,6 +110,21 @@ static int ast_detect_chip(struct drm_device *dev)
}
}
/*
* If VGA isn't enabled, we need to enable it now or subsequent
* access to the scratch registers will fail. We also inform
* our caller that it needs to POST the chip
* (Assumption: VGA not enabled -> need to POST)
*/
if (!ast_is_vga_enabled(dev)) {
ast_enable_vga(dev);
ast_enable_mmio(dev);
DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
*need_post = true;
} else
*need_post = false;
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
ast->support_wide_screen = true;
@ -125,6 +140,7 @@ static int ast_detect_chip(struct drm_device *dev)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x1207c);
@ -137,11 +153,29 @@ static int ast_detect_chip(struct drm_device *dev)
break;
}
/* Check 3rd Tx option (digital output afaik) */
ast->tx_chip_type = AST_TX_NONE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
if (jreg & 0x80)
ast->tx_chip_type = AST_TX_SIL164;
/*
* VGACRA3 Enhanced Color Mode Register, check if DVO is already
* enabled, in that case, assume we have a SIL164 TMDS transmitter
*
* Don't make that assumption if the chip wasn't enabled and
* is at power-on reset, otherwise we'll incorrectly "detect" a
* SIL164 when there is none.
*/
if (!*need_post) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
if (jreg & 0x80)
ast->tx_chip_type = AST_TX_SIL164;
}
if ((ast->chip == AST2300) || (ast->chip == AST2400)) {
/*
* On AST2300 and 2400, look at the configuration set by the SoC in
* the SOC scratch register #1 bits 11:8 (interestingly marked
* as "reserved" in the spec)
*/
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg) {
case 0x04:
@ -162,6 +196,17 @@ static int ast_detect_chip(struct drm_device *dev)
}
}
/* Print stuff for diagnostic purposes */
switch(ast->tx_chip_type) {
case AST_TX_SIL164:
DRM_INFO("Using Sil164 TMDS transmitter\n");
break;
case AST_TX_DP501:
DRM_INFO("Using DP501 DisplayPort transmitter\n");
break;
default:
DRM_INFO("Analog VGA only\n");
}
return 0;
}
@ -346,6 +391,7 @@ static u32 ast_get_vram_info(struct drm_device *dev)
int ast_driver_load(struct drm_device *dev, unsigned long flags)
{
struct ast_private *ast;
bool need_post;
int ret = 0;
ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
@ -360,13 +406,27 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
ret = -EIO;
goto out_free;
}
ast->ioregs = pci_iomap(dev->pdev, 2, 0);
if (!ast->ioregs) {
ret = -EIO;
goto out_free;
/*
* If we don't have IO space at all, use MMIO now and
* assume the chip has MMIO enabled by default (rev 0x20
* and higher).
*/
if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO)) {
DRM_INFO("platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
ast_detect_chip(dev);
/* "map" IO regs if the above hasn't done so already */
if (!ast->ioregs) {
ast->ioregs = pci_iomap(dev->pdev, 2, 0);
if (!ast->ioregs) {
ret = -EIO;
goto out_free;
}
}
ast_detect_chip(dev, &need_post);
if (ast->chip != AST1180) {
ast_get_dram_info(dev);
@ -374,6 +434,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
}
if (need_post)
ast_post_gpu(dev);
ret = ast_mm_init(ast);
if (ret)
goto out_free;


@ -80,6 +80,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
struct ast_private *ast = crtc->dev->dev_private;
u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
u32 hborder, vborder;
bool check_sync;
struct ast_vbios_enhtable *best = NULL;
switch (crtc->primary->fb->bits_per_pixel) {
case 8:
@ -141,14 +143,34 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
}
refresh_rate = drm_mode_vrefresh(mode);
while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
vbios_mode->enh_table++;
if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
(vbios_mode->enh_table->refresh_rate == 0xff)) {
vbios_mode->enh_table--;
break;
check_sync = vbios_mode->enh_table->flags & WideScreenMode;
do {
struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
while (loop->refresh_rate != 0xff) {
if ((check_sync) &&
(((mode->flags & DRM_MODE_FLAG_NVSYNC) &&
(loop->flags & PVSync)) ||
((mode->flags & DRM_MODE_FLAG_PVSYNC) &&
(loop->flags & NVSync)) ||
((mode->flags & DRM_MODE_FLAG_NHSYNC) &&
(loop->flags & PHSync)) ||
((mode->flags & DRM_MODE_FLAG_PHSYNC) &&
(loop->flags & NHSync)))) {
loop++;
continue;
}
if (loop->refresh_rate <= refresh_rate
&& (!best || loop->refresh_rate > best->refresh_rate))
best = loop;
loop++;
}
}
if (best || !check_sync)
break;
check_sync = 0;
} while (1);
if (best)
vbios_mode->enh_table = best;
hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
@ -419,8 +441,10 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo
struct ast_private *ast = dev->dev_private;
u8 jreg;
jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
jreg |= (vbios_mode->enh_table->flags & SyncNN);
jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
jreg &= ~0xC0;
if (vbios_mode->enh_table->flags & NVSync) jreg |= 0x80;
if (vbios_mode->enh_table->flags & NHSync) jreg |= 0x40;
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
@ -1080,8 +1104,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
writel(data32.ul, dstxor);
csum += data32.ul;


@ -33,18 +33,23 @@
static void ast_init_dram_2300(struct drm_device *dev);
static void
ast_enable_vga(struct drm_device *dev)
void ast_enable_vga(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
ast_io_write8(ast, 0x43, 0x01);
ast_io_write8(ast, 0x42, 0x01);
ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
}
#if 0 /* will use later */
static bool
ast_is_vga_enabled(struct drm_device *dev)
void ast_enable_mmio(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
}
bool ast_is_vga_enabled(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u8 ch;
@ -52,7 +57,7 @@ ast_is_vga_enabled(struct drm_device *dev)
if (ast->chip == AST1180) {
/* TODO 1180 */
} else {
ch = ast_io_read8(ast, 0x43);
ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
if (ch) {
ast_open_key(ast);
ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
@ -61,7 +66,6 @@ ast_is_vga_enabled(struct drm_device *dev)
}
return 0;
}
#endif
static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
@ -371,6 +375,7 @@ void ast_post_gpu(struct drm_device *dev)
pci_write_config_dword(ast->dev->pdev, 0x04, reg);
ast_enable_vga(dev);
ast_enable_mmio(dev);
ast_open_key(ast);
ast_set_def_ext_reg(dev);


@ -35,14 +35,18 @@
#define HalfDCLK 0x00000002
#define DoubleScanMode 0x00000004
#define LineCompareOff 0x00000008
#define SyncPP 0x00000000
#define SyncPN 0x00000040
#define SyncNP 0x00000080
#define SyncNN 0x000000C0
#define HBorder 0x00000020
#define VBorder 0x00000010
#define WideScreenMode 0x00000100
#define NewModeInfo 0x00000200
#define NHSync 0x00000400
#define PHSync 0x00000800
#define NVSync 0x00001000
#define PVSync 0x00002000
#define SyncPP (PVSync | PHSync)
#define SyncPN (PVSync | NHSync)
#define SyncNP (NVSync | PHSync)
#define SyncNN (NVSync | NHSync)
/* DCLK Index */
#define VCLK25_175 0x00
@ -72,6 +76,7 @@
#define VCLK119 0x17
#define VCLK85_5 0x18
#define VCLK97_75 0x19
#define VCLK118_25 0x1A
static struct ast_vbios_dclk_info dclk_table[] = {
{0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
@ -100,6 +105,7 @@ static struct ast_vbios_dclk_info dclk_table[] = {
{0x77, 0x58, 0x80}, /* 17: VCLK119 */
{0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
{0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
{0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */
};
static struct ast_vbios_stdtable vbios_stdtable[] = {
@ -246,8 +252,10 @@ static struct ast_vbios_enhtable res_1360x768[] = {
static struct ast_vbios_enhtable res_1600x900[] = {
{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x3A },
{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* end */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x3A }
{2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3A },
{2112, 1600, 88,168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x3A },
};
static struct ast_vbios_enhtable res_1920x1080[] = {
@ -261,11 +269,11 @@ static struct ast_vbios_enhtable res_1920x1080[] = {
/* 16:10 */
static struct ast_vbios_enhtable res_1280x800[] = {
{1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 35 },
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
{1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x35 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 },
{1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x35 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x35 },
};
@ -273,24 +281,24 @@ static struct ast_vbios_enhtable res_1440x900[] = {
{1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
{1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x36 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 },
{1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x36 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x36 },
};
static struct ast_vbios_enhtable res_1680x1050[] = {
{1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x37 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 },
{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x37 },
(SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x37 },
};
static struct ast_vbios_enhtable res_1920x1200[] = {
{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x34 },
{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 1, 0x34 },
};


@ -293,18 +293,22 @@ void ast_mm_fini(struct ast_private *ast)
void ast_ttm_placement(struct ast_bo *bo, int domain)
{
u32 c = 0;
bo->placement.fpfn = 0;
bo->placement.lpfn = 0;
unsigned i;
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
if (domain & TTM_PL_FLAG_VRAM)
bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
if (domain & TTM_PL_FLAG_SYSTEM)
bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
if (!c)
bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
bo->placement.num_placement = c;
bo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
bo->placements[i].fpfn = 0;
bo->placements[i].lpfn = 0;
}
}
int ast_bo_create(struct drm_device *dev, int size, int align,
@ -335,7 +339,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
ttm_bo_type_device, &astbo->placement,
align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, ast_bo_ttm_destroy);
NULL, NULL, ast_bo_ttm_destroy);
if (ret)
return ret;
@ -360,7 +364,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@ -383,7 +387,7 @@ int ast_bo_unpin(struct ast_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@ -407,7 +411,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
@ -423,7 +427,7 @@ int ast_mmap(struct file *filp, struct vm_area_struct *vma)
struct ast_private *ast;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
return -EINVAL;
file_priv = filp->private_data;
ast = file_priv->minor->dev->dev_private;


@ -34,6 +34,8 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/ati_pcigart.h>
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,


@ -7,6 +7,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_page_alloc.h>
@ -99,7 +101,7 @@ struct bochs_bo {
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
struct drm_gem_object gem;
u32 placements[3];
struct ttm_place placements[3];
int pin_count;
};


@ -82,6 +82,7 @@ static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = bochs_load,
.unload = bochs_unload,
.set_busid = drm_pci_set_busid,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",


@ -257,20 +257,26 @@ void bochs_mm_fini(struct bochs_device *bochs)
static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
{
unsigned i;
u32 c = 0;
bo->placement.fpfn = 0;
bo->placement.lpfn = 0;
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
if (domain & TTM_PL_FLAG_VRAM) {
bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
bo->placements[c++].flags = TTM_PL_FLAG_WC
| TTM_PL_FLAG_UNCACHED
| TTM_PL_FLAG_VRAM;
}
if (domain & TTM_PL_FLAG_SYSTEM) {
bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placements[c++].flags = TTM_PL_MASK_CACHING
| TTM_PL_FLAG_SYSTEM;
}
if (!c) {
bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placements[c++].flags = TTM_PL_MASK_CACHING
| TTM_PL_FLAG_SYSTEM;
}
for (i = 0; i < c; ++i) {
bo->placements[i].fpfn = 0;
bo->placements[i].lpfn = 0;
}
bo->placement.num_placement = c;
bo->placement.num_busy_placement = c;
@ -294,7 +300,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
bochs_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@ -319,7 +325,7 @@ int bochs_bo_unpin(struct bochs_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@ -333,7 +339,7 @@ int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
struct bochs_device *bochs;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
return -EINVAL;
file_priv = filp->private_data;
bochs = file_priv->minor->dev->dev_private;
@ -371,7 +377,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
ttm_bo_type_device, &bochsbo->placement,
align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, bochs_bo_ttm_destroy);
NULL, NULL, bochs_bo_ttm_destroy);
if (ret)
return ret;


@ -128,6 +128,7 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = cirrus_driver_load,
.unload = cirrus_driver_unload,
.set_busid = drm_pci_set_busid,
.fops = &cirrus_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,


@ -21,6 +21,8 @@
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drm_gem.h>
#define DRIVER_AUTHOR "Matthew Garrett"
#define DRIVER_NAME "cirrus"
@ -163,7 +165,7 @@ struct cirrus_bo {
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
struct drm_gem_object gem;
u32 placements[3];
struct ttm_place placements[3];
int pin_count;
};
#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)


@ -160,7 +160,8 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
static int cirrusfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
struct cirrus_fbdev *gfbdev =
container_of(helper, struct cirrus_fbdev, helper);
struct drm_device *dev = gfbdev->helper.dev;
struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
struct fb_info *info;


@ -298,18 +298,21 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
{
u32 c = 0;
bo->placement.fpfn = 0;
bo->placement.lpfn = 0;
unsigned i;
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
if (domain & TTM_PL_FLAG_VRAM)
bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
if (domain & TTM_PL_FLAG_SYSTEM)
bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
if (!c)
bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
bo->placement.num_placement = c;
bo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
bo->placements[i].fpfn = 0;
bo->placements[i].lpfn = 0;
}
}
int cirrus_bo_create(struct drm_device *dev, int size, int align,
@ -340,7 +343,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
ttm_bo_type_device, &cirrusbo->placement,
align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, cirrus_bo_ttm_destroy);
NULL, NULL, cirrus_bo_ttm_destroy);
if (ret)
return ret;
@ -365,7 +368,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@ -392,7 +395,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
@ -408,7 +411,7 @@ int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
struct cirrus_device *cirrus;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
return -EINVAL;
file_priv = filp->private_data;
cirrus = file_priv->minor->dev->dev_private;


@ -34,6 +34,7 @@
#include <drm/drmP.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "drm_legacy.h"
#if __OS_HAS_AGP


@ -34,6 +34,13 @@
*/
#include <drm/drmP.h>
#include "drm_internal.h"
struct drm_magic_entry {
struct list_head head;
struct drm_hash_item hash_item;
struct drm_file *priv;
};
/**
* Find the file with the given magic number.


@ -1,18 +1,13 @@
/**
* \file drm_bufs.c
* Generic buffer template
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
* Legacy: Generic DRM Buffer Management
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
* Author: Gareth Hughes <gareth@valinux.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@ -39,6 +34,7 @@
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
@ -365,9 +361,9 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return 0;
}
int drm_addmap(struct drm_device * dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map ** map_ptr)
int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
struct drm_map_list *list;
int rc;
@ -377,8 +373,7 @@ int drm_addmap(struct drm_device * dev, resource_size_t offset,
*map_ptr = list->map;
return rc;
}
EXPORT_SYMBOL(drm_addmap);
EXPORT_SYMBOL(drm_legacy_addmap);
/**
* Ioctl to specify a range of memory that is available for mapping by a
@ -391,8 +386,8 @@ EXPORT_SYMBOL(drm_addmap);
* \return zero on success or a negative value on error.
*
*/
int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *map = data;
struct drm_map_list *maplist;
@ -429,9 +424,9 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
* it's being used, and free any associated resource (such as MTRRs) if it's not
* in use.
*
* \sa drm_addmap
* \sa drm_legacy_addmap
*/
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
@ -478,26 +473,26 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
dmah.vaddr = map->handle;
dmah.busaddr = map->offset;
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
__drm_legacy_pci_free(dev, &dmah);
break;
}
kfree(map);
return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
EXPORT_SYMBOL(drm_legacy_rmmap_locked);
int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_rmmap_locked(dev, map);
ret = drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_rmmap);
EXPORT_SYMBOL(drm_legacy_rmmap);
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
@ -514,8 +509,8 @@ EXPORT_SYMBOL(drm_rmmap);
* \param arg pointer to a struct drm_map structure.
* \return zero on success or a negative value on error.
*/
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *request = data;
struct drm_local_map *map = NULL;
@ -546,7 +541,7 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
ret = drm_rmmap_locked(dev, map);
ret = drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
@ -599,7 +594,8 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
* reallocates the buffer list of the same size order to accommodate the new
* buffers.
*/
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
int drm_legacy_addbufs_agp(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
@ -759,10 +755,11 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
atomic_dec(&dev->buf_alloc);
return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* __OS_HAS_AGP */
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
int drm_legacy_addbufs_pci(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
int count;
@ -964,9 +961,10 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
static int drm_legacy_addbufs_sg(struct drm_device *dev,
struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
@ -1135,8 +1133,8 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
* addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
* PCI memory respectively.
*/
int drm_addbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_addbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_buf_desc *request = data;
int ret;
@ -1149,15 +1147,15 @@ int drm_addbufs(struct drm_device *dev, void *data,
#if __OS_HAS_AGP
if (request->flags & _DRM_AGP_BUFFER)
ret = drm_addbufs_agp(dev, request);
ret = drm_legacy_addbufs_agp(dev, request);
else
#endif
if (request->flags & _DRM_SG_BUFFER)
ret = drm_addbufs_sg(dev, request);
ret = drm_legacy_addbufs_sg(dev, request);
else if (request->flags & _DRM_FB_BUFFER)
ret = -EINVAL;
else
ret = drm_addbufs_pci(dev, request);
ret = drm_legacy_addbufs_pci(dev, request);
return ret;
}
@ -1179,8 +1177,8 @@ int drm_addbufs(struct drm_device *dev, void *data,
* lock, preventing allocation of more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
int drm_infobufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_infobufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_info *request = data;
@ -1260,8 +1258,8 @@ int drm_infobufs(struct drm_device *dev, void *data,
*
* \note This ioctl is deprecated and mostly never used.
*/
int drm_markbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_markbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_desc *request = data;
@ -1307,8 +1305,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
* Calls free_buffer() for each used buffer.
* This function is primarily used for debugging.
*/
int drm_freebufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_freebufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_free *request = data;
@ -1340,7 +1338,7 @@ int drm_freebufs(struct drm_device *dev, void *data,
task_pid_nr(current));
return -EINVAL;
}
drm_free_buffer(dev, buf);
drm_legacy_free_buffer(dev, buf);
}
return 0;
@ -1360,8 +1358,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
* offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
* drm_mmap_dma().
*/
int drm_mapbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int retcode = 0;
@ -1448,7 +1446,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
return retcode;
}
int drm_dma_ioctl(struct drm_device *dev, void *data,
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (drm_core_check_feature(dev, DRIVER_MODESET))
@ -1460,7 +1458,7 @@ int drm_dma_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
struct drm_local_map *drm_getsarea(struct drm_device *dev)
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
struct drm_map_list *entry;
@ -1472,4 +1470,4 @@ struct drm_local_map *drm_getsarea(struct drm_device *dev)
}
return NULL;
}
EXPORT_SYMBOL(drm_getsarea);
EXPORT_SYMBOL(drm_legacy_getsarea);


@ -40,106 +40,12 @@
#include <drm/drm_modeset_lock.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv);
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (WARN_ON(!ctx))
return;
mutex_lock(&config->mutex);
drm_modeset_acquire_init(ctx, 0);
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_crtcs(dev, ctx);
if (ret)
goto fail;
WARN_ON(config->acquire_ctx);
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_all():
*/
config->acquire_ctx = ctx;
drm_warn_on_modeset_not_all_locked(dev);
return;
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
}
EXPORT_SYMBOL(drm_modeset_lock_all);
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
*
* This function drop all modeset locks taken by drm_modeset_lock_all.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
if (WARN_ON(!ctx))
return;
config->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
kfree(ctx);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
*
* Useful as a debug assert.
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
struct drm_crtc *crtc;
/* Locking is currently fubar in the panic handler. */
if (oops_in_progress)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
const char *fnname(int val) \
@ -515,9 +421,6 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
if (ret)
goto out;
/* Grab the idr reference. */
drm_framebuffer_reference(fb);
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
out:
@ -527,10 +430,34 @@ out:
}
EXPORT_SYMBOL(drm_framebuffer_init);
/* dev->mode_config.fb_lock must be held! */
static void __drm_framebuffer_unregister(struct drm_device *dev,
struct drm_framebuffer *fb)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
mutex_unlock(&dev->mode_config.idr_mutex);
fb->base.id = 0;
}
static void drm_framebuffer_free(struct kref *kref)
{
struct drm_framebuffer *fb =
container_of(kref, struct drm_framebuffer, refcount);
struct drm_device *dev = fb->dev;
/*
* The lookup idr holds a weak reference, which has not necessarily been
* removed at this point. Check for that.
*/
mutex_lock(&dev->mode_config.fb_lock);
if (fb->base.id) {
/* Mark fb as reaped and drop idr ref. */
__drm_framebuffer_unregister(dev, fb);
}
mutex_unlock(&dev->mode_config.fb_lock);
fb->funcs->destroy(fb);
}
@ -567,8 +494,10 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
mutex_lock(&dev->mode_config.fb_lock);
fb = __drm_framebuffer_lookup(dev, id);
if (fb)
drm_framebuffer_reference(fb);
if (fb) {
if (!kref_get_unless_zero(&fb->refcount))
fb = NULL;
}
mutex_unlock(&dev->mode_config.fb_lock);
return fb;
@ -612,19 +541,6 @@ static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
/* dev->mode_config.fb_lock must be held! */
static void __drm_framebuffer_unregister(struct drm_device *dev,
struct drm_framebuffer *fb)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
mutex_unlock(&dev->mode_config.idr_mutex);
fb->base.id = 0;
__drm_framebuffer_unreference(fb);
}
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
@ -764,11 +680,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->funcs = funcs;
crtc->invert_dimensions = false;
drm_modeset_lock_all(dev);
drm_modeset_lock_init(&crtc->mutex);
/* dropped by _unlock_all(): */
drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
goto out;
@ -786,7 +698,6 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
out:
drm_modeset_unlock_all(dev);
return ret;
}
@ -852,6 +763,59 @@ static void drm_mode_remove(struct drm_connector *connector,
drm_mode_destroy(connector->dev, mode);
}
/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
* @connector: connector to query
*
* The kernel supports per-connector configuration of its consoles through
* use of the video= parameter. This function parses that option and
* extracts the user's specified mode (or enable/disable status) for a
* particular connector. This is typically only used during the early fbdev
* setup.
*/
static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *mode = &connector->cmdline_mode;
char *option = NULL;
if (fb_get_options(connector->name, &option))
return;
if (!drm_mode_parse_command_line_for_connector(option,
connector,
mode))
return;
if (mode->force) {
const char *s;
switch (mode->force) {
case DRM_FORCE_OFF:
s = "OFF";
break;
case DRM_FORCE_ON_DIGITAL:
s = "ON - dig";
break;
default:
case DRM_FORCE_ON:
s = "ON";
break;
}
DRM_INFO("forcing %s connector %s\n", connector->name, s);
connector->force = mode->force;
}
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
connector->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
mode->margins ? " with margins" : "",
mode->interlace ? " interlaced" : "");
}
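For reference, the option parsed here is the standard video= connector parameter (documented in Documentation/fb/modedb.txt); a couple of typical forms, given purely as examples:

	video=HDMI-A-1:1920x1080@60	force a 1920x1080, 60 Hz mode on that connector
	video=VGA-1:e			force the connector on even if nothing is detected
	video=DVI-I-1:d			force the connector off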
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
@ -904,6 +868,8 @@ int drm_connector_init(struct drm_device *dev,
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
drm_connector_get_cmdline_mode(connector);
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
@ -956,6 +922,29 @@ void drm_connector_cleanup(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_connector_cleanup);
/**
* drm_connector_index - find the index of a registered connector
* @connector: connector to find index for
*
* Given a registered connector, return the index of that connector within a DRM
* device's list of connectors.
*/
unsigned int drm_connector_index(struct drm_connector *connector)
{
unsigned int index = 0;
struct drm_connector *tmp;
list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
if (tmp == connector)
return index;
index++;
}
BUG();
}
EXPORT_SYMBOL(drm_connector_index);
/**
* drm_connector_register - register a connector
* @connector: the connector to register
@ -1260,6 +1249,29 @@ void drm_plane_cleanup(struct drm_plane *plane)
}
EXPORT_SYMBOL(drm_plane_cleanup);
/**
* drm_plane_index - find the index of a registered plane
* @plane: plane to find index for
*
* Given a registered plane, return the index of that plane within a DRM
* device's list of planes.
*/
unsigned int drm_plane_index(struct drm_plane *plane)
{
unsigned int index = 0;
struct drm_plane *tmp;
list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
if (tmp == plane)
return index;
index++;
}
BUG();
}
EXPORT_SYMBOL(drm_plane_index);
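Both drm_connector_index() above and drm_plane_index() exist so that later code can address per-object slots in flat arrays. A hedged usage sketch (the helper and the slot array are hypothetical):

static void example_clear_plane_slots(struct drm_device *dev,
				      void **slots /* one entry per plane */)
{
	struct drm_plane *plane;

	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
		slots[drm_plane_index(plane)] = NULL;
}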
/**
* drm_plane_force_disable - Forcibly disable a plane
* @plane: plane to disable
@ -1271,19 +1283,21 @@ EXPORT_SYMBOL(drm_plane_cleanup);
*/
void drm_plane_force_disable(struct drm_plane *plane)
{
struct drm_framebuffer *old_fb = plane->fb;
int ret;
if (!old_fb)
if (!plane->fb)
return;
plane->old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
if (ret) {
DRM_ERROR("failed to disable plane with busy fb\n");
plane->old_fb = NULL;
return;
}
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(old_fb);
__drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
plane->fb = NULL;
plane->crtc = NULL;
}
@ -2249,33 +2263,29 @@ out:
*
* src_{x,y,w,h} are provided in 16.16 fixed point format
*/
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
/* src_{x,y,w,h} values are 16.16 fixed point */
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
static int __setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
/* src_{x,y,w,h} values are 16.16 fixed point */
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
struct drm_framebuffer *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
/* No fb means shut it down */
if (!fb) {
drm_modeset_lock_all(dev);
old_fb = plane->fb;
plane->old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
if (!ret) {
plane->crtc = NULL;
plane->fb = NULL;
} else {
old_fb = NULL;
plane->old_fb = NULL;
}
drm_modeset_unlock_all(dev);
goto out;
}
@ -2315,8 +2325,7 @@ static int setplane_internal(struct drm_plane *plane,
goto out;
}
drm_modeset_lock_all(dev);
old_fb = plane->fb;
plane->old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
@ -2325,18 +2334,37 @@ static int setplane_internal(struct drm_plane *plane,
plane->fb = fb;
fb = NULL;
} else {
old_fb = NULL;
plane->old_fb = NULL;
}
drm_modeset_unlock_all(dev);
out:
if (fb)
drm_framebuffer_unreference(fb);
if (old_fb)
drm_framebuffer_unreference(old_fb);
if (plane->old_fb)
drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
return ret;
}
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
/* src_{x,y,w,h} values are 16.16 fixed point */
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
int ret;
drm_modeset_lock_all(plane->dev);
ret = __setplane_internal(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
drm_modeset_unlock_all(plane->dev);
return ret;
}
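The split above follows the usual __locked/unlocked helper pattern: __setplane_internal() expects the caller to already hold the modeset locks (the universal cursor path below calls it under drm_modeset_lock_crtc()), while setplane_internal() wraps it with drm_modeset_lock_all() for the SETPLANE ioctl. A generic sketch of the pattern, with hypothetical names:

struct foo {
	struct mutex lock;
};

static int __do_update_locked(struct foo *f)
{
	/* caller must hold f->lock */
	return 0;
}

static int do_update(struct foo *f)
{
	int ret;

	mutex_lock(&f->lock);
	ret = __do_update_locked(f);
	mutex_unlock(&f->lock);
	return ret;
}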
/**
@ -2440,7 +2468,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
* crtcs. Atomic modeset will have saner semantics ...
*/
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
tmp->old_fb = tmp->primary->fb;
tmp->primary->old_fb = tmp->primary->fb;
fb = set->fb;
@ -2453,8 +2481,9 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
if (tmp->primary->fb)
drm_framebuffer_reference(tmp->primary->fb);
if (tmp->old_fb)
drm_framebuffer_unreference(tmp->old_fb);
if (tmp->primary->old_fb)
drm_framebuffer_unreference(tmp->primary->old_fb);
tmp->primary->old_fb = NULL;
}
return ret;
@ -2701,6 +2730,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
int ret = 0;
BUG_ON(!crtc->cursor);
WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
/*
* Obtain fb we'll be using (either new or existing) and take an extra
@ -2720,11 +2750,9 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
fb = NULL;
}
} else {
mutex_lock(&dev->mode_config.mutex);
fb = crtc->cursor->fb;
if (fb)
drm_framebuffer_reference(fb);
mutex_unlock(&dev->mode_config.mutex);
}
if (req->flags & DRM_MODE_CURSOR_MOVE) {
@ -2746,7 +2774,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
* setplane_internal will take care of deref'ing either the old or new
* framebuffer depending on success.
*/
ret = setplane_internal(crtc->cursor, crtc, fb,
ret = __setplane_internal(crtc->cursor, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
0, 0, src_w, src_h);
@ -2782,10 +2810,12 @@ static int drm_mode_cursor_common(struct drm_device *dev,
* If this crtc has a universal cursor plane, call that plane's update
* handler rather than using legacy cursor handlers.
*/
if (crtc->cursor)
return drm_mode_cursor_universal(crtc, req, file_priv);
drm_modeset_lock_crtc(crtc);
if (crtc->cursor) {
ret = drm_mode_cursor_universal(crtc, req, file_priv);
goto out;
}
drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
@ -2809,7 +2839,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
}
}
out:
drm_modeset_unlock(&crtc->mutex);
drm_modeset_unlock_crtc(crtc);
return ret;
@ -3370,7 +3400,16 @@ void drm_fb_release(struct drm_file *priv)
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
mutex_lock(&priv->fbs_lock);
/*
* When the file gets released that means no one else can access the fb
* list any more, so no need to grab fpriv->fbs_lock. And we need to
* avoid upsetting lockdep since the universal cursor code adds a
* framebuffer while holding mutex locks.
*
* Note that a real deadlock between fpriv->fbs_lock and the modeset
* locks is impossible here since no one else but this function can get
* at it any more.
*/
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
mutex_lock(&dev->mode_config.fb_lock);
@ -3383,7 +3422,6 @@ void drm_fb_release(struct drm_file *priv)
/* This will also drop the fpriv->fbs reference. */
drm_framebuffer_remove(fb);
}
mutex_unlock(&priv->fbs_lock);
}
/**
@ -3495,9 +3533,10 @@ EXPORT_SYMBOL(drm_property_create_enum);
* @flags: flags specifying the property type
* @name: name of the property
* @props: enumeration lists with property bitflags
* @num_values: number of pre-defined values
* @num_props: size of the @props array
* @supported_bits: bitmask of all supported enumeration values
*
* This creates a new generic drm property which can then be attached to a drm
* This creates a new bitmask drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
@ -4157,12 +4196,25 @@ static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
/**
* drm_mode_plane_set_obj_prop - set the value of a property
* @plane: drm plane object to set property value for
* @property: property to set
* @value: value the property should be set to
*
* This function sets a given property on a given plane object. This function
* calls the driver's ->set_property callback and changes the software state of
* the property if the callback succeeds.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_plane *plane = obj_to_plane(obj);
struct drm_mode_object *obj = &plane->base;
if (plane->funcs->set_property)
ret = plane->funcs->set_property(plane, property, value);
@ -4171,6 +4223,7 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
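Now that the setter is exported, other core code can set plane properties directly; the fbdev helper change later in this same series uses it to undo any rotation before restoring the console, roughly:

	if (dev->mode_config.rotation_property) {
		drm_mode_plane_set_obj_prop(plane,
					    dev->mode_config.rotation_property,
					    BIT(DRM_ROTATE_0));
	}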
/**
* drm_mode_getproperty_ioctl - get the current value of a object's property
@ -4309,7 +4362,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
break;
case DRM_MODE_OBJECT_PLANE:
ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
property, arg->value);
break;
}
@ -4529,7 +4583,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
{
struct drm_mode_crtc_page_flip *page_flip = data;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int ret = -EINVAL;
@ -4545,7 +4599,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (!crtc)
return -ENOENT;
drm_modeset_lock(&crtc->mutex, NULL);
drm_modeset_lock_crtc(crtc);
if (crtc->primary->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@ -4601,7 +4655,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
(void (*) (struct drm_pending_event *)) kfree;
}
old_fb = crtc->primary->fb;
crtc->primary->old_fb = crtc->primary->fb;
ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@ -4611,7 +4665,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
kfree(e);
}
/* Keep the old fb, don't unref it. */
old_fb = NULL;
crtc->primary->old_fb = NULL;
} else {
/*
* Warn if the driver hasn't properly updated the crtc->fb
@ -4627,9 +4681,10 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
out:
if (fb)
drm_framebuffer_unreference(fb);
if (old_fb)
drm_framebuffer_unreference(old_fb);
drm_modeset_unlock(&crtc->mutex);
if (crtc->primary->old_fb)
drm_framebuffer_unreference(crtc->primary->old_fb);
crtc->primary->old_fb = NULL;
drm_modeset_unlock_crtc(crtc);
return ret;
}
@ -4645,9 +4700,14 @@ out:
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
list_for_each_entry(plane, &dev->mode_config.plane_list, head)
if (plane->funcs->reset)
plane->funcs->reset(plane);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);


@ -36,6 +36,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "drm_internal.h"
#if defined(CONFIG_DEBUG_FS)
@ -49,9 +50,7 @@ static const struct drm_info_list drm_debugfs_list[] = {
{"clients", drm_clients_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
};
#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)


@ -35,6 +35,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
/**
* Initialize the DMA data.
@ -124,7 +125,7 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
*
* Resets the fields of \p buf.
*/
void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
{
if (!buf)
return;
@ -142,8 +143,8 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
void drm_core_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
@ -154,7 +155,7 @@ void drm_core_reclaim_buffers(struct drm_device *dev,
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
drm_legacy_free_buffer(dev, dma->buflist[i]);
break;
case DRM_LIST_WAIT:
dma->buflist[i]->list = DRM_LIST_RECLAIM;
@ -166,5 +167,3 @@ void drm_core_reclaim_buffers(struct drm_device *dev,
}
}
}
EXPORT_SYMBOL(drm_core_reclaim_buffers);


@ -682,7 +682,7 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi)
{
int ret;
int ret, vcpi_ret;
mutex_lock(&mgr->payload_lock);
ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
@ -692,8 +692,16 @@ static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
goto out_unlock;
}
vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
if (vcpi_ret > mgr->max_payloads) {
ret = -EINVAL;
DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
goto out_unlock;
}
set_bit(ret, &mgr->payload_mask);
vcpi->vcpi = ret;
set_bit(vcpi_ret, &mgr->vcpi_mask);
vcpi->vcpi = vcpi_ret + 1;
mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
mutex_unlock(&mgr->payload_lock);
@ -701,15 +709,23 @@ out_unlock:
}
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
int id)
int vcpi)
{
if (id == 0)
int i;
if (vcpi == 0)
return;
mutex_lock(&mgr->payload_lock);
DRM_DEBUG_KMS("putting payload %d\n", id);
clear_bit(id, &mgr->payload_mask);
mgr->proposed_vcpis[id - 1] = NULL;
DRM_DEBUG_KMS("putting payload %d\n", vcpi);
clear_bit(vcpi - 1, &mgr->vcpi_mask);
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->proposed_vcpis[i])
if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
mgr->proposed_vcpis[i] = NULL;
clear_bit(i + 1, &mgr->payload_mask);
}
}
mutex_unlock(&mgr->payload_lock);
}
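With this change the VCPI number is decoupled from the payload-table slot: the slot still comes from payload_mask (bit 0 stays reserved), while the id comes from vcpi_mask and is stored 1-based so that vcpi == 0 keeps meaning "unallocated". A worked example under an assumed state:

/*
 * Assumed state: max_payloads = 4, payload_mask = 0x3 (bit 0 reserved,
 * slot 1 in use), vcpi_mask = 0x5 (VCPIs 1 and 3 in use).
 *
 * drm_dp_mst_assign_payload_id() then finds ret = 2 and vcpi_ret = 1,
 * so the new stream gets vcpi->vcpi = 1 + 1 = 2 and is stored in
 * mgr->proposed_vcpis[2 - 1]; releasing it later clears bit (2 - 1) = 1
 * in vcpi_mask again.
 */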
@ -1563,7 +1579,7 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
}
drm_dp_dpcd_write_payload(mgr, id, payload);
payload->payload_state = 0;
payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
return 0;
}
@ -1590,7 +1606,7 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
*/
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
int i;
int i, j;
int cur_slots = 1;
struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
@ -1607,26 +1623,46 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
port = NULL;
req_payload.num_slots = 0;
}
if (mgr->payloads[i].start_slot != req_payload.start_slot) {
mgr->payloads[i].start_slot = req_payload.start_slot;
}
/* work out what is required to happen with this payload */
if (mgr->payloads[i].start_slot != req_payload.start_slot ||
mgr->payloads[i].num_slots != req_payload.num_slots) {
if (mgr->payloads[i].num_slots != req_payload.num_slots) {
/* need to push an update for this payload */
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
} else
req_payload.payload_state = 0;
mgr->payloads[i].start_slot = req_payload.start_slot;
mgr->payloads[i].start_slot = 0;
}
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
}
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
DRM_DEBUG_KMS("removing payload %d\n", i);
for (j = i; j < mgr->max_payloads - 1; j++) {
memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
set_bit(j + 1, &mgr->payload_mask);
} else {
clear_bit(j + 1, &mgr->payload_mask);
}
}
memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
clear_bit(mgr->max_payloads, &mgr->payload_mask);
}
}
mutex_unlock(&mgr->payload_lock);
return 0;
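The second loop added above compacts the payload table whenever an entry has gone through DP_PAYLOAD_DELETE_LOCAL, so the table stays contiguous while each remaining stream keeps its (now independent) VCPI number. A short illustration under an assumed state:

/*
 * Assumed state, max_payloads = 3: payloads[] = { A, B, C } with B marked
 * DP_PAYLOAD_DELETE_LOCAL.  The loop shifts C (and proposed_vcpis[2])
 * down into index 1, recomputes the corresponding payload_mask bit from
 * the shifted entry, then zeroes the last slot and clears its mask bit.
 * C keeps its original VCPI id even though its table slot changed.
 */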
@ -1657,9 +1693,9 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
}
if (ret) {
mutex_unlock(&mgr->payload_lock);
@ -1772,7 +1808,7 @@ static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
case DP_LINK_BW_5_4:
return 10 * dp_link_count;
}
return 0;
BUG();
}
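The switch this hunk belongs to converts link rate and lane count into the per-timeslot bandwidth value used as the PBN divisor elsewhere in this file; for example, an HBR2 (5.4 Gbps) link trained at 4 lanes yields 10 * 4 = 40. Returning 0 for an unknown configuration would later feed a divide-by-zero into that math, hence the BUG() for link rates the code does not know about.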
/**
@ -1861,6 +1897,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
mgr->vcpi_mask = 0;
}
out_unlock:
@ -2071,6 +2108,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
* drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
* @mgr: manager to notify irq for.
* @esi: 4 bytes from SINK_COUNT_ESI
* @handled: whether the hpd interrupt was consumed or not
*
* This should be called from the driver when it detects a short IRQ,
* along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
@ -2474,7 +2512,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
mutex_unlock(&mgr->lock);
mutex_lock(&mgr->payload_lock);
seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->proposed_vcpis[i]) {


@ -35,32 +35,20 @@
#include <drm/drmP.h>
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
/*
* Default to use monotonic timestamps for wait-for-vblank and page-flip
* complete events.
*/
unsigned int drm_timestamp_monotonic = 1;
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
@ -68,22 +56,19 @@ static struct idr drm_minors_idr;
struct class *drm_class;
static struct dentry *drm_debugfs_root;
int drm_err(const char *func, const char *format, ...)
void drm_err(const char *func, const char *format, ...)
{
struct va_format vaf;
va_list args;
int r;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
va_end(args);
return r;
}
EXPORT_SYMBOL(drm_err);
@ -102,6 +87,8 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...)
}
EXPORT_SYMBOL(drm_ut_debug_printk);
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
struct drm_master *drm_master_create(struct drm_minor *minor)
{
struct drm_master *master;
@ -133,7 +120,6 @@ EXPORT_SYMBOL(drm_master_get);
static void drm_master_destroy(struct kref *kref)
{
struct drm_master *master = container_of(kref, struct drm_master, refcount);
struct drm_magic_entry *pt, *next;
struct drm_device *dev = master->minor->dev;
struct drm_map_list *r_list, *list_temp;
@ -143,7 +129,7 @@ static void drm_master_destroy(struct kref *kref)
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
if (r_list->master == master) {
drm_rmmap_locked(dev, r_list->map);
drm_legacy_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
@ -154,12 +140,6 @@ static void drm_master_destroy(struct kref *kref)
master->unique_len = 0;
}
list_for_each_entry_safe(pt, next, &master->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&master->magiclist, &pt->hash_item);
kfree(pt);
}
drm_ht_remove(&master->magiclist);
mutex_unlock(&dev->struct_mutex);
@ -615,7 +595,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
goto err_ht;
}
if (driver->driver_features & DRIVER_GEM) {
if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
if (ret) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
@ -645,7 +625,7 @@ static void drm_dev_release(struct kref *ref)
{
struct drm_device *dev = container_of(ref, struct drm_device, ref);
if (dev->driver->driver_features & DRIVER_GEM)
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_destroy(dev);
drm_legacy_ctxbitmap_cleanup(dev);
@ -779,7 +759,7 @@ void drm_dev_unregister(struct drm_device *dev)
drm_vblank_cleanup(dev);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
drm_legacy_rmmap(dev, r_list->map);
drm_minor_unregister(dev, DRM_MINOR_LEGACY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);


@ -632,27 +632,27 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
/* 6 - 720(1440)x480i@60Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
/* 7 - 720(1440)x480i@60Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
/* 8 - 720(1440)x240@60Hz */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
/* 9 - 720(1440)x240@60Hz */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@ -714,27 +714,27 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
/* 21 - 720(1440)x576i@50Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
/* 22 - 720(1440)x576i@50Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
/* 23 - 720(1440)x288@50Hz */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
/* 24 - 720(1440)x288@50Hz */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@ -837,17 +837,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
/* 44 - 720(1440)x576i@100Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
/* 45 - 720(1440)x576i@100Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
@ -870,15 +870,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
/* 50 - 720(1440)x480i@120Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
/* 51 - 720(1440)x480i@120Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@ -892,15 +892,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
/* 54 - 720(1440)x576i@200Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
/* 55 - 720(1440)x576i@200Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@ -914,15 +914,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 58 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
/* 58 - 720(1440)x480i@240 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 59 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
/* 59 - 720(1440)x480i@240 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@ -2103,7 +2103,8 @@ static int
add_inferred_modes(struct drm_connector *connector, struct edid *edid)
{
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
.connector = connector,
.edid = edid,
};
if (version_greater(edid, 1, 0))
@ -2169,7 +2170,8 @@ add_established_modes(struct drm_connector *connector, struct edid *edid)
((edid->established_timings.mfg_rsvd & 0x80) << 9);
int i, modes = 0;
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
.connector = connector,
.edid = edid,
};
for (i = 0; i <= EDID_EST_TIMINGS; i++) {
@ -2227,7 +2229,8 @@ add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
int i, modes = 0;
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
.connector = connector,
.edid = edid,
};
for (i = 0; i < EDID_STD_TIMINGS; i++) {
@ -2313,7 +2316,8 @@ static int
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
{
struct detailed_mode_closure closure = {
connector, edid, 0, 0, 0
.connector = connector,
.edid = edid,
};
if (version_greater(edid, 1, 2))
@ -2357,11 +2361,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
u32 quirks)
{
struct detailed_mode_closure closure = {
connector,
edid,
1,
quirks,
0
.connector = connector,
.edid = edid,
.preferred = 1,
.quirks = quirks,
};
if (closure.preferred && !version_greater(edid, 1, 3))
@ -3433,10 +3436,10 @@ EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
/**
* drm_assign_hdmi_deep_color_info - detect whether monitor supports
* hdmi deep color modes and update drm_display_info if so.
*
* @edid: monitor EDID information
* @info: Updated with maximum supported deep color bpc and color format
* if deep color supported.
* @connector: DRM connector, used only for debug output
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI deep color supported, false if not or unknown.


@ -126,7 +126,7 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL);
temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
if (!temp)
return -ENOMEM;
@ -171,60 +171,6 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
struct drm_fb_helper_connector *fb_helper_conn;
int i;
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_cmdline_mode *mode;
struct drm_connector *connector;
char *option = NULL;
fb_helper_conn = fb_helper->connector_info[i];
connector = fb_helper_conn->connector;
mode = &fb_helper_conn->cmdline_mode;
/* do something on return - turn off connector maybe */
if (fb_get_options(connector->name, &option))
continue;
if (drm_mode_parse_command_line_for_connector(option,
connector,
mode)) {
if (mode->force) {
const char *s;
switch (mode->force) {
case DRM_FORCE_OFF:
s = "OFF";
break;
case DRM_FORCE_ON_DIGITAL:
s = "ON - dig";
break;
default:
case DRM_FORCE_ON:
s = "ON";
break;
}
DRM_INFO("forcing %s connector %s\n",
connector->name, s);
connector->force = mode->force;
}
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
connector->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
mode->margins ? " with margins" : "",
mode->interlace ? " interlaced" : "");
}
}
return 0;
}
static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
{
uint16_t *r_base, *g_base, *b_base;
@ -345,10 +291,17 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_warn_on_modeset_not_all_locked(dev);
list_for_each_entry(plane, &dev->mode_config.plane_list, head)
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
if (dev->mode_config.rotation_property) {
drm_mode_plane_set_obj_prop(plane,
dev->mode_config.rotation_property,
BIT(DRM_ROTATE_0));
}
}
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
struct drm_crtc *crtc = mode_set->crtc;
@ -419,11 +372,11 @@ static bool drm_fb_helper_force_kernel_mode(void)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
/* NOTE: we use lockless flag below to avoid grabbing other
* modeset locks. So just trylock the underlying mutex
* directly:
/*
* NOTE: Use trylock mode to avoid deadlocks and sleeping in
* panic context.
*/
if (!mutex_trylock(&dev->mode_config.mutex)) {
if (__drm_modeset_lock_all(dev, true) != 0) {
error = true;
continue;
}
@ -432,7 +385,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
if (ret)
error = true;
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
}
return error;
}
@ -1013,7 +966,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
cmdline_mode = &fb_helper_conn->cmdline_mode;
cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
@ -1260,9 +1213,7 @@ EXPORT_SYMBOL(drm_has_preferred_mode);
static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
{
struct drm_cmdline_mode *cmdline_mode;
cmdline_mode = &fb_connector->cmdline_mode;
return cmdline_mode->specified;
return fb_connector->connector->cmdline_mode.specified;
}
struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
@ -1272,7 +1223,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
struct drm_display_mode *mode = NULL;
bool prefer_non_interlace;
cmdline_mode = &fb_helper_conn->cmdline_mode;
cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
if (cmdline_mode->specified == false)
return mode;
@ -1657,8 +1608,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
struct drm_device *dev = fb_helper->dev;
int count = 0;
drm_fb_helper_parse_command_line(fb_helper);
mutex_lock(&dev->mode_config.mutex);
count = drm_fb_helper_probe_connector_modes(fb_helper,
dev->mode_config.max_width,


@ -39,10 +39,10 @@
#include <linux/slab.h>
#include <linux/module.h>
#include "drm_legacy.h"
#include "drm_internal.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
EXPORT_SYMBOL(drm_global_mutex);
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
@ -171,7 +171,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
if (dev->driver->driver_features & DRIVER_GEM)
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_open(dev, priv);
if (drm_core_check_feature(dev, DRIVER_PRIME))
@ -256,7 +256,7 @@ out_close:
out_prime_destroy:
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&priv->prime);
if (dev->driver->driver_features & DRIVER_GEM)
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, priv);
put_pid(priv->pid);
kfree(priv);
@ -268,11 +268,11 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
if (drm_i_have_hw_lock(dev, file_priv)) {
if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_legacy_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
}
@ -330,8 +330,6 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
*/
int drm_lastclose(struct drm_device * dev)
{
struct drm_vma_entry *vma, *vma_temp;
DRM_DEBUG("\n");
if (dev->driver->lastclose)
@ -346,13 +344,7 @@ int drm_lastclose(struct drm_device * dev)
drm_agp_clear(dev);
drm_legacy_sg_cleanup(dev);
/* Clear vma list (only built for debugging) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
kfree(vma);
}
drm_legacy_vma_flush(dev);
drm_legacy_dma_takedown(dev);
mutex_unlock(&dev->struct_mutex);
@ -412,14 +404,14 @@ int drm_release(struct inode *inode, struct file *filp)
drm_master_release(dev, filp);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
drm_core_reclaim_buffers(dev, file_priv);
drm_legacy_reclaim_buffers(dev, file_priv);
drm_events_release(file_priv);
if (dev->driver->driver_features & DRIVER_MODESET)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_fb_release(file_priv);
if (dev->driver->driver_features & DRIVER_GEM)
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file_priv);
drm_legacy_ctxbitmap_flush(dev, file_priv);
@ -464,6 +456,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&file_priv->prime);
WARN_ON(!list_empty(&file_priv->event_list));
put_pid(file_priv->pid);
kfree(file_priv);


@ -38,6 +38,8 @@
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"
/** @file drm_gem.c
*
@ -146,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_object_init);
/**
* drm_gem_object_init - initialize an allocated private GEM object
* drm_gem_private_object_init - initialize an allocated private GEM object
* @dev: drm_device the object should be initialized for
* @obj: drm_gem_object to initialize
* @size: object size
@ -579,7 +581,7 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_gem_close *args = data;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -ENODEV;
ret = drm_gem_handle_delete(file_priv, args->handle);
@ -606,7 +608,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@ -659,7 +661,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
int ret;
u32 handle;
if (!(dev->driver->driver_features & DRIVER_GEM))
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -ENODEV;
mutex_lock(&dev->object_name_lock);
@ -887,7 +889,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma_pages(vma));
if (!node) {
mutex_unlock(&dev->struct_mutex);
return drm_mmap(filp, vma);
return -EINVAL;
} else if (!drm_vma_node_is_allowed(node, filp)) {
mutex_unlock(&dev->struct_mutex);
return -EACCES;


@ -316,7 +316,8 @@ out:
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct drm_gem_cma_object *cma_obj;
@ -325,14 +326,14 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
return ERR_PTR(-EINVAL);
/* Create a CMA GEM buffer. */
cma_obj = __drm_gem_cma_create(dev, size);
cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
if (IS_ERR(cma_obj))
return ERR_CAST(cma_obj);
cma_obj->paddr = sg_dma_address(sgt->sgl);
cma_obj->sgt = sgt;
DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);
DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
return &cma_obj->base;
}


@ -35,6 +35,9 @@
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include "drm_legacy.h"
/**
* Called when "/proc/dri/.../name" is read.
@ -183,15 +186,32 @@ int drm_clients_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_file *priv;
seq_printf(m,
"%20s %5s %3s master a %5s %10s\n",
"command",
"pid",
"dev",
"uid",
"magic");
/* dev->filelist is sorted youngest first, but we want to present
* oldest first (i.e. kernel, servers, clients), so walk backwards.
*/
mutex_lock(&dev->struct_mutex);
seq_printf(m, "a dev pid uid magic\n\n");
list_for_each_entry(priv, &dev->filelist, lhead) {
seq_printf(m, "%c %3d %5d %5d %10u\n",
priv->authenticated ? 'y' : 'n',
priv->minor->index,
list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
struct task_struct *task;
rcu_read_lock(); /* locks pid_task()->comm */
task = pid_task(priv->pid, PIDTYPE_PID);
seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
task ? task->comm : "<unknown>",
pid_vnr(priv->pid),
priv->minor->index,
priv->is_master ? 'y' : 'n',
priv->authenticated ? 'y' : 'n',
from_kuid_munged(seq_user_ns(m), priv->uid),
priv->magic);
rcu_read_unlock();
}
mutex_unlock(&dev->struct_mutex);
return 0;
@ -223,62 +243,3 @@ int drm_gem_name_info(struct seq_file *m, void *data)
return 0;
}
#if DRM_DEBUG_CODE
int drm_vma_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_vma_entry *pt;
struct vm_area_struct *vma;
unsigned long vma_count = 0;
#if defined(__i386__)
unsigned int pgprot;
#endif
mutex_lock(&dev->struct_mutex);
list_for_each_entry(pt, &dev->vmalist, head)
vma_count++;
seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
vma_count, high_memory,
(void *)(unsigned long)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
if (!vma)
continue;
seq_printf(m,
"\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
pt->pid,
(void *)vma->vm_start, (void *)vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_pgoff);
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
seq_printf(m, " %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_PSE ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
seq_printf(m, "\n");
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
#endif


@ -0,0 +1,132 @@
/*
* Copyright © 2014 Intel Corporation
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/* drm_irq.c */
extern unsigned int drm_timestamp_monotonic;
/* drm_fops.c */
extern struct mutex drm_global_mutex;
int drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* drm_vm.c */
int drm_vma_info(struct seq_file *m, void *data);
void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf);
/* drm_info.c */
int drm_name_info(struct seq_file *m, void *data);
int drm_vm_info(struct seq_file *m, void *data);
int drm_bufs_info(struct seq_file *m, void *data);
int drm_vblank_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_irq.c */
int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* drm_auth.c */
int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
/* drm_sysfs.c */
extern struct class *drm_class;
struct class *drm_sysfs_create(struct module *owner, char *name);
void drm_sysfs_destroy(void);
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
int drm_sysfs_connector_add(struct drm_connector *connector);
void drm_sysfs_connector_remove(struct drm_connector *connector);
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
/* drm_drv.c */
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
struct drm_master *drm_master_create(struct drm_minor *minor);
/* drm_debugfs.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root);
int drm_debugfs_cleanup(struct drm_minor *minor);
int drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
#else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
{
return 0;
}
static inline int drm_debugfs_cleanup(struct drm_minor *minor)
{
return 0;
}
static inline int drm_debugfs_connector_add(struct drm_connector *connector)
{
return 0;
}
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}
#endif


@ -31,6 +31,7 @@
#include <drm/drmP.h>
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"
#include <linux/pci.h>
#include <linux/export.h>
@ -41,121 +42,6 @@
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if __OS_HAS_AGP
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/**
* Get the bus id.
*
@ -167,7 +53,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
*
* Copies the bus id from drm_device::unique into user space.
*/
int drm_getunique(struct drm_device *dev, void *data,
static int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
@ -189,7 +75,6 @@ drm_unset_busid(struct drm_device *dev,
kfree(master->unique);
master->unique = NULL;
master->unique_len = 0;
master->unique_size = 0;
}
/**
@ -207,7 +92,7 @@ drm_unset_busid(struct drm_device *dev,
* version 1.1 or greater. Also note that KMS is all version 1.1 and later and
* UMS was only ever supported on pci devices.
*/
int drm_setunique(struct drm_device *dev, void *data,
static int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
@ -245,15 +130,15 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
if (master->unique != NULL)
drm_unset_busid(dev, master);
if (dev->driver->bus && dev->driver->bus->set_busid) {
ret = dev->driver->bus->set_busid(dev, master);
if (dev->driver->set_busid) {
ret = dev->driver->set_busid(dev, master);
if (ret) {
drm_unset_busid(dev, master);
return ret;
}
} else {
if (WARN(dev->unique == NULL,
"No drm_bus.set_busid() implementation provided by "
"No drm_driver.set_busid() implementation provided by "
"%ps. Use drm_dev_set_unique() to set the unique "
"name explicitly.", dev->driver))
return -EINVAL;
@ -279,7 +164,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
* Searches for the mapping with the specified offset and copies its information
* into userspace
*/
int drm_getmap(struct drm_device *dev, void *data,
static int drm_getmap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *map = data;
@ -340,7 +225,7 @@ int drm_getmap(struct drm_device *dev, void *data,
* Searches for the client with the specified index and copies its information
* into userspace
*/
int drm_getclient(struct drm_device *dev, void *data,
static int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
@ -380,7 +265,7 @@ int drm_getclient(struct drm_device *dev, void *data,
*
* \return zero on success or a negative number on failure.
*/
int drm_getstats(struct drm_device *dev, void *data,
static int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_stats *stats = data;
@ -394,7 +279,7 @@ int drm_getstats(struct drm_device *dev, void *data,
/**
* Get device/driver capabilities
*/
int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_get_cap *req = data;
@ -444,7 +329,7 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
/**
* Set device/driver capabilities
*/
int
static int
drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_set_client_cap *req = data;
@ -478,7 +363,7 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Sets the requested interface version
*/
int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_set_version *sv = data;
int if_version, retcode = 0;
@ -624,6 +509,121 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if __OS_HAS_AGP
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
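For orientation, the dispatch that consumes this table boils down to roughly the following (a simplified sketch of the lookup done by drm_ioctl(), not the verbatim body; error paths and the driver-private range above DRM_COMMAND_BASE, which is looked up in dev->driver->ioctls instead, are omitted):

	unsigned int nr = DRM_IOCTL_NR(cmd);
	const struct drm_ioctl_desc *ioctl = &drm_ioctls[nr];
	int retcode;

	/* flag-based permission check (DRM_AUTH, DRM_MASTER, ...) */
	retcode = drm_ioctl_permit(ioctl->flags, file_priv);
	if (retcode == 0)
		retcode = ioctl->func(dev, kdata, file_priv);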
/**
* Called whenever a process performs an ioctl on /dev/drm.
*


@ -34,6 +34,7 @@
#include <drm/drmP.h>
#include "drm_trace.h"
#include "drm_internal.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/slab.h>
@ -55,12 +56,91 @@
*/
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags);
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
/*
* Clear vblank timestamp buffer for a crtc.
* Default to use monotonic timestamps for wait-for-vblank and page-flip
* complete events.
*/
static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
unsigned int drm_timestamp_monotonic = 1;
static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @crtc: counter to update
*
* Call back into the driver to update the appropriate vblank counter
* (specified by @crtc). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
* call if necessary.
*
* Only necessary when going from off->on, to account for frames we
* didn't get an interrupt for.
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
u32 cur_vblank, diff, tslot;
bool rc;
struct timeval t_vblank;
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
*
* We repeat the hardware vblank counter & timestamp query until
* we get consistent results. This is to prevent races between the gpu
* updating its hardware counter and us retrieving the
* corresponding vblank timestamp.
*/
do {
cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
/* Deal with counter wrap */
diff = cur_vblank - vblank->last;
if (cur_vblank < vblank->last) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, vblank->last, cur_vblank, diff);
}
DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
crtc, diff);
if (diff == 0)
return;
/* Reinitialize corresponding vblank timestamp if high-precision query
* available. Skip this step if query unsupported or failed. Will
* reinitialize delayed at next vblank interrupt in that case.
*/
if (rc) {
tslot = atomic_read(&vblank->count) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
smp_mb__before_atomic();
atomic_add(diff, &vblank->count);
smp_mb__after_atomic();
}
/*
@ -71,10 +151,11 @@ static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
*/
static void vblank_disable_and_save(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
u32 vblcount;
s64 diff_ns;
int vblrc;
bool vblrc;
struct timeval tvblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
@ -84,8 +165,28 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
*/
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/*
* If the vblank interrupt was already disabled, update the count
* and timestamp to maintain the appearance that the counter
* has been ticking all along until this time. This makes the
* count account for the entire time between drm_vblank_on() and
* drm_vblank_off().
*
* But only do this if precise vblank timestamps are available.
* Otherwise we might read a totally bogus timestamp since drivers
* lacking precise timestamp support rely upon sampling the system clock
* at vblank interrupt time. Which obviously won't work out well if the
* vblank interrupt is disabled.
*/
if (!vblank->enabled &&
drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
drm_update_vblank_count(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
return;
}
dev->driver->disable_vblank(dev, crtc);
dev->vblank[crtc].enabled = false;
vblank->enabled = false;
/* No further vblank irq's will be processed after
* this point. Get current hardware vblank count and
@ -100,9 +201,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* delayed gpu counter increment.
*/
do {
dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
vblank->last = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
} while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
} while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
if (!count)
vblrc = 0;
@ -110,7 +211,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
*/
vblcount = atomic_read(&dev->vblank[crtc].count);
vblcount = atomic_read(&vblank->count);
diff_ns = timeval_to_ns(&tvblank) -
timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
@ -126,14 +227,18 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* available. In that case we can't account for this and just
* hope for the best.
*/
if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->vblank[crtc].count);
if (vblrc && (abs64(diff_ns) > 1000000)) {
/* Store new timestamp in ringbuffer. */
vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
/* Increment cooked vblank count. This also atomically commits
* the timestamp computed above.
*/
smp_mb__before_atomic();
atomic_inc(&vblank->count);
smp_mb__after_atomic();
}
/* Invalidate all timestamps while vblank irq's are off. */
clear_vblank_timestamps(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
@ -164,14 +269,20 @@ static void vblank_disable_fn(unsigned long arg)
void drm_vblank_cleanup(struct drm_device *dev)
{
int crtc;
unsigned long irqflags;
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
del_timer_sync(&dev->vblank[crtc].disable_timer);
vblank_disable_fn((unsigned long)&dev->vblank[crtc]);
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
del_timer_sync(&vblank->disable_timer);
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
kfree(dev->vblank);
@ -204,11 +315,13 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
goto err;
for (i = 0; i < num_crtcs; i++) {
dev->vblank[i].dev = dev;
dev->vblank[i].crtc = i;
init_waitqueue_head(&dev->vblank[i].queue);
setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
(unsigned long)&dev->vblank[i]);
struct drm_vblank_crtc *vblank = &dev->vblank[i];
vblank->dev = dev;
vblank->crtc = i;
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@ -224,7 +337,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
return 0;
err:
drm_vblank_cleanup(dev);
dev->num_crtcs = 0;
return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
@ -360,9 +473,11 @@ int drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
wake_up(&dev->vblank[i].queue);
dev->vblank[i].enabled = false;
dev->vblank[i].last =
struct drm_vblank_crtc *vblank = &dev->vblank[i];
wake_up(&vblank->queue);
vblank->enabled = false;
vblank->last =
dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@ -617,7 +732,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* within vblank area, counting down the number of lines until
* start of scanout.
*/
invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
invbl = vbl_status & DRM_SCANOUTPOS_IN_VBLANK;
/* Convert scanout position into elapsed time at raw_time query
* since start of scanout at first display scanline. delta_ns
@ -647,7 +762,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
if (invbl)
vbl_status |= DRM_VBLANKTIME_INVBL;
vbl_status |= DRM_VBLANKTIME_IN_VBLANK;
return vbl_status;
}
@ -679,10 +794,11 @@ static struct timeval get_drm_timestamp(void)
* call, i.e., it isn't very precisely locked to the true vblank.
*
* Returns:
* Non-zero if timestamp is considered to be very precise, zero otherwise.
* True if timestamp is considered to be very precise, false otherwise.
*/
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
{
int ret;
@ -694,7 +810,7 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
tvblank, flags);
if (ret > 0)
return (u32) ret;
return true;
}
/* GPU high precision timestamp query unsupported or failed.
@ -702,9 +818,8 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
*/
*tvblank = get_drm_timestamp();
return 0;
return false;
}
EXPORT_SYMBOL(drm_get_last_vbltimestamp);
/**
* drm_vblank_count - retrieve "cooked" vblank counter value
@ -720,7 +835,11 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
return atomic_read(&dev->vblank[crtc].count);
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
if (WARN_ON(crtc >= dev->num_crtcs))
return 0;
return atomic_read(&vblank->count);
}
EXPORT_SYMBOL(drm_vblank_count);
@ -740,18 +859,22 @@ EXPORT_SYMBOL(drm_vblank_count);
u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
u32 cur_vblank;
if (WARN_ON(crtc >= dev->num_crtcs))
return 0;
/* Read timestamp from slot of _vblank_time ringbuffer
* that corresponds to current vblank count. Retry if
* count has incremented during readout. This works like
* a seqlock.
*/
do {
cur_vblank = atomic_read(&dev->vblank[crtc].count);
cur_vblank = atomic_read(&vblank->count);
*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
smp_rmb();
} while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
} while (cur_vblank != atomic_read(&vblank->count));
return cur_vblank;
}
@ -799,70 +922,6 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_send_vblank_event);
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @crtc: counter to update
*
* Call back into the driver to update the appropriate vblank counter
* (specified by @crtc). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
* call if necessary.
*
* Only necessary when going from off->on, to account for frames we
* didn't get an interrupt for.
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
u32 cur_vblank, diff, tslot, rc;
struct timeval t_vblank;
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
*
* We repeat the hardware vblank counter & timestamp query until
* we get consistent results. This is to prevent races between the gpu
* updating its hardware counter and us retrieving the
* corresponding vblank timestamp.
*/
do {
cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
/* Deal with counter wrap */
diff = cur_vblank - dev->vblank[crtc].last;
if (cur_vblank < dev->vblank[crtc].last) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, dev->vblank[crtc].last, cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
/* Reinitialize corresponding vblank timestamp if high-precision query
* available. Skip this step if query unsupported or failed. Will
* reinitialize delayed at next vblank interrupt in that case.
*/
if (rc) {
tslot = atomic_read(&dev->vblank[crtc].count) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
smp_mb__before_atomic();
atomic_add(diff, &dev->vblank[crtc].count);
smp_mb__after_atomic();
}
/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
@ -870,13 +929,14 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
*/
static int drm_vblank_enable(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
int ret = 0;
assert_spin_locked(&dev->vbl_lock);
spin_lock(&dev->vblank_time_lock);
if (!dev->vblank[crtc].enabled) {
if (!vblank->enabled) {
/*
* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
@ -887,9 +947,9 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
ret = dev->driver->enable_vblank(dev, crtc);
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
if (ret)
atomic_dec(&dev->vblank[crtc].refcount);
atomic_dec(&vblank->refcount);
else {
dev->vblank[crtc].enabled = true;
vblank->enabled = true;
drm_update_vblank_count(dev, crtc);
}
}
@ -914,16 +974,20 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
int ret = 0;
if (WARN_ON(crtc >= dev->num_crtcs))
return -EINVAL;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
if (atomic_add_return(1, &vblank->refcount) == 1) {
ret = drm_vblank_enable(dev, crtc);
} else {
if (!dev->vblank[crtc].enabled) {
atomic_dec(&dev->vblank[crtc].refcount);
if (!vblank->enabled) {
atomic_dec(&vblank->refcount);
ret = -EINVAL;
}
}
@ -963,13 +1027,23 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
BUG_ON(atomic_read(&vblank->refcount) == 0);
if (WARN_ON(crtc >= dev->num_crtcs))
return;
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
mod_timer(&dev->vblank[crtc].disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
if (atomic_dec_and_test(&vblank->refcount)) {
if (drm_vblank_offdelay == 0)
return;
else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
vblank_disable_fn((unsigned long)vblank);
else
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
}
EXPORT_SYMBOL(drm_vblank_put);
@ -988,6 +1062,50 @@ void drm_crtc_vblank_put(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_put);
/**
* drm_wait_one_vblank - wait for one vblank
* @dev: DRM device
* @crtc: crtc index
*
* This waits for one vblank to pass on @crtc, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
* due to lack of driver support or because the crtc is off.
*/
void drm_wait_one_vblank(struct drm_device *dev, int crtc)
{
int ret;
u32 last;
ret = drm_vblank_get(dev, crtc);
if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
return;
last = drm_vblank_count(dev, crtc);
ret = wait_event_timeout(dev->vblank[crtc].queue,
last != drm_vblank_count(dev, crtc),
msecs_to_jiffies(100));
WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
drm_vblank_put(dev, crtc);
}
EXPORT_SYMBOL(drm_wait_one_vblank);
/**
* drm_crtc_wait_one_vblank - wait for one vblank
* @crtc: DRM crtc
*
* This waits for one vblank to pass on @crtc, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
* due to lack of driver support or because the crtc is off.
*/
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
{
drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
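A minimal usage sketch, assuming a KMS driver that must let one full frame scan out before reprogramming the pipe (the surrounding context is illustrative):

	/* let the just-submitted frame reach the screen first; the vblank
	 * reference is taken and dropped internally via drm_vblank_get()/
	 * drm_vblank_put(), so no extra bookkeeping is needed here.
	 */
	drm_crtc_wait_one_vblank(crtc);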
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
@ -1004,19 +1122,34 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
*/
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
unsigned int seq;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (WARN_ON(crtc >= dev->num_crtcs))
return;
spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock);
vblank_disable_and_save(dev, crtc);
wake_up(&dev->vblank[crtc].queue);
wake_up(&vblank->queue);
/*
* Prevent subsequent drm_vblank_get() from re-enabling
* the vblank interrupt by bumping the refcount.
*/
if (!vblank->inmodeset) {
atomic_inc(&vblank->refcount);
vblank->inmodeset = 1;
}
spin_unlock(&dev->vbl_lock);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
@ -1027,9 +1160,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
drm_vblank_put(dev, e->pipe);
send_vblank_event(dev, e, seq, &now);
}
spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
@ -1066,11 +1197,35 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
*/
void drm_vblank_on(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
if (WARN_ON(crtc >= dev->num_crtcs))
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* re-enable interrupts if there are users left */
if (atomic_read(&dev->vblank[crtc].refcount) != 0)
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
vblank->inmodeset = 0;
}
/*
* sample the current counter to avoid random jumps
* when drm_vblank_enable() applies the diff
*
* -1 to make sure the user will never see the same
* vblank counter value before and after a modeset
*/
vblank->last =
(dev->driver->get_vblank_counter(dev, crtc) - 1) &
dev->max_vblank_count;
/*
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
if (atomic_read(&vblank->refcount) != 0 ||
(!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
WARN_ON(drm_vblank_enable(dev, crtc));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
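To show where these calls typically sit, a hedged sketch of a KMS driver's crtc hooks (the foo_ names are illustrative, not part of this patch set):

	static void foo_crtc_disable(struct drm_crtc *crtc)
	{
		/* flush pending events and keep drm_vblank_get() from
		 * re-enabling the interrupt while the pipe is down */
		drm_crtc_vblank_off(crtc);
		/* ... then shut the hardware pipe down ... */
	}

	static void foo_crtc_enable(struct drm_crtc *crtc)
	{
		/* ... program and enable the hardware pipe first ... */
		drm_crtc_vblank_on(crtc);
	}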
@ -1118,9 +1273,15 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
if (WARN_ON(crtc >= dev->num_crtcs))
return;
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
@ -1128,10 +1289,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
* to avoid corrupting the count if multiple, mismatched calls occur),
* so that interrupts remain enabled in the interim.
*/
if (!dev->vblank[crtc].inmodeset) {
dev->vblank[crtc].inmodeset = 0x1;
if (!vblank->inmodeset) {
vblank->inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
dev->vblank[crtc].inmodeset |= 0x2;
vblank->inmodeset |= 0x2;
}
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
@ -1146,21 +1307,22 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset);
*/
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
if (dev->vblank[crtc].inmodeset) {
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (dev->vblank[crtc].inmodeset & 0x2)
if (vblank->inmodeset & 0x2)
drm_vblank_put(dev, crtc);
dev->vblank[crtc].inmodeset = 0;
vblank->inmodeset = 0;
}
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
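For the legacy (UMS) path these two helpers simply bracket the modeset; a hedged sketch:

	/* legacy UMS driver, around reprogramming crtc 'pipe' */
	drm_vblank_pre_modeset(dev, pipe);
	/* ... reprogram the crtc timings ... */
	drm_vblank_post_modeset(dev, pipe);

KMS drivers typically use the drm_vblank_off()/drm_vblank_on() pair above instead.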
@ -1212,6 +1374,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e;
struct timeval now;
unsigned long flags;
@ -1235,6 +1398,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
/*
* drm_vblank_off() might have been called after we called
* drm_vblank_get(). drm_vblank_off() holds event_lock
* around the vblank disable, so no need for further locking.
* The reference from drm_vblank_get() protects against
* vblank disable from another source.
*/
if (!vblank->enabled) {
ret = -EINVAL;
goto err_unlock;
}
if (file_priv->event_space < sizeof e->event) {
ret = -EBUSY;
goto err_unlock;
@ -1295,6 +1470,7 @@ err_put:
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
int ret;
unsigned int flags, seq, crtc, high_crtc;
@ -1324,6 +1500,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
if (crtc >= dev->num_crtcs)
return -EINVAL;
vblank = &dev->vblank[crtc];
ret = drm_vblank_get(dev, crtc);
if (ret) {
DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
@ -1356,11 +1534,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
dev->vblank[crtc].last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
vblank->last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->vblank[crtc].enabled ||
!vblank->enabled ||
!dev->irq_enabled));
if (ret != -EINTR) {
@ -1385,12 +1563,11 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
unsigned int seq;
seq = drm_vblank_count_and_time(dev, crtc, &now);
assert_spin_locked(&dev->event_lock);
spin_lock_irqsave(&dev->event_lock, flags);
seq = drm_vblank_count_and_time(dev, crtc, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
@ -1406,8 +1583,6 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
trace_drm_vblank_event(crtc, seq);
}
@ -1421,6 +1596,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
*/
bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
u32 vblcount;
s64 diff_ns;
struct timeval tvblank;
@ -1429,15 +1605,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
if (!dev->num_crtcs)
return false;
if (WARN_ON(crtc >= dev->num_crtcs))
return false;
spin_lock_irqsave(&dev->event_lock, irqflags);
/* Need timestamp lock to prevent concurrent execution with
* vblank enable/disable, as this would cause inconsistent
* or corrupted timestamps and vblank counts.
*/
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
spin_lock(&dev->vblank_time_lock);
/* Vblank irq handling disabled. Nothing to do. */
if (!dev->vblank[crtc].enabled) {
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
if (!vblank->enabled) {
spin_unlock(&dev->vblank_time_lock);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
return false;
}
@ -1446,7 +1628,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
*/
/* Get current timestamp and count. */
vblcount = atomic_read(&dev->vblank[crtc].count);
vblcount = atomic_read(&vblank->count);
drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
/* Compute time difference to timestamp of last vblank */
@ -1470,17 +1652,20 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* the timestamp computed above.
*/
smp_mb__before_atomic();
atomic_inc(&dev->vblank[crtc].count);
atomic_inc(&vblank->count);
smp_mb__after_atomic();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
}
wake_up(&dev->vblank[crtc].queue);
spin_unlock(&dev->vblank_time_lock);
wake_up(&vblank->queue);
drm_handle_vblank_events(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);


@ -23,6 +23,15 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* This file contains legacy interfaces that modern drm drivers
* should no longer be using. They cannot be removed as legacy
* drivers use them, and removing them would be an API break.
*/
#include <linux/list.h>
#include <drm/drm_legacy.h>
struct agp_memory;
struct drm_device;
struct drm_file;
@ -48,4 +57,57 @@ int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
/*
* Generic Buffer Management
*/
#define DRM_MAP_HASH_OFFSET 0x10000000
int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
void drm_legacy_vma_flush(struct drm_device *d);
/*
* AGP Support
*/
struct drm_agp_mem {
unsigned long handle;
struct agp_memory *memory;
unsigned long bound;
int pages;
struct list_head head;
};
/*
* Generic Userspace Locking-API
*/
int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx);
/* DMA support */
int drm_legacy_dma_setup(struct drm_device *dev);
void drm_legacy_dma_takedown(struct drm_device *dev);
void drm_legacy_free_buffer(struct drm_device *dev,
struct drm_buf * buf);
void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *filp);
/* Scatter Gather Support */
void drm_legacy_sg_cleanup(struct drm_device *dev);
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif /* __DRM_LEGACY_H__ */


@ -36,6 +36,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
#include "drm_internal.h"
static int drm_notifier(void *priv);
@ -52,7 +53,8 @@ static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
*
* Add the current task to the lock wait queue, and attempt to take the lock.
*/
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_legacy_lock(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DECLARE_WAITQUEUE(entry, current);
struct drm_lock *lock = data;
@ -120,7 +122,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
sigaddset(&dev->sigmask, SIGTTOU);
dev->sigdata.context = lock->context;
dev->sigdata.lock = master->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
block_all_signals(drm_notifier, dev, &dev->sigmask);
}
if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
@ -146,7 +148,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Transfer and free the lock.
*/
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
@ -157,7 +159,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
if (drm_lock_free(&master->lock, lock->context)) {
if (drm_legacy_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}
@ -250,7 +252,7 @@ static int drm_lock_transfer(struct drm_lock_data *lock_data,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@ -286,26 +288,27 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
* If the lock is not held, then let the signal proceed as usual. If the lock
* is held, then set the contended flag and keep the signal blocked.
*
* \param priv pointer to a drm_sigdata structure.
* \param priv pointer to a drm_device structure.
* \return one if the signal should be delivered normally, or zero if the
* signal should be blocked.
*/
static int drm_notifier(void *priv)
{
struct drm_sigdata *s = (struct drm_sigdata *) priv;
struct drm_device *dev = priv;
struct drm_hw_lock *lock = dev->sigdata.lock;
unsigned int old, new, prev;
/* Allow signal delivery if lock isn't held */
if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
|| _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
if (!lock || !_DRM_LOCK_IS_HELD(lock->lock)
|| _DRM_LOCKING_CONTEXT(lock->lock) != dev->sigdata.context)
return 1;
/* Otherwise, set flag to force call to
drmUnlock */
do {
old = s->lock->lock;
old = lock->lock;
new = old | _DRM_LOCK_CONT;
prev = cmpxchg(&s->lock->lock, old, new);
prev = cmpxchg(&lock->lock, old, new);
} while (prev != old);
return 0;
}
@ -323,7 +326,7 @@ static int drm_notifier(void *priv)
* having to worry about starvation.
*/
void drm_idlelock_take(struct drm_lock_data *lock_data)
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
@ -340,9 +343,9 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);
EXPORT_SYMBOL(drm_legacy_idlelock_take);
void drm_idlelock_release(struct drm_lock_data *lock_data)
void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@ -360,9 +363,10 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);
EXPORT_SYMBOL(drm_legacy_idlelock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
int drm_legacy_i_have_hw_lock(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
return (file_priv->lock_count && master->lock.hw_lock &&


@ -36,8 +36,20 @@
#include <linux/highmem.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
#if __OS_HAS_AGP
#ifdef HAVE_PAGE_AGP
# include <asm/agp.h>
#else
# ifdef __powerpc__
# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
# else
# define PAGE_AGP PAGE_KERNEL
# endif
#endif
static void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device * dev)
{
@ -108,25 +120,25 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
#endif /* agp */
void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL(drm_core_ioremap);
EXPORT_SYMBOL(drm_legacy_ioremap);
void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL(drm_core_ioremap_wc);
EXPORT_SYMBOL(drm_legacy_ioremap_wc);
void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
if (!map->handle || !map->size)
return;
@ -136,4 +148,4 @@ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
else
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_core_ioremapfree);
EXPORT_SYMBOL(drm_legacy_ioremapfree);


@ -231,6 +231,9 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
break;
}
if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
msg.flags = MIPI_DSI_MSG_USE_LPM;
return ops->transfer(dsi->host, &msg);
}
EXPORT_SYMBOL(mipi_dsi_dcs_write);
@ -260,6 +263,9 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
if (!ops || !ops->transfer)
return -ENOSYS;
if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
msg.flags = MIPI_DSI_MSG_USE_LPM;
return ops->transfer(dsi->host, &msg);
}
EXPORT_SYMBOL(mipi_dsi_dcs_read);


@ -1259,6 +1259,7 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
if (!mode)
return NULL;
mode->type |= DRM_MODE_TYPE_USERDEF;
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}


@ -56,6 +56,212 @@
*/
/**
* __drm_modeset_lock_all - internal helper to grab all modeset locks
* @dev: DRM device
* @trylock: trylock mode for atomic contexts
*
* This is a special version of drm_modeset_lock_all() which can also be used in
* atomic contexts. Then @trylock must be set to true.
*
* Returns:
* 0 on success or negative error code on failure.
*/
int __drm_modeset_lock_all(struct drm_device *dev,
bool trylock)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx),
trylock ? GFP_ATOMIC : GFP_KERNEL);
if (!ctx)
return -ENOMEM;
if (trylock) {
if (!mutex_trylock(&config->mutex))
return -EBUSY;
} else {
mutex_lock(&config->mutex);
}
drm_modeset_acquire_init(ctx, 0);
ctx->trylock_only = trylock;
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_crtcs(dev, ctx);
if (ret)
goto fail;
WARN_ON(config->acquire_ctx);
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_all():
*/
config->acquire_ctx = ctx;
drm_warn_on_modeset_not_all_locked(dev);
return 0;
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
return ret;
}
EXPORT_SYMBOL(__drm_modeset_lock_all);
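The backoff-and-retry dance above is the generic pattern for any code that takes modeset locks with an acquire context; a minimal sketch for driver code managing its own context (assuming only the per-crtc lock is needed):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		/* drop held locks, sleep on the contended one, try again */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	/* ... modify state protected by crtc->mutex ... */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);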
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
*
* This function drops all modeset locks taken by drm_modeset_lock_all.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
if (WARN_ON(!ctx))
return;
config->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
kfree(ctx);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
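Callers that do not need fine-grained locking simply wrap their critical section; a hedged usage sketch:

	drm_modeset_lock_all(dev);
	/* ... walk or modify mode_config state ... */
	drm_modeset_unlock_all(dev);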
/**
* drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
* @crtc: drm crtc
*
* This function locks the given crtc using a hidden acquire context. This is
* necessary so that drivers internally using the atomic interfaces can grab
* further locks with the lock acquire context.
*/
void drm_modeset_lock_crtc(struct drm_crtc *crtc)
{
struct drm_modeset_acquire_ctx *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (WARN_ON(!ctx))
return;
drm_modeset_acquire_init(ctx, 0);
retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail;
WARN_ON(crtc->acquire_ctx);
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_crtc():
*/
crtc->acquire_ctx = ctx;
return;
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
}
EXPORT_SYMBOL(drm_modeset_lock_crtc);
/**
* drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
* @crtc: drm crtc
*
* Legacy ioctl operations like cursor updates or page flips only have per-crtc
* locking, and store the acquire ctx in the corresponding crtc. All other
* legacy operations take all locks and use a global acquire context. This
* function grabs the right one.
*/
struct drm_modeset_acquire_ctx *
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
{
if (crtc->acquire_ctx)
return crtc->acquire_ctx;
WARN_ON(!crtc->dev->mode_config.acquire_ctx);
return crtc->dev->mode_config.acquire_ctx;
}
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
/**
* drm_modeset_unlock_crtc - drop crtc lock
* @crtc: drm crtc
*
* This drops the crtc lock acquired with drm_modeset_lock_crtc() and all other
* locks acquired through the hidden context.
*/
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
{
struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
if (WARN_ON(!ctx))
return;
crtc->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
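Putting the three helpers above together, a legacy per-crtc path (cursor update or page flip) looks roughly like this sketch, not the actual ioctl code:

	struct drm_modeset_acquire_ctx *ctx;

	drm_modeset_lock_crtc(crtc);
	/* code deeper in the call chain that needs additional locks can
	 * reuse the hidden context (illustrative; none are taken here): */
	ctx = drm_modeset_legacy_acquire_ctx(crtc);
	/* ... perform the cursor or page-flip update ... */
	drm_modeset_unlock_crtc(crtc);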
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
*
* Useful as a debug assert.
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
struct drm_crtc *crtc;
/* Locking is currently fubar in the panic handler. */
if (oops_in_progress)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
/**
* drm_modeset_acquire_init - initialize acquire context
* @ctx: the acquire context
@ -108,7 +314,12 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
WARN_ON(ctx->contended);
if (interruptible && slow) {
if (ctx->trylock_only) {
if (!ww_mutex_trylock(&lock->mutex))
return -EBUSY;
else
return 0;
} else if (interruptible && slow) {
ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (interruptible) {
ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);


@ -27,6 +27,7 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
/**
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
@ -81,7 +82,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
unsigned long addr;
size_t sz;
@ -105,7 +106,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
__drm_pci_free(dev, dmah);
__drm_legacy_pci_free(dev, dmah);
kfree(dmah);
}
@ -127,34 +128,20 @@ static int drm_get_pci_domain(struct drm_device *dev)
return pci_domain_nr(dev->pdev->bus);
}
static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (!master->unique)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
master->unique_len = strlen(master->unique);
return 0;
err:
return ret;
}
EXPORT_SYMBOL(drm_pci_set_busid);
int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
@ -163,8 +150,7 @@ int drm_pci_set_unique(struct drm_device *dev,
int domain, bus, slot, func, ret;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
@ -269,10 +255,6 @@ void drm_pci_agp_destroy(struct drm_device *dev)
}
}
static struct drm_bus drm_pci_bus = {
.set_busid = drm_pci_set_busid,
};
/**
* drm_get_pci_dev - Register a PCI device with the DRM subsystem
* @pdev: PCI device
@ -353,8 +335,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
driver->bus = &drm_pci_bus;
if (driver->driver_features & DRIVER_MODESET)
return pci_register_driver(pdriver);


@ -68,42 +68,23 @@ err_free:
return ret;
}
static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret, id;
master->unique_len = 13 + strlen(dev->platformdev->name);
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
int id;
id = dev->platformdev->id;
/* if only a single instance of the platform device, id will be
* set to -1.. use 0 instead to avoid a funny looking bus-id:
*/
if (id == -1)
if (id < 0)
id = 0;
len = snprintf(master->unique, master->unique_len,
"platform:%s:%02d", dev->platformdev->name, id);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
ret = -EINVAL;
goto err;
}
master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
dev->platformdev->name, id);
if (!master->unique)
return -ENOMEM;
master->unique_len = strlen(master->unique);
return 0;
err:
return ret;
}
static struct drm_bus drm_platform_bus = {
.set_busid = drm_platform_set_busid,
};
EXPORT_SYMBOL(drm_platform_set_busid);
/**
* drm_platform_init - Register a platform device with the DRM subsystem
@ -120,7 +101,6 @@ int drm_platform_init(struct drm_driver *driver, struct platform_device *platfor
{
DRM_DEBUG("\n");
driver->bus = &drm_platform_bus;
return drm_get_platform_dev(platform_device, driver);
}
EXPORT_SYMBOL(drm_platform_init);


@ -29,6 +29,9 @@
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"
/*
* DMA-BUF/GEM Object references and lifetime overview:
@ -522,7 +525,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
goto fail_detach;
}
obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto fail_unmap;

Просмотреть файл

@ -82,6 +82,22 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
return;
}
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
{
struct drm_display_mode *mode;
if (!connector->cmdline_mode.specified)
return 0;
mode = drm_mode_create_from_cmdline_mode(connector->dev,
&connector->cmdline_mode);
if (mode == NULL)
return 0;
drm_mode_probed_add(connector, mode);
return 1;
}
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
{
@ -141,6 +157,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
count += drm_helper_probe_add_cmdline_mode(connector);
if (count == 0)
goto prune;


@ -34,6 +34,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
#define DEBUG_SCATTER 0
@ -78,8 +79,8 @@ void drm_legacy_sg_cleanup(struct drm_device *dev)
# define ScatterHandle(x) (unsigned int)(x)
#endif
int drm_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
@ -194,8 +195,8 @@ int drm_sg_alloc(struct drm_device *dev, void *data,
return -ENOMEM;
}
int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;


@ -21,6 +21,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_core.h>
#include <drm/drmP.h>
#include "drm_internal.h"
#define to_drm_minor(d) dev_get_drvdata(d)
#define to_drm_connector(d) dev_get_drvdata(d)


@ -1,88 +0,0 @@
#include <drm/drmP.h>
#include <drm/drm_usb.h>
#include <linux/usb.h>
#include <linux/module.h>
int drm_get_usb_dev(struct usb_interface *interface,
const struct usb_device_id *id,
struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
DRM_DEBUG("\n");
dev = drm_dev_alloc(driver, &interface->dev);
if (!dev)
return -ENOMEM;
dev->usbdev = interface_to_usbdev(interface);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
goto err_free;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary->index);
return 0;
err_free:
drm_dev_unref(dev);
return ret;
}
EXPORT_SYMBOL(drm_get_usb_dev);
static int drm_usb_set_busid(struct drm_device *dev,
struct drm_master *master)
{
return 0;
}
static struct drm_bus drm_usb_bus = {
.set_busid = drm_usb_set_busid,
};
/**
* drm_usb_init - Register matching USB devices with the DRM subsystem
* @driver: DRM device driver
* @udriver: USB device driver
*
* Registers one or more devices matched by a USB driver with the DRM
* subsystem.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
{
int res;
DRM_DEBUG("\n");
driver->bus = &drm_usb_bus;
res = usb_register(udriver);
return res;
}
EXPORT_SYMBOL(drm_usb_init);
/**
* drm_usb_exit - Unregister matching USB devices from the DRM subsystem
* @driver: DRM device driver
* @udriver: USB device driver
*
* Unregisters one or more devices matched by a USB driver from the DRM
* subsystem.
*/
void drm_usb_exit(struct drm_driver *driver,
struct usb_driver *udriver)
{
usb_deregister(udriver);
}
EXPORT_SYMBOL(drm_usb_exit);
MODULE_AUTHOR("David Airlie");
MODULE_DESCRIPTION("USB DRM support");
MODULE_LICENSE("GPL and additional rights");


@ -35,10 +35,19 @@
#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_legacy.h"
struct drm_vma_entry {
struct list_head head;
struct vm_area_struct *vma;
pid_t pid;
};
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
@ -48,15 +57,11 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__)
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
else
tmp = pgprot_writecombine(tmp);
#elif defined(__powerpc__)
pgprot_val(tmp) |= _PAGE_NO_CACHE;
if (map->type == _DRM_REGISTERS)
pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end -
vma->vm_start))
@ -263,7 +268,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.vaddr = map->handle;
dmah.busaddr = map->offset;
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
__drm_legacy_pci_free(dev, &dmah);
break;
}
kfree(map);
@ -412,7 +417,6 @@ void drm_vm_open_locked(struct drm_device *dev,
list_add(&vma_entry->head, &dev->vmalist);
}
}
EXPORT_SYMBOL_GPL(drm_vm_open_locked);
static void drm_vm_open(struct vm_area_struct *vma)
{
@ -532,7 +536,7 @@ static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
* according to the mapping type and remaps the pages. Finally sets the file
* pointer and calls vm_open().
*/
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
@ -646,7 +650,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return 0;
}
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
@ -661,4 +665,69 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
EXPORT_SYMBOL(drm_mmap);
EXPORT_SYMBOL(drm_legacy_mmap);
void drm_legacy_vma_flush(struct drm_device *dev)
{
struct drm_vma_entry *vma, *vma_temp;
/* Clear vma list (only needed for legacy drivers) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
kfree(vma);
}
}
int drm_vma_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_vma_entry *pt;
struct vm_area_struct *vma;
unsigned long vma_count = 0;
#if defined(__i386__)
unsigned int pgprot;
#endif
mutex_lock(&dev->struct_mutex);
list_for_each_entry(pt, &dev->vmalist, head)
vma_count++;
seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
vma_count, high_memory,
(void *)(unsigned long)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
if (!vma)
continue;
seq_printf(m,
"\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
pt->pid,
(void *)vma->vm_start, (void *)vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_pgoff);
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
seq_printf(m, " %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_PSE ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
seq_printf(m, "\n");
}
mutex_unlock(&dev->struct_mutex);
return 0;
}


@ -329,8 +329,8 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)
return retval;
for (lane = 0; lane < lane_count; lane++)
buf[lane] = DP_TRAIN_PRE_EMPHASIS_0 |
DP_TRAIN_VOLTAGE_SWING_400;
buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
lane_count, buf);
@ -937,6 +937,8 @@ static enum drm_connector_status exynos_dp_detect(
static void exynos_dp_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static struct drm_connector_funcs exynos_dp_connector_funcs = {
@ -1358,8 +1360,8 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
exynos_dp_connector_destroy(&dp->connector);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&dp->connector);
}
static const struct component_ops exynos_dp_ops = {


@ -32,7 +32,6 @@ enum exynos_crtc_mode {
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
* @drm_plane: pointer of private plane object for this crtc
* @manager: the manager associated with this crtc
* @pipe: a crtc index created at load() with a new crtc object creation
* and the crtc object would be set to private->crtc array
@ -46,7 +45,6 @@ enum exynos_crtc_mode {
*/
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
struct drm_plane *plane;
struct exynos_drm_manager *manager;
unsigned int pipe;
unsigned int dpms;
@ -94,12 +92,12 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
exynos_plane_commit(exynos_crtc->plane);
exynos_plane_commit(crtc->primary);
if (manager->ops->commit)
manager->ops->commit(manager);
exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_ON);
}
static bool
@ -123,10 +121,9 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct exynos_drm_manager *manager = exynos_crtc->manager;
struct drm_plane *plane = exynos_crtc->plane;
struct drm_framebuffer *fb = crtc->primary->fb;
unsigned int crtc_w;
unsigned int crtc_h;
int ret;
/*
* copy the mode data adjusted by mode_fixup() into crtc->mode
@ -134,29 +131,21 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
*/
memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
crtc_w = crtc->primary->fb->width - x;
crtc_h = crtc->primary->fb->height - y;
crtc_w = fb->width - x;
crtc_h = fb->height - y;
if (manager->ops->mode_set)
manager->ops->mode_set(manager, &crtc->mode);
ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
x, y, crtc_w, crtc_h);
if (ret)
return ret;
plane->crtc = crtc;
plane->fb = crtc->primary->fb;
drm_framebuffer_reference(plane->fb);
return 0;
return exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, x, y, crtc_w, crtc_h);
}
static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_plane *plane = exynos_crtc->plane;
struct drm_framebuffer *fb = crtc->primary->fb;
unsigned int crtc_w;
unsigned int crtc_h;
int ret;
@ -167,11 +156,11 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
return -EPERM;
}
crtc_w = crtc->primary->fb->width - x;
crtc_h = crtc->primary->fb->height - y;
crtc_w = fb->width - x;
crtc_h = fb->height - y;
ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h,
x, y, crtc_w, crtc_h);
ret = exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, x, y, crtc_w, crtc_h);
if (ret)
return ret;
@ -304,8 +293,7 @@ static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
exynos_drm_crtc_commit(crtc);
break;
case CRTC_MODE_BLANK:
exynos_plane_dpms(exynos_crtc->plane,
DRM_MODE_DPMS_OFF);
exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_OFF);
break;
default:
break;
@ -351,8 +339,10 @@ static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
{
struct exynos_drm_crtc *exynos_crtc;
struct drm_plane *plane;
struct exynos_drm_private *private = manager->drm_dev->dev_private;
struct drm_crtc *crtc;
int ret;
exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
if (!exynos_crtc)
@ -364,11 +354,11 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
exynos_crtc->manager = manager;
exynos_crtc->pipe = manager->pipe;
exynos_crtc->plane = exynos_plane_init(manager->drm_dev,
1 << manager->pipe, true);
if (!exynos_crtc->plane) {
kfree(exynos_crtc);
return -ENOMEM;
plane = exynos_plane_init(manager->drm_dev, 1 << manager->pipe,
DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto err_plane;
}
manager->crtc = &exynos_crtc->drm_crtc;
@ -376,12 +366,22 @@ int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
private->crtc[manager->pipe] = crtc;
drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs);
ret = drm_crtc_init_with_planes(manager->drm_dev, crtc, plane, NULL,
&exynos_crtc_funcs);
if (ret < 0)
goto err_crtc;
drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
exynos_drm_crtc_attach_mode_property(crtc);
return 0;
err_crtc:
plane->funcs->destroy(plane);
err_plane:
kfree(exynos_crtc);
return ret;
}
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)

Просмотреть файл

@ -342,8 +342,12 @@ int exynos_dpi_remove(struct device *dev)
struct exynos_dpi *ctx = exynos_dpi_display.ctx;
exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
exynos_dpi_connector_destroy(&ctx->connector);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&ctx->connector);
if (ctx->panel)
drm_panel_detach(ctx->panel);
exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);


@ -15,7 +15,6 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <linux/anon_inodes.h>
#include <linux/component.h>
#include <drm/exynos_drm.h>
@ -86,8 +85,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
struct drm_plane *plane;
unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
plane = exynos_plane_init(dev, possible_crtcs,
DRM_PLANE_TYPE_OVERLAY);
if (IS_ERR(plane))
goto err_mode_config_cleanup;
}
@ -116,6 +116,23 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
/* force connectors detection */
drm_helper_hpd_irq_event(dev);
/*
* enable drm irq mode.
* - with irq_enabled = true, we can use the vblank feature.
*
* P.S. Note that we don't use the drm irq handler here but a
* driver-specific one instead, because the drm framework supports
* only one irq handler.
*/
dev->irq_enabled = true;
/*
* with vblank_disable_allowed = true, the vblank interrupt will be
* disabled by the drm timer once the current process gives up ownership
* of the vblank event (after drm_vblank_put() is called).
*/
dev->vblank_disable_allowed = true;
return 0;
err_unbind_all:
@ -136,23 +153,19 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_device_subdrv_remove(dev);
exynos_drm_fbdev_fini(dev);
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
component_unbind_all(dev->dev, dev);
drm_vblank_cleanup(dev);
drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
.mmap = exynos_drm_gem_mmap_buffer,
};
static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_connector *connector;
@ -191,7 +204,6 @@ static int exynos_drm_resume(struct drm_device *dev)
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
struct file *anon_filp;
int ret;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@ -204,21 +216,8 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
if (ret)
goto err_file_priv_free;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
ret = PTR_ERR(anon_filp);
goto err_subdrv_close;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
err_subdrv_close:
exynos_drm_subdrv_close(dev, file);
err_file_priv_free:
kfree(file_priv);
file->driver_priv = NULL;
@ -234,7 +233,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct exynos_drm_private *private = dev->dev_private;
struct drm_exynos_file_private *file_priv;
struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;
@ -260,10 +258,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
}
spin_unlock_irqrestore(&dev->event_lock, flags);
file_priv = file->driver_priv;
if (file_priv->anon_filp)
fput(file_priv->anon_filp);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
@ -282,11 +276,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
@ -330,6 +319,7 @@ static struct drm_driver exynos_drm_driver = {
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
.postclose = exynos_drm_postclose,
.set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
@ -485,21 +475,20 @@ void exynos_drm_component_del(struct device *dev,
mutex_unlock(&drm_component_lock);
}
static int compare_of(struct device *dev, void *data)
static int compare_dev(struct device *dev, void *data)
{
return dev == (struct device *)data;
}
static int exynos_drm_add_components(struct device *dev, struct master *m)
static struct component_match *exynos_drm_match_add(struct device *dev)
{
struct component_match *match = NULL;
struct component_dev *cdev;
unsigned int attach_cnt = 0;
mutex_lock(&drm_component_lock);
list_for_each_entry(cdev, &drm_component_list, list) {
int ret;
/*
* Add components to master only in case that crtc and
* encoder/connector device objects exist.
@ -514,16 +503,10 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
/*
* fimd and dpi modules have same device object so add
* only crtc device object in this case.
*
* TODO. if dpi module follows driver-model driver then
* below codes can be removed.
*/
if (cdev->crtc_dev == cdev->conn_dev) {
ret = component_master_add_child(m, compare_of,
cdev->crtc_dev);
if (ret < 0)
return ret;
component_match_add(dev, &match, compare_dev,
cdev->crtc_dev);
goto out_lock;
}
@ -533,11 +516,8 @@ static int exynos_drm_add_components(struct device *dev, struct master *m)
* connector/encoder need pipe number of crtc when they
* are created.
*/
ret = component_master_add_child(m, compare_of, cdev->crtc_dev);
ret |= component_master_add_child(m, compare_of,
cdev->conn_dev);
if (ret < 0)
return ret;
component_match_add(dev, &match, compare_dev, cdev->crtc_dev);
component_match_add(dev, &match, compare_dev, cdev->conn_dev);
out_lock:
mutex_lock(&drm_component_lock);
@ -545,7 +525,7 @@ out_lock:
mutex_unlock(&drm_component_lock);
return attach_cnt ? 0 : -ENODEV;
return attach_cnt ? match : ERR_PTR(-EPROBE_DEFER);
}
static int exynos_drm_bind(struct device *dev)
@ -559,13 +539,13 @@ static void exynos_drm_unbind(struct device *dev)
}
static const struct component_master_ops exynos_drm_ops = {
.add_components = exynos_drm_add_components,
.bind = exynos_drm_bind,
.unbind = exynos_drm_unbind,
};
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
struct component_match *match;
int ret;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
@ -632,13 +612,23 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
goto err_unregister_ipp_drv;
#endif
ret = component_master_add(&pdev->dev, &exynos_drm_ops);
if (ret < 0)
DRM_DEBUG_KMS("re-tried by last sub driver probed later.\n");
match = exynos_drm_match_add(&pdev->dev);
if (IS_ERR(match)) {
ret = PTR_ERR(match);
goto err_unregister_resources;
}
return 0;
ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
match);
if (ret < 0)
goto err_unregister_resources;
return ret;
err_unregister_resources:
#ifdef CONFIG_DRM_EXYNOS_IPP
exynos_platform_device_ipp_unregister();
err_unregister_ipp_drv:
platform_driver_unregister(&ipp_driver);
err_unregister_gsc_drv:


@ -240,7 +240,6 @@ struct exynos_drm_g2d_private {
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
struct device *ipp_dev;
struct file *anon_filp;
};
/*


@ -114,6 +114,8 @@
#define DSIM_SYNC_INFORM (1 << 27)
#define DSIM_EOT_DISABLE (1 << 28)
#define DSIM_MFLUSH_VS (1 << 29)
/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
#define DSIM_CLKLANE_STOP (1 << 30)
/* DSIM_ESCMODE */
#define DSIM_TX_TRIGGER_RST (1 << 4)
@ -262,6 +264,7 @@ struct exynos_dsi_driver_data {
unsigned int plltmr_reg;
unsigned int has_freqband:1;
unsigned int has_clklane_stop:1;
};
struct exynos_dsi {
@ -301,9 +304,16 @@ struct exynos_dsi {
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
.plltmr_reg = 0x50,
.has_freqband = 1,
.has_clklane_stop = 1,
};
static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
.plltmr_reg = 0x50,
.has_freqband = 1,
.has_clklane_stop = 1,
};
static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
@ -311,6 +321,8 @@ static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
};
static struct of_device_id exynos_dsi_of_match[] = {
{ .compatible = "samsung,exynos3250-mipi-dsi",
.data = &exynos3_dsi_driver_data },
{ .compatible = "samsung,exynos4210-mipi-dsi",
.data = &exynos4_dsi_driver_data },
{ .compatible = "samsung,exynos5410-mipi-dsi",
@ -421,7 +433,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
if (!fout) {
dev_err(dsi->dev,
"failed to find PLL PMS for requested frequency\n");
return -EFAULT;
return 0;
}
dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
@ -453,7 +465,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
do {
if (timeout-- == 0) {
dev_err(dsi->dev, "PLL failed to stabilize\n");
return -EFAULT;
return 0;
}
reg = readl(dsi->reg_base + DSIM_STATUS_REG);
} while ((reg & DSIM_PLL_STABLE) == 0);
@ -569,6 +581,7 @@ static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
static int exynos_dsi_init_link(struct exynos_dsi *dsi)
{
struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
int timeout;
u32 reg;
u32 lanes_mask;
@ -650,6 +663,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
reg |= DSIM_LANE_EN(lanes_mask);
writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
/*
* Use non-continuous clock mode if the peripheral wants it and the
* host controller supports it.
*
* In non-continuous clock mode, the host controller will turn off
* the HS clock between high-speed transmissions to reduce
* power consumption.
*/
if (driver_data->has_clklane_stop &&
dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
reg |= DSIM_CLKLANE_STOP;
writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
}
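/*
 * Editor's sketch, not part of this diff: a hypothetical DSI panel driver
 * opting into the non-continuous clock mode handled above, using the flag
 * and fields from include/drm/drm_mipi_dsi.h.
 */
static int sketch_panel_probe(struct mipi_dsi_device *dsi)
{
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS;	/* HS clock may stop */

	return mipi_dsi_attach(dsi);
}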
/* Check clock and data lane state are stop state */
timeout = 100;
do {
@ -1414,6 +1441,9 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
static void exynos_dsi_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
connector->dev = NULL;
}
static struct drm_connector_funcs exynos_dsi_connector_funcs = {
@ -1634,10 +1664,10 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master,
exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
mipi_dsi_host_unregister(&dsi->dsi_host);
exynos_dsi_connector_destroy(&dsi->connector);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&dsi->connector);
mipi_dsi_host_unregister(&dsi->dsi_host);
}
static const struct component_ops exynos_dsi_component_ops = {


@ -165,6 +165,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
kfree(exynos_fb);
DRM_ERROR("failed to initialize framebuffer\n");
return ERR_PTR(ret);
}


@ -123,6 +123,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
fbi->screen_base = buffer->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
return 0;
}
@ -353,9 +354,6 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
fbdev = to_exynos_fbdev(private->fb_helper);
if (fbdev->exynos_gem_obj)
exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
exynos_drm_fbdev_destroy(dev, private->fb_helper);
kfree(fbdev);
private->fb_helper = NULL;


@ -336,9 +336,6 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
fimc_set_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
fimc_clear_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
ctx->id, status);
@ -718,24 +715,24 @@ static int fimc_src_set_addr(struct device *dev,
case IPP_BUF_ENQUEUE:
config = &property->config[EXYNOS_DRM_OPS_SRC];
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
EXYNOS_CIIYSA(buf_id));
EXYNOS_CIIYSA0);
if (config->fmt == DRM_FORMAT_YVU420) {
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIICBSA(buf_id));
EXYNOS_CIICBSA0);
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIICRSA(buf_id));
EXYNOS_CIICRSA0);
} else {
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
EXYNOS_CIICBSA(buf_id));
EXYNOS_CIICBSA0);
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
EXYNOS_CIICRSA(buf_id));
EXYNOS_CIICRSA0);
}
break;
case IPP_BUF_DEQUEUE:
fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id));
fimc_write(ctx, 0x0, EXYNOS_CIIYSA0);
fimc_write(ctx, 0x0, EXYNOS_CIICBSA0);
fimc_write(ctx, 0x0, EXYNOS_CIICRSA0);
break;
default:
/* bypass */
@ -1122,67 +1119,34 @@ static int fimc_dst_set_size(struct device *dev, int swap,
return 0;
}
static int fimc_dst_get_buf_count(struct fimc_context *ctx)
{
u32 cfg, buf_num;
cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
buf_num = hweight32(cfg);
DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
return buf_num;
}
static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
enum drm_exynos_ipp_buf_type buf_type)
{
struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
bool enable;
u32 cfg;
u32 mask = 0x00000001 << buf_id;
int ret = 0;
unsigned long flags;
u32 buf_num;
u32 cfg;
DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
spin_lock_irqsave(&ctx->lock, flags);
/* mask register set */
cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
switch (buf_type) {
case IPP_BUF_ENQUEUE:
enable = true;
break;
case IPP_BUF_DEQUEUE:
enable = false;
break;
default:
dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
ret = -EINVAL;
goto err_unlock;
}
if (buf_type == IPP_BUF_ENQUEUE)
cfg |= (1 << buf_id);
else
cfg &= ~(1 << buf_id);
/* sequence id */
cfg &= ~mask;
cfg |= (enable << buf_id);
fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
/* interrupt enable */
if (buf_type == IPP_BUF_ENQUEUE &&
fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
fimc_mask_irq(ctx, true);
buf_num = hweight32(cfg);
/* interrupt disable */
if (buf_type == IPP_BUF_DEQUEUE &&
fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
fimc_mask_irq(ctx, true);
else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
fimc_mask_irq(ctx, false);
err_unlock:
spin_unlock_irqrestore(&ctx->lock, flags);
return ret;
}
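/*
 * Editor's sketch, not part of this diff: the reworked helper above keeps
 * one bit per queued destination buffer in CIFCNTSEQ and derives the count
 * with hweight32(). A standalone userspace model of that bookkeeping, with
 * __builtin_popcount() standing in for hweight32() and hypothetical
 * start/stop thresholds:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUF_START 4	/* hypothetical FIMC_BUF_START */
#define BUF_STOP  1	/* hypothetical FIMC_BUF_STOP */

static uint32_t seq;	/* mirrors the EXYNOS_CIFCNTSEQ mask */

static void set_buf_seq(uint32_t buf_id, bool enqueue)
{
	unsigned int buf_num;

	if (enqueue)
		seq |= 1u << buf_id;
	else
		seq &= ~(1u << buf_id);

	buf_num = __builtin_popcount(seq);	/* hweight32() in the kernel */

	if (enqueue && buf_num >= BUF_START)
		puts("unmask irq");		/* enough buffers queued */
	else if (!enqueue && buf_num <= BUF_STOP)
		puts("mask irq");		/* queue nearly drained */
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < 5; i++)
		set_buf_seq(i, true);
	for (i = 0; i < 5; i++)
		set_buf_seq(i, false);
	return 0;
}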
static int fimc_dst_set_addr(struct device *dev,
@ -1240,7 +1204,9 @@ static int fimc_dst_set_addr(struct device *dev,
break;
}
return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
return 0;
}
static struct exynos_drm_ipp_ops fimc_dst_ops = {
@ -1291,14 +1257,11 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
DRM_ERROR("failed to dequeue.\n");
return IRQ_HANDLED;
}
fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
event_work->ippdrv = ippdrv;
event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
queue_work(ippdrv->event_workq, &event_work->work);
return IRQ_HANDLED;
}
@ -1590,12 +1553,9 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
if (cmd == IPP_CMD_M2M) {
if (cmd == IPP_CMD_M2M)
fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
}
return 0;
}


@ -104,6 +104,14 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
.has_limited_fmt = 1,
};
static struct fimd_driver_data exynos3_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x210,
.lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
.has_vidoutcon = 1,
};
static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
.lcdblk_offset = 0x210,
@ -168,6 +176,8 @@ struct fimd_context {
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
{ .compatible = "samsung,exynos3250-fimd",
.data = &exynos3_fimd_driver_data },
{ .compatible = "samsung,exynos4210-fimd",
.data = &exynos4_fimd_driver_data },
{ .compatible = "samsung,exynos5250-fimd",
@ -204,7 +214,6 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
static void fimd_clear_channel(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
@ -214,17 +223,31 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + SHADOWCON);
if (val & SHADOWCON_CHx_ENABLE(win)) {
val &= ~SHADOWCON_CHx_ENABLE(win);
writel(val, ctx->regs + SHADOWCON);
u32 val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
/* wincon */
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
/* unprotect windows */
if (ctx->driver_data->has_shadowcon) {
val = readl(ctx->regs + SHADOWCON);
val &= ~SHADOWCON_CHx_ENABLE(win);
writel(val, ctx->regs + SHADOWCON);
}
ch_enabled = 1;
}
}
/* Wait for vsync, as disable channel takes effect at next vsync */
if (ch_enabled)
if (ch_enabled) {
unsigned int state = ctx->suspended;
ctx->suspended = 0;
fimd_wait_for_vblank(mgr);
ctx->suspended = state;
}
}
static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
@ -237,23 +260,6 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
mgr->drm_dev = ctx->drm_dev = drm_dev;
mgr->pipe = ctx->pipe = priv->pipe++;
/*
* enable drm irq mode.
* - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
drm_dev->irq_enabled = true;
/*
* with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
drm_dev->vblank_disable_allowed = true;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev)) {
/*
@ -1051,7 +1057,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
{
struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
struct fimd_context *ctx = fimd_manager.ctx;
struct drm_crtc *crtc = mgr->crtc;
fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
@ -1059,8 +1064,6 @@ static void fimd_unbind(struct device *dev, struct device *master,
exynos_dpi_remove(dev);
fimd_mgr_remove(mgr);
crtc->funcs->destroy(crtc);
}
static const struct component_ops fimd_component_ops = {


@ -318,40 +318,16 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_gem_map_off *args = data;
DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
args->handle, (unsigned long)args->offset);
if (!(dev->driver->driver_features & DRIVER_GEM)) {
DRM_ERROR("does not support GEM.\n");
return -ENODEV;
}
return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
&args->offset);
}
int exynos_drm_gem_mmap_buffer(struct file *filp,
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *drm_dev = obj->dev;
struct drm_device *drm_dev = exynos_gem_obj->base.dev;
struct exynos_drm_gem_buf *buffer;
unsigned long vm_size;
int ret;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = obj;
vma->vm_ops = drm_dev->driver->gem_vm_ops;
update_vm_cache_attr(exynos_gem_obj, vma);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
vm_size = vma->vm_end - vma->vm_start;
@ -373,60 +349,6 @@ int exynos_drm_gem_mmap_buffer(struct file *filp,
return ret;
}
/*
* take a reference to this mapping of the object. And this reference
* is unreferenced by the corresponding vm_close call.
*/
drm_gem_object_reference(obj);
drm_vm_open_locked(drm_dev, vma);
return 0;
}
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_file_private *exynos_file_priv;
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
struct file *anon_filp;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM)) {
DRM_ERROR("does not support GEM.\n");
return -ENODEV;
}
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
exynos_file_priv = file_priv->driver_priv;
anon_filp = exynos_file_priv->anon_filp;
anon_filp->private_data = obj;
addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, 0);
drm_gem_object_unreference(obj);
if (IS_ERR_VALUE(addr)) {
mutex_unlock(&dev->struct_mutex);
return (int)addr;
}
mutex_unlock(&dev->struct_mutex);
args->mapped = addr;
DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
return 0;
}
@ -710,16 +632,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_gem_flags(exynos_gem_obj->flags);
if (ret) {
drm_gem_vm_close(vma);
drm_gem_free_mmap_offset(obj);
return ret;
}
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
if (ret)
goto err_close_vm;
update_vm_cache_attr(exynos_gem_obj, vma);
ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
if (ret)
goto err_close_vm;
return ret;
err_close_vm:
drm_gem_vm_close(vma);
drm_gem_free_mmap_offset(obj);
return ret;
}


@ -12,6 +12,8 @@
#ifndef _EXYNOS_DRM_GEM_H_
#define _EXYNOS_DRM_GEM_H_
#include <drm/drm_gem.h>
#define to_exynos_gem_obj(x) container_of(x,\
struct exynos_drm_gem_obj, base)
@ -111,20 +113,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/*
* mmap the physically continuous memory that a gem object contains
* to user space.
*/
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma);
/* map user space allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);


@ -1326,8 +1326,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
buf_id[EXYNOS_DRM_OPS_SRC];
event_work->buf_id[EXYNOS_DRM_OPS_DST] =
buf_id[EXYNOS_DRM_OPS_DST];
queue_work(ippdrv->event_workq,
(struct work_struct *)event_work);
queue_work(ippdrv->event_workq, &event_work->work);
}
return IRQ_HANDLED;


@ -75,7 +75,6 @@ struct drm_exynos_ipp_mem_node {
u32 prop_id;
u32 buf_id;
struct drm_exynos_ipp_buf_info buf_info;
struct drm_file *filp;
};
/*
@ -319,44 +318,6 @@ static void ipp_print_property(struct drm_exynos_ipp_property *property,
sz->hsize, sz->vsize, config->flip, config->degree);
}
static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
u32 prop_id = property->prop_id;
DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
ippdrv = ipp_find_drv_by_handle(prop_id);
if (IS_ERR(ippdrv)) {
DRM_ERROR("failed to get ipp driver.\n");
return -EINVAL;
}
/*
* Find command node using command list in ippdrv.
* when we find this command no using prop_id.
* return property information set in this command node.
*/
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
if ((c_node->property.prop_id == prop_id) &&
(c_node->state == IPP_STATE_STOP)) {
mutex_unlock(&ippdrv->cmd_lock);
DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
property->cmd, (int)ippdrv);
c_node->property = *property;
return 0;
}
}
mutex_unlock(&ippdrv->cmd_lock);
DRM_ERROR("failed to search property.\n");
return -EINVAL;
}
static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
struct drm_exynos_ipp_cmd_work *cmd_work;
@ -392,6 +353,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
struct drm_exynos_ipp_property *property = data;
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
u32 prop_id;
int ret, i;
if (!ctx) {
@ -404,6 +366,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
prop_id = property->prop_id;
/*
* This is log print for user application property.
* user application set various property.
@ -412,14 +376,24 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
ipp_print_property(property, i);
/*
* set property ioctl generated new prop_id.
* but in this case already asigned prop_id using old set property.
* e.g PAUSE state. this case supports find current prop_id and use it
* instead of allocation.
* In case prop_id is not zero try to set existing property.
*/
if (property->prop_id) {
DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
return ipp_find_and_set_property(property);
if (prop_id) {
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
if (!c_node || c_node->filp != file) {
DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
return -EINVAL;
}
if (c_node->state != IPP_STATE_STOP) {
DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
return -EINVAL;
}
c_node->property = *property;
return 0;
}
/* find ipp driver using ipp id */
@ -445,9 +419,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
property->prop_id, property->cmd, (int)ippdrv);
/* stored property information and ippdrv in private data */
c_node->dev = dev;
c_node->property = *property;
c_node->state = IPP_STATE_IDLE;
c_node->filp = file;
c_node->start_work = ipp_create_cmd_work();
if (IS_ERR(c_node->start_work)) {
@ -499,9 +473,197 @@ err_clear:
return ret;
}
static int ipp_put_mem_node(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_mem_node *m_node)
{
int i;
DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
if (!m_node) {
DRM_ERROR("invalid dequeue node.\n");
return -EFAULT;
}
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
/* put gem buffer */
for_each_ipp_planar(i) {
unsigned long handle = m_node->buf_info.handles[i];
if (handle)
exynos_drm_gem_put_dma_addr(drm_dev, handle,
c_node->filp);
}
list_del(&m_node->list);
kfree(m_node);
return 0;
}
static struct drm_exynos_ipp_mem_node
*ipp_get_mem_node(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_mem_node *m_node;
struct drm_exynos_ipp_buf_info *buf_info;
int i;
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
if (!m_node)
return ERR_PTR(-ENOMEM);
buf_info = &m_node->buf_info;
/* operations, buffer id */
m_node->ops_id = qbuf->ops_id;
m_node->prop_id = qbuf->prop_id;
m_node->buf_id = qbuf->buf_id;
INIT_LIST_HEAD(&m_node->list);
DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
for_each_ipp_planar(i) {
DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
/* get dma address by handle */
if (qbuf->handle[i]) {
dma_addr_t *addr;
addr = exynos_drm_gem_get_dma_addr(drm_dev,
qbuf->handle[i], c_node->filp);
if (IS_ERR(addr)) {
DRM_ERROR("failed to get addr.\n");
ipp_put_mem_node(drm_dev, c_node, m_node);
return ERR_PTR(-EFAULT);
}
buf_info->handles[i] = qbuf->handle[i];
buf_info->base[i] = *addr;
DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
buf_info->base[i], buf_info->handles[i]);
}
}
mutex_lock(&c_node->mem_lock);
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
return m_node;
}
static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node, int ops)
{
struct drm_exynos_ipp_mem_node *m_node, *tm_node;
struct list_head *head = &c_node->mem_list[ops];
mutex_lock(&c_node->mem_lock);
list_for_each_entry_safe(m_node, tm_node, head, list) {
int ret;
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret)
DRM_ERROR("failed to put m_node.\n");
}
mutex_unlock(&c_node->mem_lock);
}
static void ipp_free_event(struct drm_pending_event *event)
{
kfree(event);
}
static int ipp_get_event(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_send_event *e;
unsigned long flags;
DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
c_node->filp->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
return -ENOMEM;
}
/* make event */
e->event.base.type = DRM_EXYNOS_IPP_EVENT;
e->event.base.length = sizeof(e->event);
e->event.user_data = qbuf->user_data;
e->event.prop_id = qbuf->prop_id;
e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
e->base.event = &e->event.base;
e->base.file_priv = c_node->filp;
e->base.destroy = ipp_free_event;
mutex_lock(&c_node->event_lock);
list_add_tail(&e->base.link, &c_node->event_list);
mutex_unlock(&c_node->event_lock);
return 0;
}
static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_send_event *e, *te;
int count = 0;
mutex_lock(&c_node->event_lock);
list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
/*
* qbuf == NULL condition means all event deletion.
* stop operations want to delete all event list.
* another case delete only same buf id.
*/
if (!qbuf) {
/* delete list */
list_del(&e->base.link);
kfree(e);
}
/* compare buffer id */
if (qbuf && (qbuf->buf_id ==
e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
/* delete list */
list_del(&e->base.link);
kfree(e);
goto out_unlock;
}
}
out_unlock:
mutex_unlock(&c_node->event_lock);
return;
}
static void ipp_clean_cmd_node(struct ipp_context *ctx,
struct drm_exynos_ipp_cmd_node *c_node)
{
int i;
/* cancel works */
cancel_work_sync(&c_node->start_work->work);
cancel_work_sync(&c_node->stop_work->work);
cancel_work_sync(&c_node->event_work->work);
/* put event */
ipp_put_event(c_node, NULL);
for_each_ipp_ops(i)
ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
/* delete list */
list_del(&c_node->list);
@ -595,168 +757,6 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
return ret;
}
static struct drm_exynos_ipp_mem_node
*ipp_get_mem_node(struct drm_device *drm_dev,
struct drm_file *file,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_mem_node *m_node;
struct drm_exynos_ipp_buf_info *buf_info;
int i;
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
if (!m_node)
return ERR_PTR(-ENOMEM);
buf_info = &m_node->buf_info;
/* operations, buffer id */
m_node->ops_id = qbuf->ops_id;
m_node->prop_id = qbuf->prop_id;
m_node->buf_id = qbuf->buf_id;
DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
for_each_ipp_planar(i) {
DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
/* get dma address by handle */
if (qbuf->handle[i]) {
dma_addr_t *addr;
addr = exynos_drm_gem_get_dma_addr(drm_dev,
qbuf->handle[i], file);
if (IS_ERR(addr)) {
DRM_ERROR("failed to get addr.\n");
goto err_clear;
}
buf_info->handles[i] = qbuf->handle[i];
buf_info->base[i] = *addr;
DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
buf_info->base[i], buf_info->handles[i]);
}
}
m_node->filp = file;
mutex_lock(&c_node->mem_lock);
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
return m_node;
err_clear:
kfree(m_node);
return ERR_PTR(-EFAULT);
}
static int ipp_put_mem_node(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_mem_node *m_node)
{
int i;
DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
if (!m_node) {
DRM_ERROR("invalid dequeue node.\n");
return -EFAULT;
}
DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
/* put gem buffer */
for_each_ipp_planar(i) {
unsigned long handle = m_node->buf_info.handles[i];
if (handle)
exynos_drm_gem_put_dma_addr(drm_dev, handle,
m_node->filp);
}
/* delete list in queue */
list_del(&m_node->list);
kfree(m_node);
return 0;
}
static void ipp_free_event(struct drm_pending_event *event)
{
kfree(event);
}
static int ipp_get_event(struct drm_device *drm_dev,
struct drm_file *file,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_send_event *e;
unsigned long flags;
DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
file->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
return -ENOMEM;
}
/* make event */
e->event.base.type = DRM_EXYNOS_IPP_EVENT;
e->event.base.length = sizeof(e->event);
e->event.user_data = qbuf->user_data;
e->event.prop_id = qbuf->prop_id;
e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
e->base.event = &e->event.base;
e->base.file_priv = file;
e->base.destroy = ipp_free_event;
mutex_lock(&c_node->event_lock);
list_add_tail(&e->base.link, &c_node->event_list);
mutex_unlock(&c_node->event_lock);
return 0;
}
static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_send_event *e, *te;
int count = 0;
mutex_lock(&c_node->event_lock);
list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
/*
* qbuf == NULL condition means all event deletion.
* stop operations want to delete all event list.
* another case delete only same buf id.
*/
if (!qbuf) {
/* delete list */
list_del(&e->base.link);
kfree(e);
}
/* compare buffer id */
if (qbuf && (qbuf->buf_id ==
e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
/* delete list */
list_del(&e->base.link);
kfree(e);
goto out_unlock;
}
}
out_unlock:
mutex_unlock(&c_node->event_lock);
return;
}
static void ipp_handle_cmd_work(struct device *dev,
struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_cmd_work *cmd_work,
@ -766,7 +766,7 @@ static void ipp_handle_cmd_work(struct device *dev,
cmd_work->ippdrv = ippdrv;
cmd_work->c_node = c_node;
queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
queue_work(ctx->cmd_workq, &cmd_work->work);
}
static int ipp_queue_buf_with_run(struct device *dev,
@ -872,7 +872,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
if (!c_node) {
if (!c_node || c_node->filp != file) {
DRM_ERROR("failed to get command node.\n");
return -ENODEV;
}
@ -881,7 +881,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
switch (qbuf->buf_type) {
case IPP_BUF_ENQUEUE:
/* get memory node */
m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
if (IS_ERR(m_node)) {
DRM_ERROR("failed to get m_node.\n");
return PTR_ERR(m_node);
@ -894,7 +894,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
*/
if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
/* get event for destination buffer */
ret = ipp_get_event(drm_dev, file, c_node, qbuf);
ret = ipp_get_event(drm_dev, c_node, qbuf);
if (ret) {
DRM_ERROR("failed to get event.\n");
goto err_clean_node;
@ -1007,7 +1007,7 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
if (!c_node) {
if (!c_node || c_node->filp != file) {
DRM_ERROR("invalid command node list.\n");
return -ENODEV;
}
@ -1257,80 +1257,39 @@ static int ipp_stop_property(struct drm_device *drm_dev,
struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_cmd_node *c_node)
{
struct drm_exynos_ipp_mem_node *m_node, *tm_node;
struct drm_exynos_ipp_property *property = &c_node->property;
struct list_head *head;
int ret = 0, i;
int i;
DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
/* put event */
ipp_put_event(c_node, NULL);
mutex_lock(&c_node->mem_lock);
/* check command */
switch (property->cmd) {
case IPP_CMD_M2M:
for_each_ipp_ops(i) {
/* source/destination memory list */
head = &c_node->mem_list[i];
list_for_each_entry_safe(m_node, tm_node,
head, list) {
ret = ipp_put_mem_node(drm_dev, c_node,
m_node);
if (ret) {
DRM_ERROR("failed to put m_node.\n");
goto err_clear;
}
}
}
break;
case IPP_CMD_WB:
/* destination memory list */
head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
list_for_each_entry_safe(m_node, tm_node, head, list) {
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret) {
DRM_ERROR("failed to put m_node.\n");
goto err_clear;
}
}
break;
case IPP_CMD_OUTPUT:
/* source memory list */
head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
list_for_each_entry_safe(m_node, tm_node, head, list) {
ret = ipp_put_mem_node(drm_dev, c_node, m_node);
if (ret) {
DRM_ERROR("failed to put m_node.\n");
goto err_clear;
}
}
break;
default:
DRM_ERROR("invalid operations.\n");
ret = -EINVAL;
goto err_clear;
}
err_clear:
mutex_unlock(&c_node->mem_lock);
/* stop operations */
if (ippdrv->stop)
ippdrv->stop(ippdrv->dev, property->cmd);
return ret;
/* check command */
switch (property->cmd) {
case IPP_CMD_M2M:
for_each_ipp_ops(i)
ipp_clean_mem_nodes(drm_dev, c_node, i);
break;
case IPP_CMD_WB:
ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
break;
case IPP_CMD_OUTPUT:
ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
break;
default:
DRM_ERROR("invalid operations.\n");
return -EINVAL;
}
return 0;
}
void ipp_sched_cmd(struct work_struct *work)
{
struct drm_exynos_ipp_cmd_work *cmd_work =
(struct drm_exynos_ipp_cmd_work *)work;
container_of(work, struct drm_exynos_ipp_cmd_work, work);
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
struct drm_exynos_ipp_property *property;
@ -1543,7 +1502,7 @@ err_event_unlock:
void ipp_sched_event(struct work_struct *work)
{
struct drm_exynos_ipp_event_work *event_work =
(struct drm_exynos_ipp_event_work *)work;
container_of(work, struct drm_exynos_ipp_event_work, work);
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
int ret;
@ -1646,11 +1605,11 @@ err:
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
struct exynos_drm_ippdrv *ippdrv;
struct exynos_drm_ippdrv *ippdrv, *t;
struct ipp_context *ctx = get_ipp_context(dev);
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
if (is_drm_iommu_supported(drm_dev))
drm_iommu_detach_device(drm_dev, ippdrv->dev);
@ -1677,14 +1636,11 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
int count = 0;
DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry_safe(c_node, tc_node,
@ -1692,7 +1648,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
count++, (int)ippdrv);
if (c_node->dev == file_priv->ipp_dev) {
if (c_node->filp == file) {
/*
* userland goto unnormal state. process killed.
* and close the file.
@ -1808,63 +1764,12 @@ static int ipp_remove(struct platform_device *pdev)
return 0;
}
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
DRM_DEBUG_KMS("enable[%d]\n", enable);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
struct ipp_context *ctx = get_ipp_context(dev);
if (pm_runtime_suspended(dev))
return 0;
return ipp_power_ctrl(ctx, false);
}
static int ipp_resume(struct device *dev)
{
struct ipp_context *ctx = get_ipp_context(dev);
if (!pm_runtime_suspended(dev))
return ipp_power_ctrl(ctx, true);
return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
struct ipp_context *ctx = get_ipp_context(dev);
return ipp_power_ctrl(ctx, false);
}
static int ipp_runtime_resume(struct device *dev)
{
struct ipp_context *ctx = get_ipp_context(dev);
return ipp_power_ctrl(ctx, true);
}
#endif
static const struct dev_pm_ops ipp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
struct platform_driver ipp_driver = {
.probe = ipp_probe,
.remove = ipp_remove,
.driver = {
.name = "exynos-drm-ipp",
.owner = THIS_MODULE,
.pm = &ipp_pm_ops,
},
};

@ -48,7 +48,6 @@ struct drm_exynos_ipp_cmd_work {
/*
* A structure of command node.
*
* @dev: IPP device.
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
@ -62,9 +61,9 @@ struct drm_exynos_ipp_cmd_work {
* @stop_work: stop command work structure.
* @event_work: event work structure.
* @state: state of command node.
* @filp: associated file pointer.
*/
struct drm_exynos_ipp_cmd_node {
struct device *dev;
struct list_head list;
struct list_head event_list;
struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
@ -78,6 +77,7 @@ struct drm_exynos_ipp_cmd_node {
struct drm_exynos_ipp_cmd_work *stop_work;
struct drm_exynos_ipp_event_work *event_work;
enum drm_exynos_ipp_state state;
struct drm_file *filp;
};
/*

@ -139,6 +139,8 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
overlay->crtc_x, overlay->crtc_y,
overlay->crtc_width, overlay->crtc_height);
plane->crtc = crtc;
exynos_drm_crtc_plane_mode_set(crtc, overlay);
return 0;
@ -187,8 +189,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret < 0)
return ret;
plane->crtc = crtc;
exynos_plane_commit(plane);
exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
@ -254,25 +254,26 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
}
struct drm_plane *exynos_plane_init(struct drm_device *dev,
unsigned long possible_crtcs, bool priv)
unsigned long possible_crtcs,
enum drm_plane_type type)
{
struct exynos_plane *exynos_plane;
int err;
exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
if (!exynos_plane)
return NULL;
return ERR_PTR(-ENOMEM);
err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats, ARRAY_SIZE(formats),
priv);
err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats,
ARRAY_SIZE(formats), type);
if (err) {
DRM_ERROR("failed to initialize plane\n");
kfree(exynos_plane);
return NULL;
return ERR_PTR(err);
}
if (priv)
if (type == DRM_PLANE_TYPE_PRIMARY)
exynos_plane->overlay.zpos = DEFAULT_ZPOS;
else
exynos_plane_attach_zpos_property(&exynos_plane->base);
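
exynos_plane_init() now reports failure through ERR_PTR() instead of NULL and takes an explicit enum drm_plane_type rather than a bool, so its callers have to be updated in step. A hedged sketch of the caller side, with the function and variable names here being illustrative rather than taken from the patch:

#include <linux/err.h>
#include <drm/drm_crtc.h>
#include "exynos_drm_plane.h"   /* driver-local prototype for exynos_plane_init() */

/* Illustration only: create the primary plane for one CRTC and
 * propagate the encoded error instead of testing for NULL. */
static int example_create_primary_plane(struct drm_device *dev,
                                        unsigned long possible_crtcs)
{
        struct drm_plane *plane;

        plane = exynos_plane_init(dev, possible_crtcs,
                                  DRM_PLANE_TYPE_PRIMARY);
        if (IS_ERR(plane))
                return PTR_ERR(plane);

        return 0;
}
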

@ -17,4 +17,5 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
void exynos_plane_commit(struct drm_plane *plane);
void exynos_plane_dpms(struct drm_plane *plane, int mode);
struct drm_plane *exynos_plane_init(struct drm_device *dev,
unsigned long possible_crtcs, bool priv);
unsigned long possible_crtcs,
enum drm_plane_type type);

@ -156,8 +156,7 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
event_work->ippdrv = ippdrv;
event_work->buf_id[EXYNOS_DRM_OPS_DST] =
rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
queue_work(ippdrv->event_workq,
(struct work_struct *)event_work);
queue_work(ippdrv->event_workq, &event_work->work);
} else {
DRM_ERROR("the SFR is set illegally\n");
}

@ -303,23 +303,6 @@ static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
mgr->drm_dev = ctx->drm_dev = drm_dev;
mgr->pipe = ctx->pipe = priv->pipe++;
/*
* enable drm irq mode.
* - with irq_enabled = 1, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
drm_dev->irq_enabled = 1;
/*
* with vblank_disable_allowed = 1, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
drm_dev->vblank_disable_allowed = 1;
return 0;
}
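
The removed comments described what the two drm_device flags do; this hunk only stops setting them in the per-manager init, and where (or whether) the driver sets them afterwards is not visible in this diff, presumably a single central spot. For reference, this is the pattern the deleted comments were describing, as a sketch with an assumed function name:

#include <drm/drmP.h>

/* Sketch only: setting the vblank-related flags once in a driver's
 * ->load() path instead of in every crtc/manager init. */
static int example_drm_load(struct drm_device *dev, unsigned long flags)
{
        /* the driver installs its own interrupt handler, so tell the
         * core that vblank accounting can still be used */
        dev->irq_enabled = true;

        /* let the core disable the vblank interrupt once the last
         * drm_vblank_put() reference is dropped */
        dev->vblank_disable_allowed = true;

        return 0;
}
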
@ -648,7 +631,6 @@ static int vidi_remove(struct platform_device *pdev)
struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
struct vidi_context *ctx = mgr->ctx;
struct drm_encoder *encoder = ctx->encoder;
struct drm_crtc *crtc = mgr->crtc;
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
@ -657,7 +639,6 @@ static int vidi_remove(struct platform_device *pdev)
return -EINVAL;
}
crtc->funcs->destroy(crtc);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&ctx->connector);

@ -1040,6 +1040,8 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
static void hdmi_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static struct drm_connector_funcs hdmi_connector_funcs = {
@ -2314,8 +2316,8 @@ static void hdmi_unbind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder = display->encoder;
struct hdmi_context *hdata = display->ctx;
hdmi_connector_destroy(&hdata->connector);
encoder->funcs->destroy(encoder);
drm_connector_cleanup(&hdata->connector);
}
static const struct component_ops hdmi_component_ops = {

@ -1302,15 +1302,12 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
struct drm_crtc *crtc = mgr->crtc;
dev_info(dev, "remove successful\n");
mixer_mgr_remove(mgr);
pm_runtime_disable(dev);
crtc->funcs->destroy(crtc);
}
static const struct component_ops mixer_component_ops = {

@ -1089,7 +1089,7 @@ static char *link_train_names[] = {
};
#endif
#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
/*
static uint8_t
cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
@ -1276,7 +1276,7 @@ cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level
cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
/* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
else
cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
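
The gma500 code moves from the old absolute-value DP training macros to the numbered DP_TRAIN_*_LEVEL_* names from drm_dp_helper.h; the same rename covers the vswing and pre-emphasis values in the VBT eDP parsing further down. A small hedged helper, not part of the patch, spelling out the vswing mapping used there, with the old names noted in comments:

#include <drm/drm_dp_helper.h>

/* Hypothetical helper, for illustration only: map the 0-3 vswing index
 * found in the VBT to the renamed drm_dp_helper.h macros. */
static u8 example_vbt_vswing_to_dp(u8 vbt_index)
{
        switch (vbt_index) {
        case 0:
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;  /* was ..._SWING_400 */
        case 1:
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;  /* was ..._SWING_600 */
        case 2:
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;  /* was ..._SWING_800 */
        default:
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;  /* was ..._SWING_1200 */
        }
}
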

@ -540,7 +540,8 @@ static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
static int psbfb_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
struct psb_fbdev *psb_fbdev =
container_of(helper, struct psb_fbdev, psb_fb_helper);
struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
int bytespp;

@ -21,6 +21,7 @@
#define _PSB_GTT_H_
#include <drm/drmP.h>
#include <drm/drm_gem.h>
/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
struct psb_gtt {

@ -116,30 +116,30 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
switch (edp_link_params->preemphasis) {
case 0:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
break;
case 1:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
break;
case 2:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
break;
case 3:
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
}
switch (edp_link_params->vswing) {
case 0:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
break;
case 1:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
break;
case 2:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
break;
case 3:
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
}
DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n",

@ -476,6 +476,7 @@ static struct drm_driver driver = {
.unload = psb_driver_unload,
.lastclose = psb_driver_lastclose,
.preclose = psb_driver_preclose,
.set_busid = drm_pci_set_busid,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
.device_is_agp = psb_driver_device_is_agp,

@ -213,7 +213,7 @@ static int i810_dma_cleanup(struct drm_device *dev)
(drm_i810_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
@ -227,7 +227,7 @@ static int i810_dma_cleanup(struct drm_device *dev)
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf_priv->kernel_virtual && buf->total)
drm_core_ioremapfree(&buf_priv->map, dev);
drm_legacy_ioremapfree(&buf_priv->map, dev);
}
}
return 0;
@ -306,7 +306,7 @@ static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_pr
buf_priv->map.flags = 0;
buf_priv->map.mtrr = 0;
drm_core_ioremap(&buf_priv->map, dev);
drm_legacy_ioremap(&buf_priv->map, dev);
buf_priv->kernel_virtual = buf_priv->map.handle;
}
@ -334,7 +334,7 @@ static int i810_dma_initialize(struct drm_device *dev,
DRM_ERROR("can not find sarea!\n");
return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
dev_priv->mmio_map = drm_legacy_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
@ -342,7 +342,7 @@ static int i810_dma_initialize(struct drm_device *dev,
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
@ -363,7 +363,7 @@ static int i810_dma_initialize(struct drm_device *dev,
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
drm_core_ioremap(&dev_priv->ring.map, dev);
drm_legacy_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
@ -1215,9 +1215,9 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
}
if (file_priv->master && file_priv->master->lock.hw_lock) {
drm_idlelock_take(&file_priv->master->lock);
drm_legacy_idlelock_take(&file_priv->master->lock);
i810_driver_reclaim_buffers(dev, file_priv);
drm_idlelock_release(&file_priv->master->lock);
drm_legacy_idlelock_release(&file_priv->master->lock);
} else {
/* master disappeared, clean up stuff anyway and hope nothing
* goes wrong */
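
The i810 conversion is mechanical: the map, ioremap and idlelock helpers used by UMS-era drivers keep their behaviour but gain a drm_legacy_ prefix, and, as the header hunk further down shows, they now come from <drm/drm_legacy.h>. A minimal hedged sketch of the renamed lookup and ioremap pair as used here, with illustrative names:

#include <drm/drmP.h>
#include <drm/drm_legacy.h>

/* Illustration only: look up a legacy map by its offset token and
 * ioremap it with the renamed helpers. */
static void *example_map_legacy_region(struct drm_device *dev,
                                       unsigned int token)
{
        struct drm_local_map *map;

        map = drm_legacy_findmap(dev, token);   /* was drm_core_findmap() */
        if (!map)
                return NULL;

        drm_legacy_ioremap(map, dev);           /* was drm_core_ioremap() */
        return map->handle;                     /* NULL if the ioremap failed */
}
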

@ -47,7 +47,7 @@ static const struct file_operations i810_driver_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.mmap = drm_legacy_mmap,
.poll = drm_poll,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
@ -63,6 +63,7 @@ static struct drm_driver driver = {
.load = i810_driver_load,
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
.set_busid = drm_pci_set_busid,
.device_is_agp = i810_driver_device_is_agp,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,

@ -32,6 +32,8 @@
#ifndef _I810_DRV_H_
#define _I810_DRV_H_
#include <drm/drm_legacy.h>
/* General customization:
*/

@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
intel_lrc.o \
intel_ringbuffer.o \
intel_uncore.o

@ -60,16 +60,297 @@
#define NS2501_REGC 0x0c
enum {
MODE_640x480,
MODE_800x600,
MODE_1024x768,
};
struct ns2501_reg {
uint8_t offset;
uint8_t value;
};
/*
* Magic values based on what the BIOS on
* Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
*/
static const struct ns2501_reg regs_1024x768[][86] = {
[MODE_640x480] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x11, },
[5] = { .offset = 0x1c, .value = 0x54, },
[6] = { .offset = 0x1d, .value = 0x03, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0x90, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0x0f, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x16, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x02, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0xff, },
[20] = { .offset = 0x81, .value = 0x07, },
[21] = { .offset = 0x82, .value = 0x3d, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x0a, },
[33] = { .offset = 0x9c, .value = 0x24, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x00, },
[43] = { .offset = 0xb9, .value = 0xa0, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x00, },
[47] = { .offset = 0x11, .value = 0xa0, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x10, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x04, },
[67] = { .offset = 0xaa, .value = 0x70, },
[68] = { .offset = 0xab, .value = 0x4f, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x84, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x05, },
},
[MODE_800x600] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x19, },
[5] = { .offset = 0x1c, .value = 0x64, },
[6] = { .offset = 0x1d, .value = 0x02, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0xd7, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0xf8, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x1a, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x73, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0x27, },
[20] = { .offset = 0x81, .value = 0x03, },
[21] = { .offset = 0x82, .value = 0x41, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x06, },
[33] = { .offset = 0x9c, .value = 0x23, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x30, },
[43] = { .offset = 0xb9, .value = 0xc8, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x20, },
[47] = { .offset = 0x11, .value = 0xc8, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x04, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x83, },
[67] = { .offset = 0xaa, .value = 0x40, },
[68] = { .offset = 0xab, .value = 0x32, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x80, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x07, },
},
[MODE_1024x768] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x11, },
[5] = { .offset = 0x1c, .value = 0x54, },
[6] = { .offset = 0x1d, .value = 0x03, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0x90, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0x0f, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x16, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x02, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0xff, },
[20] = { .offset = 0x81, .value = 0x07, },
[21] = { .offset = 0x82, .value = 0x3d, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x0a, },
[33] = { .offset = 0x9c, .value = 0x24, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x00, },
[43] = { .offset = 0xb9, .value = 0xa0, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x00, },
[47] = { .offset = 0x11, .value = 0xa0, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x10, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x04, },
[67] = { .offset = 0xaa, .value = 0x70, },
[68] = { .offset = 0xab, .value = 0x4f, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x84, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x34, }, /* 0x35 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x01, },
},
};
static const struct ns2501_reg regs_init[] = {
[0] = { .offset = 0x35, .value = 0xff, },
[1] = { .offset = 0x34, .value = 0x00, },
[2] = { .offset = 0x08, .value = 0x30, },
};
struct ns2501_priv {
//I2CDevRec d;
bool quiet;
int reg_8_shadow;
int reg_8_set;
// Shadow registers for i915
int dvoc;
int pll_a;
int srcdim;
int fw_blc;
const struct ns2501_reg *regs;
};
#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
@ -205,11 +486,9 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
goto out;
}
ns->quiet = false;
ns->reg_8_set = 0;
ns->reg_8_shadow =
NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
return true;
out:
@ -242,9 +521,9 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
* of the panel in here so we could always accept it
* by disabling the scaler.
*/
if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
(mode->hdisplay == 640 && mode->vdisplay == 480) ||
(mode->hdisplay == 1024 && mode->vdisplay == 768)) {
if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
(mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
(mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
return MODE_OK;
} else {
return MODE_ONE_SIZE; /* Is this a reasonable error? */
@ -255,180 +534,30 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
bool ok;
int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
int mode_idx, i;
DRM_DEBUG_KMS
("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
/*
* Where do I find the native resolution for which scaling is not required???
*
* First trigger the DVO on as otherwise the chip does not appear on the i2c
* bus.
*/
do {
ok = true;
if (mode->hdisplay == 640 && mode->vdisplay == 480)
mode_idx = MODE_640x480;
else if (mode->hdisplay == 800 && mode->vdisplay == 600)
mode_idx = MODE_800x600;
else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
mode_idx = MODE_1024x768;
else
return;
if (mode->hdisplay == 800 && mode->vdisplay == 600) {
/* mode 277 */
ns->reg_8_shadow &= ~NS2501_8_BPAS;
DRM_DEBUG_KMS("switching to 800x600\n");
/* Hopefully doing it every time won't hurt... */
for (i = 0; i < ARRAY_SIZE(regs_init); i++)
ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
/*
* No, I do not know where this data comes from.
* It is just what the video bios left in the DVO, so
* I'm just copying it here over.
* This also means that I cannot support any other modes
* except the ones supported by the bios.
*/
ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
ok &= ns2501_writeb(dvo, 0x1b, 0x19);
ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
ok &= ns2501_writeb(dvo, 0x1d, 0x02);
ns->regs = regs_1024x768[mode_idx];
ok &= ns2501_writeb(dvo, 0x34, 0x03);
ok &= ns2501_writeb(dvo, 0x35, 0xff);
ok &= ns2501_writeb(dvo, 0x80, 0x27);
ok &= ns2501_writeb(dvo, 0x81, 0x03);
ok &= ns2501_writeb(dvo, 0x82, 0x41);
ok &= ns2501_writeb(dvo, 0x83, 0x05);
ok &= ns2501_writeb(dvo, 0x8d, 0x02);
ok &= ns2501_writeb(dvo, 0x8e, 0x04);
ok &= ns2501_writeb(dvo, 0x8f, 0x00);
ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
ok &= ns2501_writeb(dvo, 0x91, 0x07);
ok &= ns2501_writeb(dvo, 0x94, 0x00);
ok &= ns2501_writeb(dvo, 0x95, 0x00);
ok &= ns2501_writeb(dvo, 0x96, 0x00);
ok &= ns2501_writeb(dvo, 0x99, 0x00);
ok &= ns2501_writeb(dvo, 0x9a, 0x88);
ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
ok &= ns2501_writeb(dvo, 0x9d, 0x00);
ok &= ns2501_writeb(dvo, 0x9e, 0x25);
ok &= ns2501_writeb(dvo, 0x9f, 0x03);
ok &= ns2501_writeb(dvo, 0xa4, 0x80);
ok &= ns2501_writeb(dvo, 0xb6, 0x00);
ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
ok &= ns2501_writeb(dvo, 0xc2, 0x00);
ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
ok &= ns2501_writeb(dvo, 0xc4, 0x03);
ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
ok &= ns2501_writeb(dvo, 0xc6, 0x00);
ok &= ns2501_writeb(dvo, 0xc7, 0x73);
ok &= ns2501_writeb(dvo, 0xc8, 0x02);
} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
/* mode 274 */
DRM_DEBUG_KMS("switching to 640x480\n");
/*
* No, I do not know where this data comes from.
* It is just what the video bios left in the DVO, so
* I'm just copying it here over.
* This also means that I cannot support any other modes
* except the ones supported by the bios.
*/
ns->reg_8_shadow &= ~NS2501_8_BPAS;
ok &= ns2501_writeb(dvo, 0x11, 0xa0);
ok &= ns2501_writeb(dvo, 0x1b, 0x11);
ok &= ns2501_writeb(dvo, 0x1c, 0x54);
ok &= ns2501_writeb(dvo, 0x1d, 0x03);
ok &= ns2501_writeb(dvo, 0x34, 0x03);
ok &= ns2501_writeb(dvo, 0x35, 0xff);
ok &= ns2501_writeb(dvo, 0x80, 0xff);
ok &= ns2501_writeb(dvo, 0x81, 0x07);
ok &= ns2501_writeb(dvo, 0x82, 0x3d);
ok &= ns2501_writeb(dvo, 0x83, 0x05);
ok &= ns2501_writeb(dvo, 0x8d, 0x02);
ok &= ns2501_writeb(dvo, 0x8e, 0x10);
ok &= ns2501_writeb(dvo, 0x8f, 0x00);
ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
ok &= ns2501_writeb(dvo, 0x91, 0x07);
ok &= ns2501_writeb(dvo, 0x94, 0x00);
ok &= ns2501_writeb(dvo, 0x95, 0x00);
ok &= ns2501_writeb(dvo, 0x96, 0x05);
ok &= ns2501_writeb(dvo, 0x99, 0x00);
ok &= ns2501_writeb(dvo, 0x9a, 0x88);
ok &= ns2501_writeb(dvo, 0x9c, 0x24);
ok &= ns2501_writeb(dvo, 0x9d, 0x00);
ok &= ns2501_writeb(dvo, 0x9e, 0x25);
ok &= ns2501_writeb(dvo, 0x9f, 0x03);
ok &= ns2501_writeb(dvo, 0xa4, 0x84);
ok &= ns2501_writeb(dvo, 0xb6, 0x09);
ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xc1, 0x90);
ok &= ns2501_writeb(dvo, 0xc2, 0x00);
ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
ok &= ns2501_writeb(dvo, 0xc4, 0x03);
ok &= ns2501_writeb(dvo, 0xc5, 0x16);
ok &= ns2501_writeb(dvo, 0xc6, 0x00);
ok &= ns2501_writeb(dvo, 0xc7, 0x02);
ok &= ns2501_writeb(dvo, 0xc8, 0x02);
} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
/* mode 280 */
DRM_DEBUG_KMS("switching to 1024x768\n");
/*
* This might or might not work, actually. I'm silently
* assuming here that the native panel resolution is
* 1024x768. If not, then this leaves the scaler disabled
* generating a picture that is likely not the expected.
*
* Problem is that I do not know where to take the panel
* dimensions from.
*
* Enable the bypass, scaling not required.
*
* The scaler registers are irrelevant here....
*
*/
ns->reg_8_shadow |= NS2501_8_BPAS;
ok &= ns2501_writeb(dvo, 0x37, 0x44);
} else {
/*
* Data not known. Bummer!
* Hopefully, the code should not go here
* as mode_OK delivered no other modes.
*/
ns->reg_8_shadow |= NS2501_8_BPAS;
}
ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
} while (!ok && retries--);
for (i = 0; i < 84; i++)
ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
}
/* set the NS2501 power state */
@ -439,62 +568,48 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
if (!ns2501_readb(dvo, NS2501_REG8, &ch))
return false;
if (ch & NS2501_8_PD)
return true;
else
return false;
return ch & NS2501_8_PD;
}
/* set the NS2501 power state */
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
bool ok;
int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
ch = ns->reg_8_shadow;
if (enable) {
if (WARN_ON(ns->regs[83].offset != 0x08 ||
ns->regs[84].offset != 0x41 ||
ns->regs[85].offset != 0xc0))
return;
if (enable)
ch |= NS2501_8_PD;
else
ch &= ~NS2501_8_PD;
ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);
if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
ns->reg_8_set = 1;
ns->reg_8_shadow = ch;
ns2501_writeb(dvo, 0x41, ns->regs[84].value);
do {
ok = true;
ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
ok &=
ns2501_writeb(dvo, 0x34,
enable ? 0x03 : 0x00);
ok &=
ns2501_writeb(dvo, 0x35,
enable ? 0xff : 0x00);
} while (!ok && retries--);
ns2501_writeb(dvo, 0x34, 0x01);
msleep(15);
ns2501_writeb(dvo, 0x08, 0x35);
if (!(ns->regs[83].value & NS2501_8_BPAS))
ns2501_writeb(dvo, 0x08, 0x31);
msleep(200);
ns2501_writeb(dvo, 0x34, 0x03);
ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
} else {
ns2501_writeb(dvo, 0x34, 0x01);
msleep(200);
ns2501_writeb(dvo, 0x08, 0x34);
msleep(15);
ns2501_writeb(dvo, 0x34, 0x00);
}
}
static void ns2501_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
ns2501_readb(dvo, NS2501_FREQ_LO, &val);
DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_FREQ_HI, &val);
DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG8, &val);
DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG9, &val);
DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REGC, &val);
DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
}
static void ns2501_destroy(struct intel_dvo_device *dvo)
{
struct ns2501_priv *ns = dvo->dev_priv;
@ -512,6 +627,5 @@ struct intel_dvo_dev_ops ns2501_ops = {
.mode_set = ns2501_mode_set,
.dpms = ns2501_dpms,
.get_hw_state = ns2501_get_hw_state,
.dump_regs = ns2501_dump_regs,
.destroy = ns2501_destroy,
};

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше