Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is a combo of -next and some -fixes that came in in the
  intervening time.

  Highlights:

  New drivers:
    ARM Armada driver for Marvell Armada 510 SoCs

  Intel:
    Broadwell initial support under a default-off switch
    Stereo/3D HDMI mode support
    Valleyview improvements
    Displayport improvements
    Haswell fixes
    initial MIPI DSI panel support
    CRC support for debugging
    can now build with CONFIG_FB=n

  Radeon:
    enable DPM on a number of GPUs by default
    secondary GPU powerdown support
    enable HDMI audio by default
    Hawaii support

  Nouveau:
    dynamic pm code infrastructure reworked, does nothing major yet
    GK208 modesetting support
    MSI fixes, on by default again
    PMPEG improvements
    pageflipping fixes

  GMA500:
    MinnowBoard SDVO support

  VMware:
    misc fixes

  MSM:
    prime, plane and rendernodes support

  Tegra:
    rearchitected to put the drm driver into the drm subsystem.
    HDMI and gr2d support for the Tegra114 SoC

  QXL:
    oops fix, and multi-head fixes

  DRM core:
    sysfs lifetime fixes
    client capability ioctl
    further cleanups to device midlayer
    more vblank timestamp fixes"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (789 commits)
  drm/nouveau: do not map evicted vram buffers in nouveau_bo_vma_add
  drm/nvc0-/gr: shift wrapping bug in nvc0_grctx_generate_r406800
  drm/nouveau/pwr: fix missing mutex unlock in a failure path
  drm/nv40/therm: fix slowing down fan when pstate undefined
  drm/nv11-: synchronise flips to vblank, unless async flip requested
  drm/nvc0-: remove nasty fifo swmthd hack for flip completion method
  drm/nv10-: we no longer need to create nvsw object on user channels
  drm/nouveau: always queue flips relative to kernel channel activity
  drm/nouveau: there is no need to reserve/fence the new fb when flipping
  drm/nouveau: when bailing out of a pushbuf ioctl, do not remove previous fence
  drm/nouveau: allow nouveau_fence_ref() to be a noop
  drm/nvc8/mc: msi rearm is via the nvc0 method
  drm/ttm: Fix vma page_prot bit manipulation
  drm/vmwgfx: Fix a couple of compile / sparse warnings and errors
  drm/vmwgfx: Resource evict fixes
  drm/edid: compare actual vrefresh for all modes for quirks
  drm: shmob_drm: Convert to clk_prepare/unprepare
  drm/nouveau: fix 32-bit build
  drm/i915/opregion: fix build error on CONFIG_ACPI=n
  Revert "drm/radeon/audio: don't set speaker allocation on DCE4+"
  ...
Linus Torvalds 2013-11-15 14:19:54 +09:00
Parent c681427e5c 0846c728e2
Commit 049ffa8ab3
557 changed files: 44635 additions and 14944 deletions


@@ -2849,7 +2849,9 @@ L: dri-devel@lists.freedesktop.org
L: linux-tegra@vger.kernel.org
T: git git://anongit.freedesktop.org/tegra/linux.git
S: Supported
F: drivers/gpu/drm/tegra/
F: drivers/gpu/host1x/
F: include/linux/host1x.h
F: include/uapi/drm/tegra_drm.h
F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt


@@ -313,6 +313,16 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
return gmch_ctrl << 25; /* 32 MB units */
}
static inline size_t gen8_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
gmch_ctrl &= BDW_GMCH_GMS_MASK;
return gmch_ctrl << 25; /* 32 MB units */
}
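/*
 * Illustrative worked example: after the shift and mask above, the GMS
 * field is scaled by 32 MB (1 << 25), so a raw field value of 2 decodes
 * to 2 << 25 == 64 MB of stolen memory, and a value of 8 to 256 MB.
 */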
typedef size_t (*stolen_size_fn)(int num, int slot, int func);
static struct pci_device_id intel_stolen_ids[] __initdata = {
@@ -336,6 +346,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
INTEL_IVB_D_IDS(gen6_stolen_size),
INTEL_HSW_D_IDS(gen6_stolen_size),
INTEL_HSW_M_IDS(gen6_stolen_size),
INTEL_BDW_M_IDS(gen8_stolen_size),
INTEL_BDW_D_IDS(gen8_stolen_size)
};
static void __init intel_graphics_stolen(int num, int slot, int func)


@@ -29,11 +29,17 @@ config DRM_USB
config DRM_KMS_HELPER
tristate
depends on DRM
help
CRTC helpers for KMS drivers.
config DRM_KMS_FB_HELPER
bool
depends on DRM_KMS_HELPER
select FB
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
help
FB and CRTC helpers for KMS drivers.
FBDEV helpers for KMS drivers.
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER
config DRM_KMS_CMA_HELPER
bool
select DRM_GEM_CMA_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@ config DRM_RADEON
select FB_CFB_IMAGEBLIT
select FW_LOADER
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
select POWER_SUPPLY
select HWMON
@@ -120,64 +128,7 @@ config DRM_I810
selected, the module will be called i810. AGP support is required
for this driver to work.
config DRM_I915
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
depends on AGP
depends on AGP_INTEL
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select THERMAL if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
If M is selected, the module will be called i915. AGP support
is required for this driver to work. This driver is used by
the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
replaces the older i830 module that supported a subset of the
hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
depends on DRM_I915
help
Choose this option if you want kernel modesetting enabled by default,
and you have a new enough userspace to support this. Running old
userspaces with this enabled will cause pain. Note that this causes
the driver to bind to PCI devices, which precludes loading things
like intelfb.
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
help
Choose this option if you have prerelease Intel hardware and want the
i915 driver to support it by default. You can enable such support at
runtime with the module option i915.preliminary_hw_support=1; this
option changes the default for that module option.
If in doubt, say "N".
source "drivers/gpu/drm/i915/Kconfig"
config DRM_MGA
tristate "Matrox g200/g400"
@@ -225,6 +176,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/armada/Kconfig"
source "drivers/gpu/drm/rcar-du/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
@@ -236,3 +189,5 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"
source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"


@@ -21,8 +21,9 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -49,10 +50,12 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_ARMADA) += armada/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/


@@ -0,0 +1,24 @@
config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
depends on DRM && HAVE_CLK && ARM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller
supports graphics and video overlays.
This driver provides no built-in acceleration; acceleration is
performed by other IP found on the SoC. This driver provides
kernel mode setting and buffer management to userspace.
config DRM_ARMADA_TDA1998X
bool "Support TDA1998X HDMI output"
depends on DRM_ARMADA != n
depends on I2C && DRM_I2C_NXP_TDA998X = y
default y
help
Support the TDA1998x HDMI output device found on the Solid-Run
CuBox.
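# Illustrative .config fragment (assumed values) enabling the driver as
# a module together with the TDA998x HDMI output, consistent with the
# dependencies above:
#
#   CONFIG_DRM_ARMADA=m
#   CONFIG_DRM_I2C_NXP_TDA998X=y
#   CONFIG_DRM_ARMADA_TDA1998X=y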


@@ -0,0 +1,7 @@
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
armada_gem.o armada_output.o armada_overlay.o \
armada_slave.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
obj-$(CONFIG_DRM_ARMADA) := armada.o


@@ -0,0 +1,87 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Armada 510 (aka Dove) variant support
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_hw.h"
static int armada510_init(struct armada_private *priv, struct device *dev)
{
priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
return PTR_RET(priv->extclk[0]);
}
static int armada510_crtc_init(struct armada_crtc *dcrtc)
{
/* Lower the watermark so as to eliminate jitter at higher bandwidths */
armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
return 0;
}
/*
* Armada510 specific SCLK register selection.
* This gets called with sclk = NULL to test whether the mode is
* supportable, and again with sclk != NULL to set the clocks up for
* that. The former can return an error, but the latter is expected
* not to.
*
* We currently are pretty rudimentary here, always selecting
* EXT_REF_CLK_1 for LCD0 and erroring out on LCD1. This needs improvement!
*/
static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
const struct drm_display_mode *mode, uint32_t *sclk)
{
struct armada_private *priv = dcrtc->crtc.dev->dev_private;
struct clk *clk = priv->extclk[0];
int ret;
if (dcrtc->num == 1)
return -EINVAL;
if (IS_ERR(clk))
return PTR_ERR(clk);
if (dcrtc->clk != clk) {
ret = clk_prepare_enable(clk);
if (ret)
return ret;
dcrtc->clk = clk;
}
if (sclk) {
uint32_t rate, ref, div;
rate = mode->clock * 1000;
ref = clk_round_rate(clk, rate);
div = DIV_ROUND_UP(ref, rate);
if (div < 1)
div = 1;
clk_set_rate(clk, ref);
*sclk = div | SCLK_510_EXTCLK1;
}
return 0;
}
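/*
 * Illustrative numbers: a 1080p mode has mode->clock == 148500 (kHz),
 * so rate is 148500000 Hz; if clk_round_rate() returns exactly that
 * rate, div is DIV_ROUND_UP(148500000, 148500000) == 1 and *sclk is
 * set to 1 | SCLK_510_EXTCLK1.
 */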
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
.init = armada510_init,
.crtc_init = armada510_crtc_init,
.crtc_compute_clock = armada510_crtc_compute_clock,
};

File diff suppressed because it is too large


@@ -0,0 +1,83 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_CRTC_H
#define ARMADA_CRTC_H
struct armada_gem_object;
struct armada_regs {
uint32_t offset;
uint32_t mask;
uint32_t val;
};
#define armada_reg_queue_mod(_r, _i, _v, _m, _o) \
do { \
struct armada_regs *__reg = _r; \
__reg[_i].offset = _o; \
__reg[_i].mask = ~(_m); \
__reg[_i].val = _v; \
_i++; \
} while (0)
#define armada_reg_queue_set(_r, _i, _v, _o) \
armada_reg_queue_mod(_r, _i, _v, ~0, _o)
#define armada_reg_queue_end(_r, _i) \
armada_reg_queue_mod(_r, _i, 0, 0, ~0)
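/*
 * Usage sketch (illustrative; addr, val and mask are placeholders):
 * build a short array of register updates, terminate it with
 * armada_reg_queue_end(), and hand the batch to
 * armada_drm_crtc_update_regs():
 *
 * struct armada_regs regs[4];
 * unsigned i = 0;
 *
 * armada_reg_queue_set(regs, i, addr, LCD_CFG_GRA_START_ADDR0);
 * armada_reg_queue_mod(regs, i, val, mask, LCD_SPU_DMA_CTRL0);
 * armada_reg_queue_end(regs, i);
 * armada_drm_crtc_update_regs(dcrtc, regs);
 */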
struct armada_frame_work;
struct armada_crtc {
struct drm_crtc crtc;
unsigned num;
void __iomem *base;
struct clk *clk;
struct {
uint32_t spu_v_h_total;
uint32_t spu_v_porch;
uint32_t spu_adv_reg;
} v[2];
bool interlaced;
bool cursor_update;
uint8_t csc_yuv_mode;
uint8_t csc_rgb_mode;
struct drm_plane *plane;
struct armada_gem_object *cursor_obj;
int cursor_x;
int cursor_y;
uint32_t cursor_hw_pos;
uint32_t cursor_hw_sz;
uint32_t cursor_w;
uint32_t cursor_h;
int dpms;
uint32_t cfg_dumb_ctrl;
uint32_t dumb_ctrl;
uint32_t spu_iopad_ctrl;
wait_queue_head_t frame_wait;
struct armada_frame_work *frame_work;
spinlock_t irq_lock;
uint32_t irq_ena;
struct list_head vbl_list;
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
void armada_drm_crtc_irq(struct armada_crtc *, u32);
void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
#endif


@@ -0,0 +1,177 @@
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include "armada_crtc.h"
#include "armada_drm.h"
static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct armada_private *priv = dev->dev_private;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_mm_dump_table(m, &priv->linear);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static int armada_debugfs_reg_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
struct armada_private *priv = dev->dev_private;
int n, i;
if (priv) {
for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
struct armada_crtc *dcrtc = priv->dcrtc[n];
if (!dcrtc)
continue;
for (i = 0x84; i <= 0x1c4; i += 4) {
uint32_t v = readl_relaxed(dcrtc->base + i);
seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
}
}
}
return 0;
}
static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
{
return single_open(file, armada_debugfs_reg_show, inode->i_private);
}
static const struct file_operations fops_reg_r = {
.owner = THIS_MODULE,
.open = armada_debugfs_reg_r_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int armada_debugfs_write(struct file *file, const char __user *ptr,
size_t len, loff_t *off)
{
struct drm_device *dev = file->private_data;
struct armada_private *priv = dev->dev_private;
struct armada_crtc *dcrtc = priv->dcrtc[0];
char buf[32], *p;
uint32_t reg, val;
int ret;
if (*off != 0)
return 0;
if (len > sizeof(buf) - 1)
len = sizeof(buf) - 1;
ret = strncpy_from_user(buf, ptr, len);
if (ret < 0)
return ret;
buf[len] = '\0';
reg = simple_strtoul(buf, &p, 16);
if (!isspace(*p))
return -EINVAL;
val = simple_strtoul(p + 1, NULL, 16);
if (reg >= 0x84 && reg <= 0x1c4)
writel(val, dcrtc->base + reg);
return len;
}
static const struct file_operations fops_reg_w = {
.owner = THIS_MODULE,
.open = simple_open,
.write = armada_debugfs_write,
.llseek = noop_llseek,
};
static struct drm_info_list armada_debugfs_list[] = {
{ "gem_linear", armada_debugfs_gem_linear_show, 0 },
};
#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
const void *key)
{
struct drm_info_node *node;
node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
if (node == NULL) {
debugfs_remove(ent);
return -ENOMEM;
}
node->minor = minor;
node->dent = ent;
node->info_ent = (void *) key;
mutex_lock(&minor->debugfs_lock);
list_add(&node->list, &minor->debugfs_list);
mutex_unlock(&minor->debugfs_lock);
return 0;
}
static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
const char *name, umode_t mode, const struct file_operations *fops)
{
struct dentry *de;
de = debugfs_create_file(name, mode, root, minor->dev, fops);
return drm_add_fake_info_node(minor, de, fops);
}
int armada_drm_debugfs_init(struct drm_minor *minor)
{
int ret;
ret = drm_debugfs_create_files(armada_debugfs_list,
ARMADA_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
if (ret)
return ret;
ret = armada_debugfs_create(minor->debugfs_root, minor,
"reg", S_IFREG | S_IRUSR, &fops_reg_r);
if (ret)
goto err_1;
ret = armada_debugfs_create(minor->debugfs_root, minor,
"reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
if (ret)
goto err_2;
return ret;
err_2:
drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
err_1:
drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
minor);
return ret;
}
void armada_drm_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
minor);
}


@@ -0,0 +1,113 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_DRM_H
#define ARMADA_DRM_H
#include <linux/kfifo.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <drm/drmP.h>
struct armada_crtc;
struct armada_gem_object;
struct clk;
struct drm_fb_helper;
static inline void
armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
{
uint32_t ov, v;
ov = v = readl_relaxed(ptr);
v = (v & ~mask) | val;
if (ov != v)
writel_relaxed(v, ptr);
}
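/*
 * Illustrative use: a read-modify-write that sets one field without
 * touching the rest of the register, e.g. enabling graphics DMA
 * (names from armada_hw.h):
 *
 * armada_updatel(CFG_GRA_ENA, CFG_GRA_ENA,
 * dcrtc->base + LCD_SPU_DMA_CTRL0);
 *
 * The write is skipped when the masked-in value leaves the register
 * unchanged.
 */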
static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
{
uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
/* 88AP510 spec recommends pitch be a multiple of 128 */
return ALIGN(pitch, 128);
}
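/*
 * Worked example: a 1366 pixel wide, 32 bpp framebuffer gives
 * 1366 * 4 = 5464 bytes, which ALIGN(5464, 128) rounds up to 5504;
 * the bpp == 4 case packs two pixels per byte, hence width / 2.
 */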
struct armada_vbl_event {
struct list_head node;
void *data;
void (*fn)(struct armada_crtc *, void *);
};
void armada_drm_vbl_event_add(struct armada_crtc *,
struct armada_vbl_event *);
void armada_drm_vbl_event_remove(struct armada_crtc *,
struct armada_vbl_event *);
void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
struct armada_vbl_event *);
#define armada_drm_vbl_event_init(_e, _f, _d) do { \
struct armada_vbl_event *__e = _e; \
INIT_LIST_HEAD(&__e->node); \
__e->data = _d; \
__e->fn = _f; \
} while (0)
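/*
 * Usage sketch (illustrative; my_flip_done is a hypothetical handler):
 * initialise the event once, then queue it for the next vblank.
 * armada_drm_vbl_event_add() takes dcrtc->irq_lock and holds a vblank
 * reference while the event is queued.
 *
 * static void my_flip_done(struct armada_crtc *dcrtc, void *data);
 * struct armada_vbl_event evt;
 *
 * armada_drm_vbl_event_init(&evt, my_flip_done, data);
 * armada_drm_vbl_event_add(dcrtc, &evt);
 */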
struct armada_private;
struct armada_variant {
bool has_spu_adv_reg;
uint32_t spu_adv_reg;
int (*init)(struct armada_private *, struct device *);
int (*crtc_init)(struct armada_crtc *);
int (*crtc_compute_clock)(struct armada_crtc *,
const struct drm_display_mode *,
uint32_t *);
};
/* Variant ops */
extern const struct armada_variant armada510_ops;
struct armada_private {
const struct armada_variant *variant;
struct work_struct fb_unref_work;
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
struct drm_mm linear;
struct clk *extclk[2];
struct drm_property *csc_yuv_prop;
struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
struct drm_property *colorkey_min_prop;
struct drm_property *colorkey_max_prop;
struct drm_property *colorkey_val_prop;
struct drm_property *colorkey_alpha_prop;
struct drm_property *colorkey_mode_prop;
struct drm_property *brightness_prop;
struct drm_property *contrast_prop;
struct drm_property *saturation_prop;
#ifdef CONFIG_DEBUG_FS
struct dentry *de;
#endif
};
void __armada_drm_queue_unref_work(struct drm_device *,
struct drm_framebuffer *);
void armada_drm_queue_unref_work(struct drm_device *,
struct drm_framebuffer *);
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
int armada_fbdev_init(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
int armada_overlay_plane_create(struct drm_device *, unsigned long);
int armada_drm_debugfs_init(struct drm_minor *);
void armada_drm_debugfs_cleanup(struct drm_minor *);
#endif


@@ -0,0 +1,421 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
#ifdef CONFIG_DRM_ARMADA_TDA1998X
#include <drm/i2c/tda998x.h>
#include "armada_slave.h"
static struct tda998x_encoder_params params = {
/* With 0x24, there is no translation between vp_out and int_vp
FB LCD out Pins VIP Int Vp
R:23:16 R:7:0 VPC7:0 7:0 7:0[R]
G:15:8 G:15:8 VPB7:0 23:16 23:16[G]
B:7:0 B:23:16 VPA7:0 15:8 15:8[B]
*/
.swap_a = 2,
.swap_b = 3,
.swap_c = 4,
.swap_d = 5,
.swap_e = 0,
.swap_f = 1,
.audio_cfg = BIT(2),
.audio_frame[1] = 1,
.audio_format = AFMT_SPDIF,
.audio_sample_rate = 44100,
};
static const struct armada_drm_slave_config tda19988_config = {
.i2c_adapter_id = 0,
.crtcs = 1 << 0, /* Only LCD0 at the moment */
.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
.interlace_allowed = true,
.info = {
.type = "tda998x",
.addr = 0x70,
.platform_data = &params,
},
};
#endif
static void armada_drm_unref_work(struct work_struct *work)
{
struct armada_private *priv =
container_of(work, struct armada_private, fb_unref_work);
struct drm_framebuffer *fb;
while (kfifo_get(&priv->fb_unref, &fb))
drm_framebuffer_unreference(fb);
}
/* Must be called with dev->event_lock held */
void __armada_drm_queue_unref_work(struct drm_device *dev,
struct drm_framebuffer *fb)
{
struct armada_private *priv = dev->dev_private;
/*
* Yes, we really must jump through these hoops just to store a
* _pointer_ to something into the kfifo. This is utterly insane
* and idiotic, because kfifo requires the _data_ pointed to by
* the pointer to be const, not the pointer itself. Not only that, but
* you have to pass a pointer _to_ the pointer you want stored.
*/
const struct drm_framebuffer *silly_api_alert = fb;
WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
schedule_work(&priv->fb_unref_work);
}
void armada_drm_queue_unref_work(struct drm_device *dev,
struct drm_framebuffer *fb)
{
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
__armada_drm_queue_unref_work(dev, fb);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
const struct platform_device_id *id;
struct armada_private *priv;
struct resource *res[ARRAY_SIZE(priv->dcrtc)];
struct resource *mem = NULL;
int ret, n, i;
memset(res, 0, sizeof(res));
for (n = i = 0; ; n++) {
struct resource *r = platform_get_resource(dev->platformdev,
IORESOURCE_MEM, n);
if (!r)
break;
/* Resources above 64K are graphics memory */
if (resource_size(r) > SZ_64K)
mem = r;
else if (i < ARRAY_SIZE(priv->dcrtc))
res[i++] = r;
else
return -EINVAL;
}
if (!res[0] || !mem)
return -ENXIO;
if (!devm_request_mem_region(dev->dev, mem->start,
resource_size(mem), "armada-drm"))
return -EBUSY;
priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
DRM_ERROR("failed to allocate private\n");
return -ENOMEM;
}
dev->dev_private = priv;
/* Get the implementation specific driver data. */
id = platform_get_device_id(dev->platformdev);
if (!id)
return -ENXIO;
priv->variant = (struct armada_variant *)id->driver_data;
ret = priv->variant->init(priv, dev->dev);
if (ret)
return ret;
INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
INIT_KFIFO(priv->fb_unref);
/* Mode setting support */
drm_mode_config_init(dev);
dev->mode_config.min_width = 320;
dev->mode_config.min_height = 200;
/*
* With vscale enabled, the maximum width is 1920 due to the
* 1920 by 3 lines RAM
*/
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 2048;
dev->mode_config.preferred_depth = 24;
dev->mode_config.funcs = &armada_drm_mode_config_funcs;
drm_mm_init(&priv->linear, mem->start, resource_size(mem));
/* Create all LCD controllers */
for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
if (!res[n])
break;
ret = armada_drm_crtc_create(dev, n, res[n]);
if (ret)
goto err_kms;
}
#ifdef CONFIG_DRM_ARMADA_TDA1998X
ret = armada_drm_connector_slave_create(dev, &tda19988_config);
if (ret)
goto err_kms;
#endif
ret = drm_vblank_init(dev, n);
if (ret)
goto err_kms;
ret = drm_irq_install(dev);
if (ret)
goto err_kms;
dev->vblank_disable_allowed = 1;
ret = armada_fbdev_init(dev);
if (ret)
goto err_irq;
drm_kms_helper_poll_init(dev);
return 0;
err_irq:
drm_irq_uninstall(dev);
err_kms:
drm_mode_config_cleanup(dev);
drm_mm_takedown(&priv->linear);
flush_work(&priv->fb_unref_work);
return ret;
}
static int armada_drm_unload(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
drm_kms_helper_poll_fini(dev);
armada_fbdev_fini(dev);
drm_irq_uninstall(dev);
drm_mode_config_cleanup(dev);
drm_mm_takedown(&priv->linear);
flush_work(&priv->fb_unref_work);
dev->dev_private = NULL;
return 0;
}
void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
struct armada_vbl_event *evt)
{
unsigned long flags;
spin_lock_irqsave(&dcrtc->irq_lock, flags);
if (list_empty(&evt->node)) {
list_add_tail(&evt->node, &dcrtc->vbl_list);
drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
}
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
struct armada_vbl_event *evt)
{
if (!list_empty(&evt->node)) {
list_del_init(&evt->node);
drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
}
}
void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
struct armada_vbl_event *evt)
{
unsigned long flags;
spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_vbl_event_remove(dcrtc, evt);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
/* These are called under the vbl_lock. */
static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
{
struct armada_private *priv = dev->dev_private;
armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
return 0;
}
static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
{
struct armada_private *priv = dev->dev_private;
armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
}
static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct armada_private *priv = dev->dev_private;
struct armada_crtc *dcrtc = priv->dcrtc[0];
uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
irqreturn_t handled = IRQ_NONE;
/*
* This is ridiculous - rather than writing bits to clear, we
* have to set the actual status register value. This is racy.
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
/* Mask out those interrupts we haven't enabled */
v = stat & dcrtc->irq_ena;
if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
armada_drm_crtc_irq(dcrtc, stat);
handled = IRQ_HANDLED;
}
return handled;
}
static int armada_drm_irq_postinstall(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
struct armada_crtc *dcrtc = priv->dcrtc[0];
spin_lock_irq(&dev->vbl_lock);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
spin_unlock_irq(&dev->vbl_lock);
return 0;
}
static void armada_drm_irq_uninstall(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
struct armada_crtc *dcrtc = priv->dcrtc[0];
writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
}
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
DRM_UNLOCKED),
};
static const struct file_operations armada_drm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = drm_read,
.poll = drm_poll,
.unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.open = drm_open,
.release = drm_release,
};
static struct drm_driver armada_drm_driver = {
.load = armada_drm_load,
.open = NULL,
.preclose = NULL,
.postclose = NULL,
.lastclose = NULL,
.unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
.irq_handler = armada_drm_irq_handler,
.irq_postinstall = armada_drm_irq_postinstall,
.irq_uninstall = armada_drm_irq_uninstall,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = armada_drm_debugfs_init,
.debugfs_cleanup = armada_drm_debugfs_cleanup,
#endif
.gem_free_object = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = armada_gem_prime_export,
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
.dumb_map_offset = armada_gem_dumb_map_offset,
.dumb_destroy = armada_gem_dumb_destroy,
.gem_vm_ops = &armada_gem_vm_ops,
.major = 1,
.minor = 0,
.name = "armada-drm",
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
DRIVER_HAVE_IRQ | DRIVER_PRIME,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
static int armada_drm_probe(struct platform_device *pdev)
{
return drm_platform_init(&armada_drm_driver, pdev);
}
static int armada_drm_remove(struct platform_device *pdev)
{
drm_platform_exit(&armada_drm_driver, pdev);
return 0;
}
static const struct platform_device_id armada_drm_platform_ids[] = {
{
.name = "armada-drm",
.driver_data = (unsigned long)&armada510_ops,
}, {
.name = "armada-510-drm",
.driver_data = (unsigned long)&armada510_ops,
},
{ },
};
MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
static struct platform_driver armada_drm_platform_driver = {
.probe = armada_drm_probe,
.remove = armada_drm_remove,
.driver = {
.name = "armada-drm",
.owner = THIS_MODULE,
},
.id_table = armada_drm_platform_ids,
};
static int __init armada_drm_init(void)
{
armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
return platform_driver_register(&armada_drm_platform_driver);
}
module_init(armada_drm_init);
static void __exit armada_drm_exit(void)
{
platform_driver_unregister(&armada_drm_platform_driver);
}
module_exit(armada_drm_exit);
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_DESCRIPTION("Armada DRM Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:armada-drm");


@@ -0,0 +1,170 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
static void armada_fb_destroy(struct drm_framebuffer *fb)
{
struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
drm_framebuffer_cleanup(&dfb->fb);
drm_gem_object_unreference_unlocked(&dfb->obj->obj);
kfree(dfb);
}
static int armada_fb_create_handle(struct drm_framebuffer *fb,
struct drm_file *dfile, unsigned int *handle)
{
struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
}
static const struct drm_framebuffer_funcs armada_fb_funcs = {
.destroy = armada_fb_destroy,
.create_handle = armada_fb_create_handle,
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
{
struct armada_framebuffer *dfb;
uint8_t format, config;
int ret;
switch (mode->pixel_format) {
#define FMT(drm, fmt, mod) \
case DRM_FORMAT_##drm: \
format = CFG_##fmt; \
config = mod; \
break
FMT(RGB565, 565, CFG_SWAPRB);
FMT(BGR565, 565, 0);
FMT(ARGB1555, 1555, CFG_SWAPRB);
FMT(ABGR1555, 1555, 0);
FMT(RGB888, 888PACK, CFG_SWAPRB);
FMT(BGR888, 888PACK, 0);
FMT(XRGB8888, X888, CFG_SWAPRB);
FMT(XBGR8888, X888, 0);
FMT(ARGB8888, 8888, CFG_SWAPRB);
FMT(ABGR8888, 8888, 0);
FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
FMT(UYVY, 422PACK, CFG_YUV2RGB);
FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV);
FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU);
FMT(YUV422, 422, CFG_YUV2RGB);
FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV);
FMT(YUV420, 420, CFG_YUV2RGB);
FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV);
FMT(C8, PSEUDO8, 0);
#undef FMT
default:
return ERR_PTR(-EINVAL);
}
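/*
 * For reference, each FMT() line above expands to a complete case
 * label, e.g. FMT(RGB565, 565, CFG_SWAPRB) becomes:
 *
 * case DRM_FORMAT_RGB565:
 * format = CFG_565;
 * config = CFG_SWAPRB;
 * break;
 */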
dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
if (!dfb) {
DRM_ERROR("failed to allocate Armada fb object\n");
return ERR_PTR(-ENOMEM);
}
dfb->fmt = format;
dfb->mod = config;
dfb->obj = obj;
drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
if (ret) {
kfree(dfb);
return ERR_PTR(ret);
}
/*
* Take a reference on our object as we're successful - the
* caller already holds a reference, which keeps us safe for
* the above call, but the caller will drop their reference
* to it. Hence we need to take our own reference.
*/
drm_gem_object_reference(&obj->obj);
return dfb;
}
static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
{
struct armada_gem_object *obj;
struct armada_framebuffer *dfb;
int ret;
DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
mode->width, mode->height, mode->pixel_format,
mode->flags, mode->pitches[0], mode->pitches[1],
mode->pitches[2]);
/* We can only handle a single plane at the moment */
if (drm_format_num_planes(mode->pixel_format) > 1 &&
(mode->handles[0] != mode->handles[1] ||
mode->handles[0] != mode->handles[2])) {
ret = -EINVAL;
goto err;
}
obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
if (!obj) {
ret = -ENOENT;
goto err;
}
if (obj->obj.import_attach && !obj->sgt) {
ret = armada_gem_map_import(obj);
if (ret)
goto err_unref;
}
/* Framebuffer objects must have a valid device address for scanout */
if (obj->dev_addr == DMA_ERROR_CODE) {
ret = -EINVAL;
goto err_unref;
}
dfb = armada_framebuffer_create(dev, mode, obj);
if (IS_ERR(dfb)) {
ret = PTR_ERR(dfb);
goto err;
}
drm_gem_object_unreference_unlocked(&obj->obj);
return &dfb->fb;
err_unref:
drm_gem_object_unreference_unlocked(&obj->obj);
err:
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
}
static void armada_output_poll_changed(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
struct drm_fb_helper *fbh = priv->fbdev;
if (fbh)
drm_fb_helper_hotplug_event(fbh);
}
const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
.fb_create = armada_fb_create,
.output_poll_changed = armada_output_poll_changed,
};


@@ -0,0 +1,24 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_FB_H
#define ARMADA_FB_H
struct armada_framebuffer {
struct drm_framebuffer fb;
struct armada_gem_object *obj;
uint8_t fmt;
uint8_t mod;
};
#define drm_fb_to_armada_fb(dfb) \
container_of(dfb, struct armada_framebuffer, fb)
#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
#endif


@@ -0,0 +1,202 @@
/*
* Copyright (C) 2012 Russell King
* Written from the i915 driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
static /*const*/ struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};
static int armada_fb_create(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
struct drm_mode_fb_cmd2 mode;
struct armada_framebuffer *dfb;
struct armada_gem_object *obj;
struct fb_info *info;
int size, ret;
void *ptr;
memset(&mode, 0, sizeof(mode));
mode.width = sizes->surface_width;
mode.height = sizes->surface_height;
mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = mode.pitches[0] * mode.height;
obj = armada_gem_alloc_private_object(dev, size);
if (!obj) {
DRM_ERROR("failed to allocate fb memory\n");
return -ENOMEM;
}
ret = armada_gem_linear_back(dev, obj);
if (ret) {
drm_gem_object_unreference_unlocked(&obj->obj);
return ret;
}
ptr = armada_gem_map_object(dev, obj);
if (!ptr) {
drm_gem_object_unreference_unlocked(&obj->obj);
return -ENOMEM;
}
dfb = armada_framebuffer_create(dev, &mode, obj);
/*
* A reference is now held by the framebuffer object if
* successful, otherwise this drops the ref for the error path.
*/
drm_gem_object_unreference_unlocked(&obj->obj);
if (IS_ERR(dfb))
return PTR_ERR(dfb);
info = framebuffer_alloc(0, dev->dev);
if (!info) {
ret = -ENOMEM;
goto err_fballoc;
}
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto err_fbcmap;
}
strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
info->par = fbh;
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
info->screen_size = obj->obj.size;
info->screen_base = ptr;
fbh->fb = &dfb->fb;
fbh->fbdev = info;
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
dfb->fb.width, dfb->fb.height,
dfb->fb.bits_per_pixel, obj->phys_addr);
return 0;
err_fbcmap:
framebuffer_release(info);
err_fballoc:
dfb->fb.funcs->destroy(&dfb->fb);
return ret;
}
static int armada_fb_probe(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
int ret = 0;
if (!fbh->fb) {
ret = armada_fb_create(fbh, sizes);
if (ret == 0)
ret = 1;
}
return ret;
}
static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
.gamma_set = armada_drm_crtc_gamma_set,
.gamma_get = armada_drm_crtc_gamma_get,
.fb_probe = armada_fb_probe,
};
int armada_fbdev_init(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
struct drm_fb_helper *fbh;
int ret;
fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
if (!fbh)
return -ENOMEM;
priv->fbdev = fbh;
fbh->funcs = &armada_fb_helper_funcs;
ret = drm_fb_helper_init(dev, fbh, 1, 1);
if (ret) {
DRM_ERROR("failed to initialize drm fb helper\n");
goto err_fb_helper;
}
ret = drm_fb_helper_single_add_all_connectors(fbh);
if (ret) {
DRM_ERROR("failed to add fb connectors\n");
goto err_fb_setup;
}
ret = drm_fb_helper_initial_config(fbh, 32);
if (ret) {
DRM_ERROR("failed to set initial config\n");
goto err_fb_setup;
}
return 0;
err_fb_setup:
drm_fb_helper_fini(fbh);
err_fb_helper:
priv->fbdev = NULL;
return ret;
}
void armada_fbdev_fini(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
struct drm_fb_helper *fbh = priv->fbdev;
if (fbh) {
struct fb_info *info = fbh->fbdev;
if (info) {
unregister_framebuffer(info);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
if (fbh->fb)
fbh->fb->funcs->destroy(fbh->fb);
drm_fb_helper_fini(fbh);
priv->fbdev = NULL;
}
}


@@ -0,0 +1,611 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
unsigned long addr = (unsigned long)vmf->virtual_address;
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
int ret;
pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
ret = vm_insert_pfn(vma, addr, pfn);
switch (ret) {
case 0:
case -EBUSY:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
}
}
const struct vm_operations_struct armada_gem_vm_ops = {
.fault = armada_gem_vm_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static size_t roundup_gem_size(size_t size)
{
return roundup(size, PAGE_SIZE);
}
/* dev->struct_mutex is held here */
void armada_gem_free_object(struct drm_gem_object *obj)
{
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
DRM_DEBUG_DRIVER("release obj %p\n", dobj);
drm_gem_free_mmap_offset(&dobj->obj);
if (dobj->page) {
/* page backed memory */
unsigned int order = get_order(dobj->obj.size);
__free_pages(dobj->page, order);
} else if (dobj->linear) {
/* linear backed memory */
drm_mm_remove_node(dobj->linear);
kfree(dobj->linear);
if (dobj->addr)
iounmap(dobj->addr);
}
if (dobj->obj.import_attach) {
/* We only ever display imported data */
dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
drm_gem_object_release(&dobj->obj);
kfree(dobj);
}
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
struct armada_private *priv = dev->dev_private;
size_t size = obj->obj.size;
if (obj->page || obj->linear)
return 0;
/*
* If it is a small allocation (typically cursor, which will
* be 32x64 or 64x32 ARGB pixels) try to get it from the system.
* Framebuffers will never be this small (our minimum size for
* framebuffers is larger than this anyway.) Such objects are
* only accessed by the CPU so we don't need any special handling
* here.
*/
if (size <= 8192) {
unsigned int order = get_order(size);
struct page *p = alloc_pages(GFP_KERNEL, order);
if (p) {
obj->addr = page_address(p);
obj->phys_addr = page_to_phys(p);
obj->page = p;
memset(obj->addr, 0, PAGE_ALIGN(size));
}
}
/*
* We could grab something from CMA if it's enabled, but that
* involves building in a problem:
*
* CMA's interface uses dma_alloc_coherent(), which provides us
* with a CPU virtual address and a device address.
*
* The CPU virtual address may be either an address in the kernel
* direct mapped region (for example, as it would be on x86) or
* it may be remapped into another part of kernel memory space
* (eg, as it would be on ARM.) This means virt_to_phys() on the
* returned virtual address may be invalid, depending on the architecture
* implementation.
*
* The device address may also not be a physical address; it may
* be that there is some kind of remapping between the device and
* system RAM, which makes the use of the device address also
* unsafe to re-use as a physical address.
*
* This makes DRM usage of dma_alloc_coherent() in a generic way
* at best very questionable and unsafe.
*/
/* Otherwise, grab it from our linear allocation */
if (!obj->page) {
struct drm_mm_node *node;
unsigned align = min_t(unsigned, size, SZ_2M);
void __iomem *ptr;
int ret;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOSPC;
mutex_lock(&dev->struct_mutex);
ret = drm_mm_insert_node(&priv->linear, node, size, align,
DRM_MM_SEARCH_DEFAULT);
mutex_unlock(&dev->struct_mutex);
if (ret) {
kfree(node);
return ret;
}
obj->linear = node;
/* Ensure that the memory we're returning is cleared. */
ptr = ioremap_wc(obj->linear->start, size);
if (!ptr) {
mutex_lock(&dev->struct_mutex);
drm_mm_remove_node(obj->linear);
mutex_unlock(&dev->struct_mutex);
kfree(obj->linear);
obj->linear = NULL;
return -ENOMEM;
}
memset_io(ptr, 0, size);
iounmap(ptr);
obj->phys_addr = obj->linear->start;
obj->dev_addr = obj->linear->start;
}
DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
obj, obj->phys_addr, obj->dev_addr);
return 0;
}
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
/* only linear objects need to be ioremap'd */
if (!dobj->addr && dobj->linear)
dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
return dobj->addr;
}
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
struct armada_gem_object *obj;
size = roundup_gem_size(size);
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
drm_gem_private_object_init(dev, &obj->obj, size);
obj->dev_addr = DMA_ERROR_CODE;
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
return obj;
}
struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct armada_gem_object *obj;
struct address_space *mapping;
size = roundup_gem_size(size);
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
if (drm_gem_object_init(dev, &obj->obj, size)) {
kfree(obj);
return NULL;
}
obj->dev_addr = DMA_ERROR_CODE;
mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
return obj;
}
/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct armada_gem_object *dobj;
u32 handle;
size_t size;
int ret;
args->pitch = armada_pitch(args->width, args->bpp);
args->size = size = args->pitch * args->height;
dobj = armada_gem_alloc_private_object(dev, size);
if (dobj == NULL)
return -ENOMEM;
ret = armada_gem_linear_back(dev, dobj);
if (ret)
goto err;
ret = drm_gem_handle_create(file, &dobj->obj, &handle);
if (ret)
goto err;
args->handle = handle;
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
drm_gem_object_unreference_unlocked(&dobj->obj);
return ret;
}
int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct armada_gem_object *obj;
int ret = 0;
mutex_lock(&dev->struct_mutex);
obj = armada_gem_object_lookup(dev, file, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
ret = -EINVAL;
goto err_unlock;
}
/* Don't allow imported objects to be mapped */
if (obj->obj.import_attach) {
ret = -EINVAL;
goto err_unlock;
}
ret = drm_gem_create_mmap_offset(&obj->obj);
if (ret == 0) {
*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
}
drm_gem_object_unreference(&obj->obj);
err_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
uint32_t handle)
{
return drm_gem_handle_delete(file, handle);
}
/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_create *args = data;
struct armada_gem_object *dobj;
size_t size;
u32 handle;
int ret;
if (args->size == 0)
return -ENOMEM;
size = args->size;
dobj = armada_gem_alloc_object(dev, size);
if (dobj == NULL)
return -ENOMEM;
ret = drm_gem_handle_create(file, &dobj->obj, &handle);
if (ret)
goto err;
args->handle = handle;
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
drm_gem_object_unreference_unlocked(&dobj->obj);
return ret;
}
/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_mmap *args = data;
struct armada_gem_object *dobj;
unsigned long addr;
dobj = armada_gem_object_lookup(dev, file, args->handle);
if (dobj == NULL)
return -ENOENT;
if (!dobj->obj.filp) {
drm_gem_object_unreference(&dobj->obj);
return -EINVAL;
}
addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, args->offset);
drm_gem_object_unreference(&dobj->obj);
if (IS_ERR_VALUE(addr))
return addr;
args->addr = addr;
return 0;
}
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_pwrite *args = data;
struct armada_gem_object *dobj;
char __user *ptr;
int ret;
DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
args->handle, args->offset, args->size, args->ptr);
if (args->size == 0)
return 0;
ptr = (char __user *)(uintptr_t)args->ptr;
if (!access_ok(VERIFY_READ, ptr, args->size))
return -EFAULT;
ret = fault_in_multipages_readable(ptr, args->size);
if (ret)
return ret;
dobj = armada_gem_object_lookup(dev, file, args->handle);
if (dobj == NULL)
return -ENOENT;
/* Must be a kernel-mapped object */
if (!dobj->addr)
return -EINVAL;
if (args->offset > dobj->obj.size ||
args->size > dobj->obj.size - args->offset) {
DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
ret = -EINVAL;
goto unref;
}
if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
ret = -EFAULT;
} else if (dobj->update) {
dobj->update(dobj->update_data);
ret = 0;
}
unref:
drm_gem_object_unreference_unlocked(&dobj->obj);
return ret;
}
/* Prime support */
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
struct scatterlist *sg;
struct sg_table *sgt;
int i, num;
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return NULL;
if (dobj->obj.filp) {
struct address_space *mapping;
gfp_t gfp;
int count;
count = dobj->obj.size / PAGE_SIZE;
if (sg_alloc_table(sgt, count, GFP_KERNEL))
goto free_sgt;
mapping = file_inode(dobj->obj.filp)->i_mapping;
gfp = mapping_gfp_mask(mapping);
for_each_sg(sgt->sgl, sg, count, i) {
struct page *page;
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
num = i;
goto release;
}
sg_set_page(sg, page, PAGE_SIZE, 0);
}
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
num = sgt->nents;
goto release;
}
} else if (dobj->page) {
/* Single contiguous page */
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
goto free_sgt;
sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
goto free_table;
} else if (dobj->linear) {
/* Single contiguous physical region - no struct page */
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
goto free_sgt;
sg_dma_address(sgt->sgl) = dobj->dev_addr;
sg_dma_len(sgt->sgl) = dobj->obj.size;
} else {
goto free_sgt;
}
return sgt;
release:
for_each_sg(sgt->sgl, sg, num, i)
page_cache_release(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
kfree(sgt);
return NULL;
}
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt, enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
int i;
if (!dobj->linear)
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
page_cache_release(sg_page(sg));
}
sg_free_table(sgt);
kfree(sgt);
}
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
return NULL;
}
static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
return -EINVAL;
}
static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.kmap_atomic = armada_gem_dmabuf_no_kmap,
.kunmap_atomic = armada_gem_dmabuf_no_kunmap,
.kmap = armada_gem_dmabuf_no_kmap,
.kunmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
};
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
int flags)
{
return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
O_RDWR);
}
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
struct dma_buf_attachment *attach;
struct armada_gem_object *dobj;
if (buf->ops == &armada_gem_prime_dmabuf_ops) {
struct drm_gem_object *obj = buf->priv;
if (obj->dev == dev) {
/*
* Importing our own dmabuf(s) increases the
* refcount on the gem object itself.
*/
drm_gem_object_reference(obj);
dma_buf_put(buf);
return obj;
}
}
attach = dma_buf_attach(buf, dev->dev);
if (IS_ERR(attach))
return ERR_CAST(attach);
dobj = armada_gem_alloc_private_object(dev, buf->size);
if (!dobj) {
dma_buf_detach(buf, attach);
return ERR_PTR(-ENOMEM);
}
dobj->obj.import_attach = attach;
/*
* Don't call dma_buf_map_attachment() here - it maps the
* scatterlist immediately for DMA, and this is not always
* an appropriate thing to do.
*/
return &dobj->obj;
}
int armada_gem_map_import(struct armada_gem_object *dobj)
{
int ret;
dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
DMA_TO_DEVICE);
if (!dobj->sgt) {
DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
return -EINVAL;
}
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
return ret;
}
if (dobj->sgt->nents > 1) {
DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
return -EINVAL;
}
if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
return -EINVAL;
}
dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
return 0;
}


@@ -0,0 +1,52 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_GEM_H
#define ARMADA_GEM_H
/* GEM */
struct armada_gem_object {
struct drm_gem_object obj;
void *addr;
phys_addr_t phys_addr;
resource_size_t dev_addr;
struct drm_mm_node *linear; /* for linear backed */
struct page *page; /* for page backed */
struct sg_table *sgt; /* for imported */
void (*update)(void *);
void *update_data;
};
extern const struct vm_operations_struct armada_gem_vm_ops;
#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
void armada_gem_free_object(struct drm_gem_object *);
int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
size_t);
int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *);
int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
uint32_t, uint64_t *);
int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
uint32_t);
struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
struct dma_buf *);
int armada_gem_map_import(struct armada_gem_object *);
static inline struct armada_gem_object *armada_gem_object_lookup(
struct drm_device *dev, struct drm_file *dfile, unsigned handle)
{
struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
return obj ? drm_to_armada_gem(obj) : NULL;
}
#endif


@ -0,0 +1,318 @@
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_HW_H
#define ARMADA_HW_H
/*
* Note: the following registers are written from IRQ context:
* LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
* LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
* LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
* LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
*/
enum {
LCD_SPU_ADV_REG = 0x0084, /* Armada 510 */
LCD_SPU_DMA_START_ADDR_Y0 = 0x00c0,
LCD_SPU_DMA_START_ADDR_U0 = 0x00c4,
LCD_SPU_DMA_START_ADDR_V0 = 0x00c8,
LCD_CFG_DMA_START_ADDR_0 = 0x00cc,
LCD_SPU_DMA_START_ADDR_Y1 = 0x00d0,
LCD_SPU_DMA_START_ADDR_U1 = 0x00d4,
LCD_SPU_DMA_START_ADDR_V1 = 0x00d8,
LCD_CFG_DMA_START_ADDR_1 = 0x00dc,
LCD_SPU_DMA_PITCH_YC = 0x00e0,
LCD_SPU_DMA_PITCH_UV = 0x00e4,
LCD_SPU_DMA_OVSA_HPXL_VLN = 0x00e8,
LCD_SPU_DMA_HPXL_VLN = 0x00ec,
LCD_SPU_DZM_HPXL_VLN = 0x00f0,
LCD_CFG_GRA_START_ADDR0 = 0x00f4,
LCD_CFG_GRA_START_ADDR1 = 0x00f8,
LCD_CFG_GRA_PITCH = 0x00fc,
LCD_SPU_GRA_OVSA_HPXL_VLN = 0x0100,
LCD_SPU_GRA_HPXL_VLN = 0x0104,
LCD_SPU_GZM_HPXL_VLN = 0x0108,
LCD_SPU_HWC_OVSA_HPXL_VLN = 0x010c,
LCD_SPU_HWC_HPXL_VLN = 0x0110,
LCD_SPUT_V_H_TOTAL = 0x0114,
LCD_SPU_V_H_ACTIVE = 0x0118,
LCD_SPU_H_PORCH = 0x011c,
LCD_SPU_V_PORCH = 0x0120,
LCD_SPU_BLANKCOLOR = 0x0124,
LCD_SPU_ALPHA_COLOR1 = 0x0128,
LCD_SPU_ALPHA_COLOR2 = 0x012c,
LCD_SPU_COLORKEY_Y = 0x0130,
LCD_SPU_COLORKEY_U = 0x0134,
LCD_SPU_COLORKEY_V = 0x0138,
LCD_CFG_RDREG4F = 0x013c, /* Armada 510 */
LCD_SPU_SPI_RXDATA = 0x0140,
LCD_SPU_ISA_RXDATA = 0x0144,
LCD_SPU_HWC_RDDAT = 0x0158,
LCD_SPU_GAMMA_RDDAT = 0x015c,
LCD_SPU_PALETTE_RDDAT = 0x0160,
LCD_SPU_IOPAD_IN = 0x0178,
LCD_CFG_RDREG5F = 0x017c,
LCD_SPU_SPI_CTRL = 0x0180,
LCD_SPU_SPI_TXDATA = 0x0184,
LCD_SPU_SMPN_CTRL = 0x0188,
LCD_SPU_DMA_CTRL0 = 0x0190,
LCD_SPU_DMA_CTRL1 = 0x0194,
LCD_SPU_SRAM_CTRL = 0x0198,
LCD_SPU_SRAM_WRDAT = 0x019c,
LCD_SPU_SRAM_PARA0 = 0x01a0, /* Armada 510 */
LCD_SPU_SRAM_PARA1 = 0x01a4,
LCD_CFG_SCLK_DIV = 0x01a8,
LCD_SPU_CONTRAST = 0x01ac,
LCD_SPU_SATURATION = 0x01b0,
LCD_SPU_CBSH_HUE = 0x01b4,
LCD_SPU_DUMB_CTRL = 0x01b8,
LCD_SPU_IOPAD_CONTROL = 0x01bc,
LCD_SPU_IRQ_ENA = 0x01c0,
LCD_SPU_IRQ_ISR = 0x01c4,
};
/* For LCD_SPU_ADV_REG */
enum {
ADV_VSYNC_L_OFF = 0xfff << 20,
ADV_GRACOLORKEY = 1 << 19,
ADV_VIDCOLORKEY = 1 << 18,
ADV_HWC32BLEND = 1 << 15,
ADV_HWC32ARGB = 1 << 14,
ADV_HWC32ENABLE = 1 << 13,
ADV_VSYNCOFFEN = 1 << 12,
ADV_VSYNC_H_OFF = 0xfff << 0,
};
enum {
CFG_565 = 0,
CFG_1555 = 1,
CFG_888PACK = 2,
CFG_X888 = 3,
CFG_8888 = 4,
CFG_422PACK = 5,
CFG_422 = 6,
CFG_420 = 7,
CFG_PSEUDO4 = 9,
CFG_PSEUDO8 = 10,
CFG_SWAPRB = 1 << 4,
CFG_SWAPUV = 1 << 3,
CFG_SWAPYU = 1 << 2,
CFG_YUV2RGB = 1 << 1,
};
/* For LCD_SPU_DMA_CTRL0 */
enum {
CFG_NOBLENDING = 1 << 31,
CFG_GAMMA_ENA = 1 << 30,
CFG_CBSH_ENA = 1 << 29,
CFG_PALETTE_ENA = 1 << 28,
CFG_ARBFAST_ENA = 1 << 27,
CFG_HWC_1BITMOD = 1 << 26,
CFG_HWC_1BITENA = 1 << 25,
CFG_HWC_ENA = 1 << 24,
CFG_DMAFORMAT = 0xf << 20,
#define CFG_DMA_FMT(x) ((x) << 20)
CFG_GRAFORMAT = 0xf << 16,
#define CFG_GRA_FMT(x) ((x) << 16)
#define CFG_GRA_MOD(x) ((x) << 8)
CFG_GRA_FTOGGLE = 1 << 15,
CFG_GRA_HSMOOTH = 1 << 14,
CFG_GRA_TSTMODE = 1 << 13,
CFG_GRA_ENA = 1 << 8,
#define CFG_DMA_MOD(x) ((x) << 0)
CFG_DMA_FTOGGLE = 1 << 7,
CFG_DMA_HSMOOTH = 1 << 6,
CFG_DMA_TSTMODE = 1 << 5,
CFG_DMA_ENA = 1 << 0,
};
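/*
 * Illustrative sketch (not driver code): composing a DMA_CTRL0 value
 * from the fields above, as the overlay code later in this commit does.
 * The pixel format occupies bits 23:20; the CFG_SWAPxx and CFG_YUV2RGB
 * modifier flags already sit at bits 4:1, hence CFG_DMA_MOD's shift of
 * 0 (the graphics-plane variant CFG_GRA_MOD lifts the same flags by 8).
 */
static inline unsigned int example_dma_ctrl0(unsigned int fmt,
	unsigned int mod)
{
	/* e.g. example_dma_ctrl0(CFG_422PACK, CFG_YUV2RGB) for packed YUV */
	return CFG_DMA_FMT(fmt) | CFG_DMA_MOD(mod) | CFG_DMA_ENA;
}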
enum {
CKMODE_DISABLE = 0,
CKMODE_Y = 1,
CKMODE_U = 2,
CKMODE_RGB = 3,
CKMODE_V = 4,
CKMODE_R = 5,
CKMODE_G = 6,
CKMODE_B = 7,
};
/* For LCD_SPU_DMA_CTRL1 */
enum {
CFG_FRAME_TRIG = 1 << 31,
CFG_VSYNC_INV = 1 << 27,
CFG_CKMODE_MASK = 0x7 << 24,
#define CFG_CKMODE(x) ((x) << 24)
CFG_CARRY = 1 << 23,
CFG_GATED_CLK = 1 << 21,
CFG_PWRDN_ENA = 1 << 20,
CFG_DSCALE_MASK = 0x3 << 18,
CFG_DSCALE_NONE = 0x0 << 18,
CFG_DSCALE_HALF = 0x1 << 18,
CFG_DSCALE_QUAR = 0x2 << 18,
CFG_ALPHAM_MASK = 0x3 << 16,
CFG_ALPHAM_VIDEO = 0x0 << 16,
CFG_ALPHAM_GRA = 0x1 << 16,
CFG_ALPHAM_CFG = 0x2 << 16,
CFG_ALPHA_MASK = 0xff << 8,
CFG_PIXCMD_MASK = 0xff,
};
/* For LCD_SPU_SRAM_CTRL */
enum {
SRAM_READ = 0 << 14,
SRAM_WRITE = 2 << 14,
SRAM_INIT = 3 << 14,
SRAM_HWC32_RAM1 = 0xc << 8,
SRAM_HWC32_RAM2 = 0xd << 8,
SRAM_HWC32_RAMR = SRAM_HWC32_RAM1,
SRAM_HWC32_RAMG = SRAM_HWC32_RAM2,
SRAM_HWC32_RAMB = 0xe << 8,
SRAM_HWC32_TRAN = 0xf << 8,
SRAM_HWC = 0xf << 8,
};
/* For LCD_SPU_SRAM_PARA1 */
enum {
CFG_CSB_256x32 = 1 << 15, /* cursor */
CFG_CSB_256x24 = 1 << 14, /* palette */
CFG_CSB_256x8 = 1 << 13, /* gamma */
CFG_PDWN1920x32 = 1 << 8, /* Armada 510: power down vscale ram */
CFG_PDWN256x32 = 1 << 7, /* power down cursor */
CFG_PDWN256x24 = 1 << 6, /* power down palette */
CFG_PDWN256x8 = 1 << 5, /* power down gamma */
CFG_PDWNHWC = 1 << 4, /* Armada 510: power down all hwc ram */
CFG_PDWN32x32 = 1 << 3, /* power down slave->smart ram */
CFG_PDWN16x66 = 1 << 2, /* power down UV fifo */
CFG_PDWN32x66 = 1 << 1, /* power down Y fifo */
CFG_PDWN64x66 = 1 << 0, /* power down graphic fifo */
};
/* For LCD_CFG_SCLK_DIV */
enum {
/* Armada 510 */
SCLK_510_AXI = 0x0 << 30,
SCLK_510_EXTCLK0 = 0x1 << 30,
SCLK_510_PLL = 0x2 << 30,
SCLK_510_EXTCLK1 = 0x3 << 30,
SCLK_510_DIV_CHANGE = 1 << 29,
SCLK_510_FRAC_DIV_MASK = 0xfff << 16,
SCLK_510_INT_DIV_MASK = 0xffff << 0,
/* Armada 16x */
SCLK_16X_AHB = 0x0 << 28,
SCLK_16X_PCLK = 0x1 << 28,
SCLK_16X_AXI = 0x4 << 28,
SCLK_16X_PLL = 0x8 << 28,
SCLK_16X_FRAC_DIV_MASK = 0xfff << 16,
SCLK_16X_INT_DIV_MASK = 0xffff << 0,
};
/* For LCD_SPU_DUMB_CTRL */
enum {
DUMB16_RGB565_0 = 0x0 << 28,
DUMB16_RGB565_1 = 0x1 << 28,
DUMB18_RGB666_0 = 0x2 << 28,
DUMB18_RGB666_1 = 0x3 << 28,
DUMB12_RGB444_0 = 0x4 << 28,
DUMB12_RGB444_1 = 0x5 << 28,
DUMB24_RGB888_0 = 0x6 << 28,
DUMB_BLANK = 0x7 << 28,
DUMB_MASK = 0xf << 28,
CFG_BIAS_OUT = 1 << 8,
CFG_REV_RGB = 1 << 7,
CFG_INV_CBLANK = 1 << 6,
CFG_INV_CSYNC = 1 << 5, /* Normally active high */
CFG_INV_HENA = 1 << 4,
CFG_INV_VSYNC = 1 << 3, /* Normally active high */
CFG_INV_HSYNC = 1 << 2, /* Normally active high */
CFG_INV_PCLK = 1 << 1,
CFG_DUMB_ENA = 1 << 0,
};
/* For LCD_SPU_IOPAD_CONTROL */
enum {
CFG_VSCALE_LN_EN = 3 << 18,
CFG_GRA_VM_ENA = 1 << 15,
CFG_DMA_VM_ENA = 1 << 13,
CFG_CMD_VM_ENA = 1 << 11,
CFG_CSC_MASK = 3 << 8,
CFG_CSC_YUV_CCIR709 = 1 << 9,
CFG_CSC_YUV_CCIR601 = 0 << 9,
CFG_CSC_RGB_STUDIO = 1 << 8,
CFG_CSC_RGB_COMPUTER = 0 << 8,
CFG_IOPAD_MASK = 0xf << 0,
CFG_IOPAD_DUMB24 = 0x0 << 0,
CFG_IOPAD_DUMB18SPI = 0x1 << 0,
CFG_IOPAD_DUMB18GPIO = 0x2 << 0,
CFG_IOPAD_DUMB16SPI = 0x3 << 0,
CFG_IOPAD_DUMB16GPIO = 0x4 << 0,
CFG_IOPAD_DUMB12GPIO = 0x5 << 0,
CFG_IOPAD_SMART18 = 0x6 << 0,
CFG_IOPAD_SMART16 = 0x7 << 0,
CFG_IOPAD_SMART8 = 0x8 << 0,
};
#define IOPAD_DUMB24 0x0
/* For LCD_SPU_IRQ_ENA */
enum {
DMA_FRAME_IRQ0_ENA = 1 << 31,
DMA_FRAME_IRQ1_ENA = 1 << 30,
DMA_FRAME_IRQ_ENA = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
DMA_FF_UNDERFLOW_ENA = 1 << 29,
GRA_FRAME_IRQ0_ENA = 1 << 27,
GRA_FRAME_IRQ1_ENA = 1 << 26,
GRA_FRAME_IRQ_ENA = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
GRA_FF_UNDERFLOW_ENA = 1 << 25,
VSYNC_IRQ_ENA = 1 << 23,
DUMB_FRAMEDONE_ENA = 1 << 22,
TWC_FRAMEDONE_ENA = 1 << 21,
HWC_FRAMEDONE_ENA = 1 << 20,
SLV_IRQ_ENA = 1 << 19,
SPI_IRQ_ENA = 1 << 18,
PWRDN_IRQ_ENA = 1 << 17,
ERR_IRQ_ENA = 1 << 16,
CLEAN_SPU_IRQ_ISR = 0xffff,
};
/* For LCD_SPU_IRQ_ISR */
enum {
DMA_FRAME_IRQ0 = 1 << 31,
DMA_FRAME_IRQ1 = 1 << 30,
DMA_FRAME_IRQ = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
DMA_FF_UNDERFLOW = 1 << 29,
GRA_FRAME_IRQ0 = 1 << 27,
GRA_FRAME_IRQ1 = 1 << 26,
GRA_FRAME_IRQ = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
GRA_FF_UNDERFLOW = 1 << 25,
VSYNC_IRQ = 1 << 23,
DUMB_FRAMEDONE = 1 << 22,
TWC_FRAMEDONE = 1 << 21,
HWC_FRAMEDONE = 1 << 20,
SLV_IRQ = 1 << 19,
SPI_IRQ = 1 << 18,
PWRDN_IRQ = 1 << 17,
ERR_IRQ = 1 << 16,
DMA_FRAME_IRQ0_LEVEL = 1 << 15,
DMA_FRAME_IRQ1_LEVEL = 1 << 14,
DMA_FRAME_CNT_ISR = 3 << 12,
GRA_FRAME_IRQ0_LEVEL = 1 << 11,
GRA_FRAME_IRQ1_LEVEL = 1 << 10,
GRA_FRAME_CNT_ISR = 3 << 8,
VSYNC_IRQ_LEVEL = 1 << 7,
DUMB_FRAMEDONE_LEVEL = 1 << 6,
TWC_FRAMEDONE_LEVEL = 1 << 5,
HWC_FRAMEDONE_LEVEL = 1 << 4,
SLV_FF_EMPTY = 1 << 3,
DMA_FF_ALLEMPTY = 1 << 2,
GRA_FF_ALLEMPTY = 1 << 1,
PWRDN_IRQ_LEVEL = 1 << 0,
};
#endif

View file

@ -0,0 +1,18 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_IOCTLP_H
#define ARMADA_IOCTLP_H
#define ARMADA_IOCTL_PROTO(name)\
extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
ARMADA_IOCTL_PROTO(gem_create);
ARMADA_IOCTL_PROTO(gem_mmap);
ARMADA_IOCTL_PROTO(gem_pwrite);
#endif

View file

@ -0,0 +1,158 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include "armada_output.h"
#include "armada_drm.h"
struct armada_connector {
struct drm_connector conn;
const struct armada_output_type *type;
};
#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
{
struct drm_encoder *enc = conn->encoder;
return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
}
static enum drm_connector_status armada_drm_connector_detect(
struct drm_connector *conn, bool force)
{
struct armada_connector *dconn = drm_to_armada_conn(conn);
enum drm_connector_status status = connector_status_disconnected;
if (dconn->type->detect) {
status = dconn->type->detect(conn, force);
} else {
struct drm_encoder *enc = armada_drm_connector_encoder(conn);
if (enc)
status = encoder_helper_funcs(enc)->detect(enc, conn);
}
return status;
}
static void armada_drm_connector_destroy(struct drm_connector *conn)
{
struct armada_connector *dconn = drm_to_armada_conn(conn);
drm_sysfs_connector_remove(conn);
drm_connector_cleanup(conn);
kfree(dconn);
}
static int armada_drm_connector_set_property(struct drm_connector *conn,
struct drm_property *property, uint64_t value)
{
struct armada_connector *dconn = drm_to_armada_conn(conn);
if (!dconn->type->set_property)
return -EINVAL;
return dconn->type->set_property(conn, property, value);
}
static const struct drm_connector_funcs armada_drm_conn_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = armada_drm_connector_detect,
.destroy = armada_drm_connector_destroy,
.set_property = armada_drm_connector_set_property,
};
void armada_drm_encoder_prepare(struct drm_encoder *encoder)
{
encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
}
void armada_drm_encoder_commit(struct drm_encoder *encoder)
{
encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
}
bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
{
return true;
}
/* Shouldn't this be a generic helper function? */
int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
int valid = MODE_BAD;
if (encoder) {
struct drm_encoder_slave *slave = to_encoder_slave(encoder);
valid = slave->slave_funcs->mode_valid(encoder, mode);
}
return valid;
}
int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
struct drm_property *property, uint64_t value)
{
struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
int rc = -EINVAL;
if (encoder) {
struct drm_encoder_slave *slave = to_encoder_slave(encoder);
rc = slave->slave_funcs->set_property(encoder, conn, property,
value);
}
return rc;
}
int armada_output_create(struct drm_device *dev,
const struct armada_output_type *type, const void *data)
{
struct armada_connector *dconn;
int ret;
dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
if (!dconn)
return -ENOMEM;
dconn->type = type;
ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
type->connector_type);
if (ret) {
DRM_ERROR("unable to init connector\n");
goto err_destroy_dconn;
}
ret = type->create(&dconn->conn, data);
if (ret)
goto err_conn;
ret = drm_sysfs_connector_add(&dconn->conn);
if (ret)
goto err_sysfs;
return 0;
err_sysfs:
if (dconn->conn.encoder)
dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
err_conn:
drm_connector_cleanup(&dconn->conn);
err_destroy_dconn:
kfree(dconn);
return ret;
}

View file

@ -0,0 +1,39 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_CONNECTOR_H
#define ARMADA_CONNECTOR_H
#define encoder_helper_funcs(encoder) \
((struct drm_encoder_helper_funcs *)encoder->helper_private)
struct armada_output_type {
int connector_type;
enum drm_connector_status (*detect)(struct drm_connector *, bool);
int (*create)(struct drm_connector *, const void *);
int (*set_property)(struct drm_connector *, struct drm_property *,
uint64_t);
};
struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
void armada_drm_encoder_prepare(struct drm_encoder *encoder);
void armada_drm_encoder_commit(struct drm_encoder *encoder);
bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode, struct drm_display_mode *adj);
int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
struct drm_display_mode *mode);
int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
struct drm_property *property, uint64_t value);
int armada_output_create(struct drm_device *dev,
const struct armada_output_type *type, const void *data);
#endif

View file

@ -0,0 +1,477 @@
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
struct armada_plane_properties {
uint32_t colorkey_yr;
uint32_t colorkey_ug;
uint32_t colorkey_vb;
#define K2R(val) (((val) >> 0) & 0xff)
#define K2G(val) (((val) >> 8) & 0xff)
#define K2B(val) (((val) >> 16) & 0xff)
int16_t brightness;
uint16_t contrast;
uint16_t saturation;
uint32_t colorkey_mode;
};
struct armada_plane {
struct drm_plane base;
spinlock_t lock;
struct drm_framebuffer *old_fb;
uint32_t src_hw;
uint32_t dst_hw;
uint32_t dst_yx;
uint32_t ctrl0;
struct {
struct armada_vbl_event update;
struct armada_regs regs[13];
wait_queue_head_t wait;
} vbl;
struct armada_plane_properties prop;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
static void
armada_ovl_update_attr(struct armada_plane_properties *prop,
struct armada_crtc *dcrtc)
{
writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
writel_relaxed(prop->brightness << 16 | prop->contrast,
dcrtc->base + LCD_SPU_CONTRAST);
/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
writel_relaxed(prop->saturation << 16,
dcrtc->base + LCD_SPU_SATURATION);
writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
spin_lock_irq(&dcrtc->irq_lock);
armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
dcrtc->base + LCD_SPU_DMA_CTRL1);
armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
spin_unlock_irq(&dcrtc->irq_lock);
}
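/*
 * Illustrative sketch: armada_updatel() is defined elsewhere in the
 * driver; assuming it is a plain read-modify-write helper, the calls
 * above behave roughly like:
 */
static inline void example_updatel(unsigned int val, unsigned int mask,
	void __iomem *ptr)
{
	unsigned int v = readl_relaxed(ptr);

	writel_relaxed((v & ~mask) | val, ptr);	/* clear mask, set val */
}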
/* === Plane support === */
static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
{
struct armada_plane *dplane = data;
struct drm_framebuffer *fb;
armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
spin_lock(&dplane->lock);
fb = dplane->old_fb;
dplane->old_fb = NULL;
spin_unlock(&dplane->lock);
if (fb)
armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
}
static unsigned armada_limit(int start, unsigned size, unsigned max)
{
int end = start + size;
if (end < 0)
return 0;
if (start < 0)
start = 0;
return (unsigned)end > max ? max - start : end - start;
}
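/*
 * Worked example: armada_limit() clamps one axis of the overlay to the
 * active display. armada_limit(-100, 300, 1024) gives end = 200, clamps
 * start to 0 and returns a visible size of 200; armada_limit(500, 1000,
 * 1024) returns 1024 - 500 = 524; a rectangle entirely off-screen to
 * the top/left (end < 0) returns 0.
 */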
static int
armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
{
struct armada_plane *dplane = drm_to_armada_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
uint32_t val, ctrl0;
unsigned idx = 0;
int ret;
crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
/* Does the position/size result in nothing to display? */
if (crtc_w == 0 || crtc_h == 0) {
ctrl0 &= ~CFG_DMA_ENA;
}
/*
* FIXME: if the starting point is off screen, we need to
* adjust src_x, src_y, src_w, src_h appropriately, and
* according to the scale.
*/
if (!dcrtc->plane) {
dcrtc->plane = plane;
armada_ovl_update_attr(&dplane->prop, dcrtc);
}
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
val = (src_h & 0xffff0000) | src_w >> 16;
dplane->src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
val = crtc_h << 16 | crtc_w;
dplane->dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
val = crtc_y << 16 | crtc_x;
dplane->dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
return 0;
} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
dcrtc->base + LCD_SPU_SRAM_PARA1);
}
ret = wait_event_timeout(dplane->vbl.wait,
list_empty(&dplane->vbl.update.node),
HZ/25);
if (ret < 0)
return ret;
if (plane->fb != fb) {
struct armada_gem_object *obj = drm_fb_obj(fb);
uint32_t sy, su, sv;
/*
* Take a reference on the new framebuffer - we want to
* hold on to it while the hardware is displaying it.
*/
drm_framebuffer_reference(fb);
if (plane->fb) {
struct drm_framebuffer *older_fb;
spin_lock_irq(&dplane->lock);
older_fb = dplane->old_fb;
dplane->old_fb = plane->fb;
spin_unlock_irq(&dplane->lock);
if (older_fb)
armada_drm_queue_unref_work(dcrtc->crtc.dev,
older_fb);
}
src_y >>= 16;
src_x >>= 16;
sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
src_x * fb->bits_per_pixel / 8;
su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
src_x;
sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
src_x;
armada_reg_queue_set(dplane->vbl.regs, idx, sy,
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(dplane->vbl.regs, idx, su,
LCD_SPU_DMA_START_ADDR_U0);
armada_reg_queue_set(dplane->vbl.regs, idx, sv,
LCD_SPU_DMA_START_ADDR_V0);
armada_reg_queue_set(dplane->vbl.regs, idx, sy,
LCD_SPU_DMA_START_ADDR_Y1);
armada_reg_queue_set(dplane->vbl.regs, idx, su,
LCD_SPU_DMA_START_ADDR_U1);
armada_reg_queue_set(dplane->vbl.regs, idx, sv,
LCD_SPU_DMA_START_ADDR_V1);
val = fb->pitches[0] << 16 | fb->pitches[0];
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_PITCH_YC);
val = fb->pitches[1] << 16 | fb->pitches[2];
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_PITCH_UV);
}
val = (src_h & 0xffff0000) | src_w >> 16;
if (dplane->src_hw != val) {
dplane->src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
val = crtc_h << 16 | crtc_w;
if (dplane->dst_hw != val) {
dplane->dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
val = crtc_y << 16 | crtc_x;
if (dplane->dst_yx != val) {
dplane->dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
if (dplane->ctrl0 != ctrl0) {
dplane->ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
CFG_YUV2RGB) | CFG_DMA_ENA,
LCD_SPU_DMA_CTRL0);
}
if (idx) {
armada_reg_queue_end(dplane->vbl.regs, idx);
armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
}
return 0;
}
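/*
 * Illustrative sketch (not driver code): src_x/y/w/h arrive from the
 * DRM core in 16.16 fixed point, and the hardware takes height:width
 * packed into a single register, which is what the expressions of the
 * form (src_h & 0xffff0000) | src_w >> 16 above build:
 */
static inline unsigned int example_pack_src_hw(unsigned int src_w16,
	unsigned int src_h16)
{
	/* integer part of height in bits 31:16, of width in bits 15:0 */
	return (src_h16 & 0xffff0000) | (src_w16 >> 16);
}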
static int armada_plane_disable(struct drm_plane *plane)
{
struct armada_plane *dplane = drm_to_armada_plane(plane);
struct drm_framebuffer *fb;
struct armada_crtc *dcrtc;
if (!dplane->base.crtc)
return 0;
dcrtc = drm_to_armada_crtc(dplane->base.crtc);
dcrtc->plane = NULL;
spin_lock_irq(&dcrtc->irq_lock);
armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
dplane->ctrl0 = 0;
spin_unlock_irq(&dcrtc->irq_lock);
/* Power down the Y/U/V FIFOs */
armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
dcrtc->base + LCD_SPU_SRAM_PARA1);
if (plane->fb)
drm_framebuffer_unreference(plane->fb);
spin_lock_irq(&dplane->lock);
fb = dplane->old_fb;
dplane->old_fb = NULL;
spin_unlock_irq(&dplane->lock);
if (fb)
drm_framebuffer_unreference(fb);
return 0;
}
static void armada_plane_destroy(struct drm_plane *plane)
{
kfree(plane);
}
static int armada_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val)
{
struct armada_private *priv = plane->dev->dev_private;
struct armada_plane *dplane = drm_to_armada_plane(plane);
bool update_attr = false;
if (property == priv->colorkey_prop) {
#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
dplane->prop.colorkey_yr = CCC(K2R(val));
dplane->prop.colorkey_ug = CCC(K2G(val));
dplane->prop.colorkey_vb = CCC(K2B(val));
#undef CCC
update_attr = true;
} else if (property == priv->colorkey_min_prop) {
dplane->prop.colorkey_yr &= ~0x00ff0000;
dplane->prop.colorkey_yr |= K2R(val) << 16;
dplane->prop.colorkey_ug &= ~0x00ff0000;
dplane->prop.colorkey_ug |= K2G(val) << 16;
dplane->prop.colorkey_vb &= ~0x00ff0000;
dplane->prop.colorkey_vb |= K2B(val) << 16;
update_attr = true;
} else if (property == priv->colorkey_max_prop) {
dplane->prop.colorkey_yr &= ~0xff000000;
dplane->prop.colorkey_yr |= K2R(val) << 24;
dplane->prop.colorkey_ug &= ~0xff000000;
dplane->prop.colorkey_ug |= K2G(val) << 24;
dplane->prop.colorkey_vb &= ~0xff000000;
dplane->prop.colorkey_vb |= K2B(val) << 24;
update_attr = true;
} else if (property == priv->colorkey_val_prop) {
dplane->prop.colorkey_yr &= ~0x0000ff00;
dplane->prop.colorkey_yr |= K2R(val) << 8;
dplane->prop.colorkey_ug &= ~0x0000ff00;
dplane->prop.colorkey_ug |= K2G(val) << 8;
dplane->prop.colorkey_vb &= ~0x0000ff00;
dplane->prop.colorkey_vb |= K2B(val) << 8;
update_attr = true;
} else if (property == priv->colorkey_alpha_prop) {
dplane->prop.colorkey_yr &= ~0x000000ff;
dplane->prop.colorkey_yr |= K2R(val);
dplane->prop.colorkey_ug &= ~0x000000ff;
dplane->prop.colorkey_ug |= K2G(val);
dplane->prop.colorkey_vb &= ~0x000000ff;
dplane->prop.colorkey_vb |= K2B(val);
update_attr = true;
} else if (property == priv->colorkey_mode_prop) {
dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
dplane->prop.colorkey_mode |= CFG_CKMODE(val);
update_attr = true;
} else if (property == priv->brightness_prop) {
dplane->prop.brightness = val - 256;
update_attr = true;
} else if (property == priv->contrast_prop) {
dplane->prop.contrast = val;
update_attr = true;
} else if (property == priv->saturation_prop) {
dplane->prop.saturation = val;
update_attr = true;
}
if (update_attr && dplane->base.crtc)
armada_ovl_update_attr(&dplane->prop,
drm_to_armada_crtc(dplane->base.crtc));
return 0;
}
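/*
 * Illustrative sketch (not driver code): each colorkey register packs
 * four 8-bit fields: bits 31:24 = max, 23:16 = min, 15:8 = value,
 * 7:0 = alpha. The combined "colorkey" property writes one component
 * into the max, min and value fields at once via CCC(); the separate
 * colorkey_min/_max/_val/_alpha properties each patch a single field,
 * roughly equivalent to:
 */
static inline unsigned int example_colorkey_field(unsigned int reg,
	unsigned int component, unsigned int shift)
{
	/* shift: 24 = max, 16 = min, 8 = value, 0 = alpha */
	return (reg & ~(0xffu << shift)) | ((component & 0xffu) << shift);
}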
static const struct drm_plane_funcs armada_plane_funcs = {
.update_plane = armada_plane_update,
.disable_plane = armada_plane_disable,
.destroy = armada_plane_destroy,
.set_property = armada_plane_set_property,
};
static const uint32_t armada_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU422,
DRM_FORMAT_VYUY,
DRM_FORMAT_YVYU,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
};
static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
{ CKMODE_DISABLE, "disabled" },
{ CKMODE_Y, "Y component" },
{ CKMODE_U, "U component" },
{ CKMODE_V, "V component" },
{ CKMODE_RGB, "RGB" },
{ CKMODE_R, "R component" },
{ CKMODE_G, "G component" },
{ CKMODE_B, "B component" },
};
static int armada_overlay_create_properties(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
if (priv->colorkey_prop)
return 0;
priv->colorkey_prop = drm_property_create_range(dev, 0,
"colorkey", 0, 0xffffff);
priv->colorkey_min_prop = drm_property_create_range(dev, 0,
"colorkey_min", 0, 0xffffff);
priv->colorkey_max_prop = drm_property_create_range(dev, 0,
"colorkey_max", 0, 0xffffff);
priv->colorkey_val_prop = drm_property_create_range(dev, 0,
"colorkey_val", 0, 0xffffff);
priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
"colorkey_alpha", 0, 0xffffff);
priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
"colorkey_mode",
armada_drm_colorkey_enum_list,
ARRAY_SIZE(armada_drm_colorkey_enum_list));
priv->brightness_prop = drm_property_create_range(dev, 0,
"brightness", 0, 256 + 255);
priv->contrast_prop = drm_property_create_range(dev, 0,
"contrast", 0, 0x7fff);
priv->saturation_prop = drm_property_create_range(dev, 0,
"saturation", 0, 0x7fff);
if (!priv->colorkey_prop)
return -ENOMEM;
return 0;
}
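/*
 * Illustrative note: "brightness" is exposed to userspace as an
 * unsigned range 0..511 and biased by 256 in the set_property handler
 * above, so 0 maps to -256, 256 (the default attached below) to 0,
 * and 511 to +255:
 */
static inline int example_brightness_from_prop(unsigned long long val)
{
	return (int)val - 256;	/* property range is 0..256+255 */
}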
int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
struct armada_private *priv = dev->dev_private;
struct drm_mode_object *mobj;
struct armada_plane *dplane;
int ret;
ret = armada_overlay_create_properties(dev);
if (ret)
return ret;
dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
if (!dplane)
return -ENOMEM;
spin_lock_init(&dplane->lock);
init_waitqueue_head(&dplane->vbl.wait);
armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
dplane);
drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
armada_formats, ARRAY_SIZE(armada_formats), false);
dplane->prop.colorkey_yr = 0xfefefe00;
dplane->prop.colorkey_ug = 0x01010100;
dplane->prop.colorkey_vb = 0x01010100;
dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
dplane->prop.brightness = 0;
dplane->prop.contrast = 0x4000;
dplane->prop.saturation = 0x4000;
mobj = &dplane->base.base;
drm_object_attach_property(mobj, priv->colorkey_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_min_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_max_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_val_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
0x000000);
drm_object_attach_property(mobj, priv->colorkey_mode_prop,
CKMODE_RGB);
drm_object_attach_property(mobj, priv->brightness_prop, 256);
drm_object_attach_property(mobj, priv->contrast_prop,
dplane->prop.contrast);
drm_object_attach_property(mobj, priv->saturation_prop,
dplane->prop.saturation);
return 0;
}

View file

@ -0,0 +1,139 @@
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include "armada_drm.h"
#include "armada_output.h"
#include "armada_slave.h"
static int armada_drm_slave_get_modes(struct drm_connector *conn)
{
struct drm_encoder *enc = armada_drm_connector_encoder(conn);
int count = 0;
if (enc) {
struct drm_encoder_slave *slave = to_encoder_slave(enc);
count = slave->slave_funcs->get_modes(enc, conn);
}
return count;
}
static void armada_drm_slave_destroy(struct drm_encoder *enc)
{
struct drm_encoder_slave *slave = to_encoder_slave(enc);
struct i2c_client *client = drm_i2c_encoder_get_client(enc);
if (slave->slave_funcs)
slave->slave_funcs->destroy(enc);
if (client)
i2c_put_adapter(client->adapter);
drm_encoder_cleanup(&slave->base);
kfree(slave);
}
static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
.destroy = armada_drm_slave_destroy,
};
static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
.get_modes = armada_drm_slave_get_modes,
.mode_valid = armada_drm_slave_encoder_mode_valid,
.best_encoder = armada_drm_connector_encoder,
};
static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
.dpms = drm_i2c_encoder_dpms,
.save = drm_i2c_encoder_save,
.restore = drm_i2c_encoder_restore,
.mode_fixup = drm_i2c_encoder_mode_fixup,
.prepare = drm_i2c_encoder_prepare,
.commit = drm_i2c_encoder_commit,
.mode_set = drm_i2c_encoder_mode_set,
.detect = drm_i2c_encoder_detect,
};
static int
armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
{
const struct armada_drm_slave_config *config = data;
struct drm_encoder_slave *slave;
struct i2c_adapter *adap;
int ret;
conn->interlace_allowed = config->interlace_allowed;
conn->doublescan_allowed = config->doublescan_allowed;
conn->polled = config->polled;
drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
if (!slave)
return -ENOMEM;
slave->base.possible_crtcs = config->crtcs;
adap = i2c_get_adapter(config->i2c_adapter_id);
if (!adap) {
kfree(slave);
return -EPROBE_DEFER;
}
ret = drm_encoder_init(conn->dev, &slave->base,
&armada_drm_slave_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
if (ret) {
DRM_ERROR("unable to init encoder\n");
i2c_put_adapter(adap);
kfree(slave);
return ret;
}
ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
i2c_put_adapter(adap);
if (ret) {
DRM_ERROR("unable to init encoder slave\n");
armada_drm_slave_destroy(&slave->base);
return ret;
}
drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
ret = slave->slave_funcs->create_resources(&slave->base, conn);
if (ret) {
armada_drm_slave_destroy(&slave->base);
return ret;
}
ret = drm_mode_connector_attach_encoder(conn, &slave->base);
if (ret) {
armada_drm_slave_destroy(&slave->base);
return ret;
}
conn->encoder = &slave->base;
return ret;
}
static const struct armada_output_type armada_drm_conn_slave = {
.connector_type = DRM_MODE_CONNECTOR_HDMIA,
.create = armada_drm_conn_slave_create,
.set_property = armada_drm_slave_encoder_set_property,
};
int armada_drm_connector_slave_create(struct drm_device *dev,
const struct armada_drm_slave_config *config)
{
return armada_output_create(dev, &armada_drm_conn_slave, config);
}

View file

@ -0,0 +1,26 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_SLAVE_H
#define ARMADA_SLAVE_H
#include <linux/i2c.h>
#include <drm/drmP.h>
struct armada_drm_slave_config {
int i2c_adapter_id;
uint32_t crtcs;
uint8_t polled;
bool interlace_allowed;
bool doublescan_allowed;
struct i2c_board_info info;
};
int armada_drm_connector_slave_create(struct drm_device *dev,
const struct armada_drm_slave_config *);
#endif

View file

@ -6,6 +6,7 @@ config DRM_AST
select FB_SYS_FILLRECT
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
help
Say yes for experimental AST GPU driver. Do not enable

View file

@ -211,7 +211,6 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
.gem_init_object = ast_gem_init_object,
.gem_free_object = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,

View file

@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
extern int ast_gem_init_object(struct drm_gem_object *obj);
extern void ast_gem_free_object(struct drm_gem_object *obj);
extern int ast_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,

View file

@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
return 0;
}
int ast_gem_init_object(struct drm_gem_object *obj)
{
BUG();
return 0;
}
void ast_bo_unref(struct ast_bo **bo)
{
struct ttm_buffer_object *tbo;

View file

@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for emulated cirrus device in qemu.

View file

@ -97,7 +97,6 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
.gem_init_object = cirrus_gem_init_object,
.gem_free_object = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,

View file

@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
struct pci_dev *pdev,
uint32_t flags);
void cirrus_device_fini(struct cirrus_device *cdev);
int cirrus_gem_init_object(struct drm_gem_object *obj);
void cirrus_gem_free_object(struct drm_gem_object *obj);
int cirrus_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,

View file

@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
return 0;
}
int cirrus_gem_init_object(struct drm_gem_object *obj)
{
BUG();
return 0;
}
void cirrus_bo_unref(struct cirrus_bo **bo)
{
struct ttm_buffer_object *tbo;

View file

@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
int cirrus_vga_get_modes(struct drm_connector *connector)
{
/* Just add a static list of modes */
drm_add_modes_noedid(connector, 640, 480);
drm_add_modes_noedid(connector, 800, 600);
drm_add_modes_noedid(connector, 1024, 768);
drm_add_modes_noedid(connector, 1280, 1024);
int count;
return 4;
/* Just add a static list of modes */
count = drm_add_modes_noedid(connector, 1280, 1024);
drm_set_preferred_mode(connector, 1024, 768);
return count;
}
static int cirrus_vga_mode_valid(struct drm_connector *connector,

View file

@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
return 0;
@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
if (pos->handle == ctx->handle) {
list_del(&pos->head);
kfree(pos);
--dev->ctx_count;
}
}
}

View file

@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
};
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
{ DRM_MODE_ENCODER_DSI, "DSI" },
};
void drm_connector_ida_init(void)
@ -1301,7 +1303,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
}
/**
* drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
* drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
@ -1317,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
return -ERANGE;
if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
return -EINVAL;
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
@ -1552,7 +1557,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@ -1579,6 +1584,19 @@ out:
return ret;
}
static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
const struct drm_file *file_priv)
{
/*
* If user-space hasn't configured the driver to expose the stereo 3D
* modes, don't expose them.
*/
if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
return false;
return true;
}
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
@ -1623,7 +1641,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, out_resp->connector_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
@ -1644,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
/* delayed so we get modes regardless of pre-fill_modes state */
list_for_each_entry(mode, &connector->modes, head)
mode_count++;
if (drm_mode_expose_to_userspace(mode, file_priv))
mode_count++;
out_resp->connector_id = connector->base.id;
out_resp->connector_type = connector->connector_type;
@ -1666,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
copied = 0;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
if (!drm_mode_expose_to_userspace(mode, file_priv))
continue;
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
@ -1735,7 +1757,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
encoder = obj_to_encoder(obj);
@ -2040,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
/*
* Checks that the framebuffer is big enough for the CRTC viewport
* (x, y, hdisplay, vdisplay)
*/
static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb)
{
int hdisplay, vdisplay;
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
if (drm_mode_is_stereo(mode)) {
struct drm_display_mode adjusted = *mode;
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
hdisplay = adjusted.crtc_hdisplay;
vdisplay = adjusted.crtc_vdisplay;
}
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
if (hdisplay > fb->width ||
vdisplay > fb->height ||
x > fb->width - hdisplay ||
y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height, hdisplay, vdisplay, x, y,
crtc->invert_dimensions ? " (inverted)" : "");
return -ENOSPC;
}
return 0;
}
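/*
 * Illustrative sketch (not driver code): the check above simply
 * requires the viewport rectangle to lie inside the framebuffer, e.g.
 * a 1920x1080 fb with a 1920x1080 mode passes only at x = y = 0. For
 * stereo modes the adjusted crtc_hdisplay/crtc_vdisplay are checked
 * instead (frame packing roughly doubles the vertical size), so a
 * correspondingly larger framebuffer is needed.
 */
static inline int example_viewport_fits(int fb_w, int fb_h,
	int x, int y, int hdisplay, int vdisplay)
{
	return !(hdisplay > fb_w || vdisplay > fb_h ||
		 x > fb_w - hdisplay || y > fb_h - vdisplay);
}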
/**
* drm_mode_setcrtc - set CRTC configuration
* @dev: drm device for the ioctl
@ -2080,14 +2141,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
int hdisplay, vdisplay;
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
@ -2104,7 +2164,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
}
@ -2123,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
if (hdisplay > fb->width ||
vdisplay > fb->height ||
crtc_req->x > fb->width - hdisplay ||
crtc_req->y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height,
hdisplay, vdisplay, crtc_req->x, crtc_req->y,
crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
mode, fb);
if (ret)
goto out;
}
}
if (crtc_req->count_connectors == 0 && mode) {
@ -2184,7 +2232,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!obj) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
@ -2232,7 +2280,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -EINVAL;
return -ENOENT;
}
crtc = obj_to_crtc(obj);
@ -2441,6 +2489,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
case DRM_FORMAT_YVU444:
return 0;
default:
DRM_DEBUG_KMS("invalid pixel format %s\n",
drm_get_format_name(r->pixel_format));
return -EINVAL;
}
}
@ -2606,7 +2656,7 @@ fail_lookup:
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);
return -EINVAL;
return -ENOENT;
}
/**
@ -2634,7 +2684,7 @@ int drm_mode_getfb(struct drm_device *dev,
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
return -EINVAL;
return -ENOENT;
r->height = fb->height;
r->width = fb->width;
@ -2679,7 +2729,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
return -EINVAL;
return -ENOENT;
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@ -3011,7 +3061,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto done;
}
property = obj_to_property(obj);
@ -3140,7 +3190,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto done;
}
blob = obj_to_blob(obj);
@ -3301,7 +3351,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
if (!obj->properties) {
@ -3354,8 +3404,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
if (!arg_obj) {
ret = -ENOENT;
goto out;
}
if (!arg_obj->properties)
goto out;
@ -3368,8 +3420,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
prop_obj = drm_mode_object_find(dev, arg->prop_id,
DRM_MODE_OBJECT_PROPERTY);
if (!prop_obj)
if (!prop_obj) {
ret = -ENOENT;
goto out;
}
property = obj_to_property(prop_obj);
if (!drm_property_change_is_valid(property, arg->value))
@ -3454,7 +3508,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@ -3513,7 +3567,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@ -3556,7 +3610,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int hdisplay, vdisplay;
int ret = -EINVAL;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@ -3568,7 +3621,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
return -EINVAL;
return -ENOENT;
crtc = obj_to_crtc(obj);
mutex_lock(&crtc->mutex);
@ -3585,26 +3638,15 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
if (!fb)
goto out;
hdisplay = crtc->mode.hdisplay;
vdisplay = crtc->mode.vdisplay;
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
if (hdisplay > fb->width ||
vdisplay > fb->height ||
crtc->x > fb->width - hdisplay ||
crtc->y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
if (!fb) {
ret = -ENOENT;
goto out;
}
ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
if (ret)
goto out;
if (crtc->fb->pixel_format != fb->pixel_format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
@ -3788,7 +3830,8 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
*bpp = 32;
break;
default:
DRM_DEBUG_KMS("unsupported pixel format\n");
DRM_DEBUG_KMS("unsupported pixel format %s\n",
drm_get_format_name(format));
*depth = 0;
*bpp = 0;
break;

View file

@ -39,6 +39,10 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
* connector list
@ -76,7 +80,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
{
struct drm_display_mode *mode;
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
DRM_MODE_FLAG_3D_MASK))
return;
list_for_each_entry(mode, &connector->modes, head) {
@ -86,6 +91,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
!(flags & DRM_MODE_FLAG_3D_MASK))
mode->status = MODE_NO_STEREO;
}
return;
@ -105,9 +113,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be used as a generic implementation of the ->probe() @connector
* callback for drivers that use the crtc helpers for output mode filtering and
* detection.
* Intended to be used as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
* RETURNS:
* Number of modes found on @connector.
@ -175,6 +183,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry(mode, &connector->modes, head) {
@ -395,22 +405,25 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_display_mode *adjusted_mode, saved_mode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
bool saved_enabled;
struct drm_encoder *encoder;
bool ret = true;
saved_enabled = crtc->enabled;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
return true;
adjusted_mode = drm_mode_duplicate(dev, mode);
if (!adjusted_mode)
if (!adjusted_mode) {
crtc->enabled = saved_enabled;
return false;
}
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
@ -529,7 +542,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
crtc->hwmode = saved_hwmode;
crtc->enabled = saved_enabled;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
@ -557,6 +570,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
continue;
connector->encoder = NULL;
/*
* drm_helper_disable_unused_functions() ought to be
* doing this, but since we've decoupled the encoder
* from the connector above, the required connection
* between them is henceforth no longer available.
*/
connector->dpms = DRM_MODE_DPMS_OFF;
}
}
@ -583,9 +604,8 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_crtc *save_crtcs, *new_crtc, *crtc;
struct drm_crtc *new_crtc;
struct drm_encoder *save_encoders, *new_encoder, *encoder;
struct drm_framebuffer *old_fb = NULL;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *save_connectors, *connector;
@ -621,37 +641,27 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
dev = set->crtc->dev;
/* Allocate space for the backup of all (non-pointer) crtc, encoder and
* connector data. */
save_crtcs = kzalloc(dev->mode_config.num_crtc *
sizeof(struct drm_crtc), GFP_KERNEL);
if (!save_crtcs)
return -ENOMEM;
/*
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
*/
save_encoders = kzalloc(dev->mode_config.num_encoder *
sizeof(struct drm_encoder), GFP_KERNEL);
if (!save_encoders) {
kfree(save_crtcs);
if (!save_encoders)
return -ENOMEM;
}
save_connectors = kzalloc(dev->mode_config.num_connector *
sizeof(struct drm_connector), GFP_KERNEL);
if (!save_connectors) {
kfree(save_crtcs);
kfree(save_encoders);
return -ENOMEM;
}
/* Copy data. Note that driver private data is not affected.
/*
* Copy data. Note that driver private data is not affected.
* Should anything bad happen only the expected state is
* restored, not the drivers personal bookkeeping.
*/
count = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
save_crtcs[count++] = *crtc;
}
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
save_encoders[count++] = *encoder;
@ -775,19 +785,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
if (mode_changed) {
set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
if (set->crtc->enabled) {
if (drm_helper_crtc_in_use(set->crtc)) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
old_fb = set->crtc->fb;
set->crtc->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
save_set.fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
set->crtc->fb = old_fb;
set->crtc->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
@ -802,30 +810,23 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
} else if (fb_changed) {
set->crtc->x = set->x;
set->crtc->y = set->y;
old_fb = set->crtc->fb;
if (set->crtc->fb != set->fb)
set->crtc->fb = set->fb;
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
set->x, set->y, save_set.fb);
if (ret != 0) {
set->crtc->fb = old_fb;
set->crtc->x = save_set.x;
set->crtc->y = save_set.y;
set->crtc->fb = save_set.fb;
goto fail;
}
}
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
return 0;
fail:
/* Restore all previous data. */
count = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
*crtc = save_crtcs[count++];
}
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
*encoder = save_encoders[count++];
@ -844,7 +845,6 @@ fail:
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
@ -1125,14 +1125,14 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
enum drm_connector_status old_status;
bool changed = false;
if (!dev->mode_config.poll_enabled)
return;
return false;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@ -1157,5 +1157,7 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
if (changed)
drm_kms_helper_hotplug_event(dev);
return changed;
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);

View file

@ -42,7 +42,7 @@
* Initialization, etc.
**************************************************/
static struct drm_info_list drm_debugfs_list[] = {
static const struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
@ -84,7 +84,7 @@ static const struct file_operations drm_debugfs_fops = {
* Create a given set of debugfs files represented by an array of
* drm_debugfs_lists in the given root directory.
*/
int drm_debugfs_create_files(struct drm_info_list *files, int count,
int drm_debugfs_create_files(const struct drm_info_list *files, int count,
struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
@ -188,7 +188,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
*
* Remove all debugfs entries created by debugfs_init().
*/
int drm_debugfs_remove_files(struct drm_info_list *files, int count,
int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;

View file

@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
/* Helpers for DP link training */
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
return (l >> s) & 0xf;
}
bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
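/*
 * Illustrative sketch (not driver code): the DPCD packs two lanes'
 * 4-bit status into each DP_LANE0_1_STATUS byte, which is what the
 * (lane >> 1) and (lane & 1) * 4 arithmetic above unpacks:
 */
static inline unsigned int example_lane_status(const unsigned char *status,
	int lane)
{
	unsigned int shift = (lane & 1) * 4;	/* odd lanes: high nibble */

	return (status[lane >> 1] >> shift) & 0xf;	/* two lanes per byte */
}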
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(100);
else
@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(400);
else

View file

@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@ -170,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/**
* drm_legacy_dev_reinit
*
* Reinitializes a legacy/ums drm device in its lastclose function.
*/
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
dev->sigdata.lock = NULL;
dev->context_flag = 0;
dev->last_context = 0;
dev->if_version = 0;
}
/**
* Take down the DRM device.
*
* \param dev DRM device structure.
*
* Frees every resource in \p dev.
*
* \sa drm_device
*/
int drm_lastclose(struct drm_device * dev)
{
struct drm_vma_entry *vma, *vma_temp;
DRM_DEBUG("\n");
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
drm_agp_clear(dev);
drm_legacy_sg_cleanup(dev);
/* Clear vma list (only built for debugging) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
kfree(vma);
}
drm_legacy_dma_takedown(dev);
dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
drm_legacy_dev_reinit(dev);
DRM_DEBUG("lastclose completed\n");
return 0;
}
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
@ -385,7 +316,6 @@ long drm_ioctl(struct file *filp,
return -ENODEV;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@ -473,7 +403,7 @@ long drm_ioctl(struct file *filp,
err_i1:
if (!ioctl)
DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->device),
file_priv->authenticated, cmd, nr);


@ -458,6 +458,15 @@ static const struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
/*
* These more or less come from the DMT spec. The 720x400 modes are
* inferred from historical 80x25 practice. The 640x480@67 and 832x624@75
* modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode
* should be 1152x870, again for the Mac, but instead we use the x864 DMT
* mode.
*
* The DMT modes have been fact-checked; the rest are mild guesses.
*/
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
@ -560,7 +569,7 @@ static const struct minimode est3_modes[] = {
{ 1600, 1200, 75, 0 },
{ 1600, 1200, 85, 0 },
{ 1792, 1344, 60, 0 },
{ 1792, 1344, 85, 0 },
{ 1792, 1344, 75, 0 },
{ 1856, 1392, 60, 0 },
{ 1856, 1392, 75, 0 },
{ 1920, 1200, 60, 1 },
@ -1264,6 +1273,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);
/**
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
*
* Return duplicate edid or NULL on allocation failure.
*/
struct edid *drm_edid_duplicate(const struct edid *edid)
{
return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
}
EXPORT_SYMBOL(drm_edid_duplicate);
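A minimal usage sketch (not part of this patch; the edid pointer is assumed
to come from drm_get_edid()): a driver that wants to keep the EDID past the
probe takes a private copy and later releases it with a plain kfree().
	struct edid *copy = drm_edid_duplicate(edid);
	if (!copy)
		return -ENOMEM;
	/* ... use copy independently of the connector ... */
	kfree(copy);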
/*** EDID parsing ***/
/**
@ -1308,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid)
}
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
/**
* edid_fixup_preferred - set preferred modes based on quirk list
@ -1323,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector,
{
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
if (list_empty(&connector->probed_modes))
return;
@ -1345,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector,
if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
preferred_mode = cur_mode;
cur_vrefresh = cur_mode->vrefresh ?
cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
preferred_vrefresh = preferred_mode->vrefresh ?
preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
/* At a given size, try to get closest to target refresh */
if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
MODE_REFRESH_DIFF(cur_mode, target_refresh) <
MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
preferred_mode = cur_mode;
}
}
@ -2068,7 +2094,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
u8 *est = ((u8 *)timing) + 5;
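	/* Each of the six EST III bytes carries eight mode bits; with j
	 * counting 7..0, m = i * 8 + (7 - j) walks them as 0..47, so the
	 * loop must include j == 0 or the last mode of every byte is lost. */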
for (i = 0; i < 6; i++) {
for (j = 7; j > 0; j--) {
for (j = 7; j >= 0; j--) {
m = (i * 8) + (7 - j);
if (m >= ARRAY_SIZE(est3_modes))
break;
@ -2404,7 +2430,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks(to_match, cea_mode))
drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
return mode + 1;
}
return 0;
@ -2453,7 +2479,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks(to_match, hdmi_mode))
drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
return mode + 1;
}
return 0;
@ -2507,6 +2533,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
if (!newmode)
continue;
/* Carry over the stereo flags */
newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
/*
* The current mode could be either variant. Make
* sure to pick the "other" clock for the new mode.
@ -2553,20 +2582,151 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
return modes;
}
struct stereo_mandatory_mode {
int width, height, vrefresh;
unsigned int flags;
};
static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
{ 1920, 1080, 50,
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
{ 1920, 1080, 60,
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
{ 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
{ 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
};
static bool
stereo_match_mandatory(const struct drm_display_mode *mode,
const struct stereo_mandatory_mode *stereo_mode)
{
unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
return mode->hdisplay == stereo_mode->width &&
mode->vdisplay == stereo_mode->height &&
interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
}
static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
const struct drm_display_mode *mode;
struct list_head stereo_modes;
int modes = 0, i;
INIT_LIST_HEAD(&stereo_modes);
list_for_each_entry(mode, &connector->probed_modes, head) {
for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
const struct stereo_mandatory_mode *mandatory;
struct drm_display_mode *new_mode;
if (!stereo_match_mandatory(mode,
&stereo_mandatory_modes[i]))
continue;
mandatory = &stereo_mandatory_modes[i];
new_mode = drm_mode_duplicate(dev, mode);
if (!new_mode)
continue;
new_mode->flags |= mandatory->flags;
list_add_tail(&new_mode->head, &stereo_modes);
modes++;
}
}
list_splice_tail(&stereo_modes, &connector->probed_modes);
return modes;
}
static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
vic--; /* VICs start at 1 */
if (vic >= ARRAY_SIZE(edid_4k_modes)) {
DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
return 0;
}
newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
if (!newmode)
return 0;
drm_mode_probed_add(connector, newmode);
return 1;
}
static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
const u8 *video_db, u8 video_len, u8 video_index)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
int modes = 0;
u8 cea_mode;
if (video_db == NULL || video_index >= video_len)
return 0;
/* CEA modes are numbered 1..127 */
cea_mode = (video_db[video_index] & 127) - 1;
if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
return 0;
if (structure & (1 << 0)) {
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (structure & (1 << 6)) {
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (structure & (1 << 8)) {
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
return modes;
}
/*
* do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
* @connector: connector corresponding to the HDMI sink
* @db: start of the CEA vendor specific block
* @len: length of the CEA block payload, ie. one can access up to db[len]
*
* Parses the HDMI VSDB looking for modes to add to @connector.
* Parses the HDMI VSDB looking for modes to add to @connector. This function
* also adds the stereo 3d modes when applicable.
*/
static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
const u8 *video_db, u8 video_len)
{
struct drm_device *dev = connector->dev;
int modes = 0, offset = 0, i;
u8 vic_len;
int modes = 0, offset = 0, i, multi_present = 0;
u8 vic_len, hdmi_3d_len = 0;
u16 mask;
u16 structure_all;
if (len < 8)
goto out;
@ -2585,30 +2745,56 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
/* the declared length is not long enough for the first 2 bytes
* of additional video format capabilities */
offset += 2;
if (len < (8 + offset))
if (len < (8 + offset + 2))
goto out;
/* 3D_Present */
offset++;
if (db[8 + offset] & (1 << 7)) {
modes += add_hdmi_mandatory_stereo_modes(connector);
/* 3D_Multi_present */
multi_present = (db[8 + offset] & 0x60) >> 5;
}
offset++;
vic_len = db[8 + offset] >> 5;
hdmi_3d_len = db[8 + offset] & 0x1f;
for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
struct drm_display_mode *newmode;
u8 vic;
vic = db[9 + offset + i];
modes += add_hdmi_mode(connector, vic);
}
offset += 1 + vic_len;
vic--; /* VICs start at 1 */
if (vic >= ARRAY_SIZE(edid_4k_modes)) {
DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
continue;
}
if (!(multi_present == 1 || multi_present == 2))
goto out;
newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
if (!newmode)
continue;
if ((multi_present == 1 && len < (9 + offset)) ||
(multi_present == 2 && len < (11 + offset)))
goto out;
drm_mode_probed_add(connector, newmode);
modes++;
if ((multi_present == 1 && hdmi_3d_len < 2) ||
(multi_present == 2 && hdmi_3d_len < 4))
goto out;
/* 3D_Structure_ALL */
structure_all = (db[8 + offset] << 8) | db[9 + offset];
/* check if 3D_MASK is present */
if (multi_present == 2)
mask = (db[10 + offset] << 8) | db[11 + offset];
else
mask = 0xffff;
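	/* Worked example (values invented): 3D_Structure_ALL = 0x0141 has
	 * bits 0, 6 and 8 set, i.e. frame packing, top-and-bottom and
	 * side-by-side half; with 3D_Multi_present == 2 and 3D_MASK = 0x0003
	 * those layouts are added only for the first two VIC entries of the
	 * video data block. */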
for (i = 0; i < 16; i++) {
if (mask & (1 << i))
modes += add_3d_struct_modes(connector,
structure_all,
video_db,
video_len, i);
}
out:
@ -2668,8 +2854,8 @@ static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
const u8 *cea = drm_find_cea_extension(edid);
const u8 *db;
u8 dbl;
const u8 *db, *hdmi = NULL, *video = NULL;
u8 dbl, hdmi_len, video_len = 0;
int modes = 0;
if (cea && cea_revision(cea) >= 3) {
@ -2682,13 +2868,26 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
db = &cea[i];
dbl = cea_db_payload_len(db);
if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes(connector, db + 1, dbl);
else if (cea_db_is_hdmi_vsdb(db))
modes += do_hdmi_vsdb_modes(connector, db, dbl);
if (cea_db_tag(db) == VIDEO_BLOCK) {
video = db + 1;
video_len = dbl;
modes += do_cea_modes(connector, video, dbl);
}
else if (cea_db_is_hdmi_vsdb(db)) {
hdmi = db;
hdmi_len = dbl;
}
}
}
/*
* We parse the HDMI VSDB after having added the cea modes as we will
* be patching their flags when the sink supports stereo 3D.
*/
if (hdmi)
modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
video_len);
return modes;
}
@ -3288,6 +3487,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_add_modes_noedid);
void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
if (drm_mode_width(mode) == hpref &&
drm_mode_height(mode) == vpref)
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
}
EXPORT_SYMBOL(drm_set_preferred_mode);
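A sketch of the intended call pattern, for a hypothetical output with no
EDID but a known native resolution (the 1366x768 figures are only an
example):
	count = drm_add_modes_noedid(connector, 1366, 768);
	drm_set_preferred_mode(connector, 1366, 768);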
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
@ -3321,6 +3533,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
switch (layout) {
case DRM_MODE_FLAG_3D_FRAME_PACKING:
return HDMI_3D_STRUCTURE_FRAME_PACKING;
case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
case DRM_MODE_FLAG_3D_L_DEPTH:
return HDMI_3D_STRUCTURE_L_DEPTH;
case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
default:
return HDMI_3D_STRUCTURE_INVALID;
}
}
/**
* drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
* data from a DRM display mode
@ -3338,20 +3577,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode)
{
int err;
u32 s3d_flags;
u8 vic;
if (!frame || !mode)
return -EINVAL;
vic = drm_match_hdmi_mode(mode);
if (!vic)
s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
if (!vic && !s3d_flags)
return -EINVAL;
if (vic && s3d_flags)
return -EINVAL;
err = hdmi_vendor_infoframe_init(frame);
if (err < 0)
return err;
frame->vic = vic;
if (vic)
frame->vic = vic;
else
frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
}
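A hedged sketch of how an HDMI encoder could consume this helper together
with the pack helper from <linux/hdmi.h>; the buffer size and the final
write_infoframe() step are illustrative, not a real driver API:
	struct hdmi_vendor_infoframe frame;
	u8 buf[32];
	ssize_t len;
	if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) < 0)
		return;	/* mode carries neither a 4k VIC nor a 3D layout */
	len = hdmi_vendor_infoframe_pack(&frame, buf, sizeof(buf));
	if (len > 0)
		write_infoframe(encoder, buf, len);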


@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
#define GENERIC_EDIDS 5
static char *generic_edid_name[GENERIC_EDIDS] = {
static const char *generic_edid_name[GENERIC_EDIDS] = {
"edid/1024x768.bin",
"edid/1280x1024.bin",
"edid/1600x1200.bin",
@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
"edid/1920x1080.bin",
};
static u8 generic_edid[GENERIC_EDIDS][128] = {
static const u8 generic_edid[GENERIC_EDIDS][128] = {
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
},
};
static int edid_size(const u8 *edid, int data_size)
{
if (data_size < EDID_LENGTH)
return 0;
return (edid[0x7e] + 1) * EDID_LENGTH;
}
static u8 *edid_load(struct drm_connector *connector, const char *name,
const char *connector_name)
{
const struct firmware *fw;
struct platform_device *pdev;
u8 *fwdata = NULL, *edid, *new_edid;
int fwsize, expected;
int builtin = 0, err = 0;
const struct firmware *fw = NULL;
const u8 *fwdata;
u8 *edid;
int fwsize, builtin;
int i, valid_extensions = 0;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
if (IS_ERR(pdev)) {
DRM_ERROR("Failed to register EDID firmware platform device "
"for connector \"%s\"\n", connector_name);
err = -EINVAL;
goto out;
}
err = request_firmware(&fw, name, &pdev->dev);
platform_device_unregister(pdev);
if (err) {
i = 0;
while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
i++;
if (i < GENERIC_EDIDS) {
err = 0;
builtin = 1;
builtin = 0;
for (i = 0; i < GENERIC_EDIDS; i++) {
if (strcmp(name, generic_edid_name[i]) == 0) {
fwdata = generic_edid[i];
fwsize = sizeof(generic_edid[i]);
builtin = 1;
break;
}
}
if (!builtin) {
struct platform_device *pdev;
int err;
if (err) {
DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
name, err);
goto out;
}
pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
if (IS_ERR(pdev)) {
DRM_ERROR("Failed to register EDID firmware platform device "
"for connector \"%s\"\n", connector_name);
return ERR_CAST(pdev);
}
if (fwdata == NULL) {
fwdata = (u8 *) fw->data;
err = request_firmware(&fw, name, &pdev->dev);
platform_device_unregister(pdev);
if (err) {
DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
name, err);
return ERR_PTR(err);
}
fwdata = fw->data;
fwsize = fw->size;
}
expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
if (expected != fwsize) {
if (edid_size(fwdata, fwsize) != fwsize) {
DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
"(expected %d, got %d)\n", name, expected, (int) fwsize);
err = -EINVAL;
goto relfw_out;
"(expected %d, got %d\n", name,
edid_size(fwdata, fwsize), (int)fwsize);
edid = ERR_PTR(-EINVAL);
goto out;
}
edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
if (edid == NULL) {
err = -ENOMEM;
goto relfw_out;
edid = ERR_PTR(-ENOMEM);
goto out;
}
if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
kfree(edid);
err = -EINVAL;
goto relfw_out;
edid = ERR_PTR(-EINVAL);
goto out;
}
for (i = 1; i <= edid[0x7e]; i++) {
@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
}
if (valid_extensions != edid[0x7e]) {
u8 *new_edid;
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
DRM_INFO("Found %d valid extensions instead of %d in EDID data "
"\"%s\" for connector \"%s\"\n", valid_extensions,
edid[0x7e], name, connector_name);
edid[0x7e] = valid_extensions;
new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
GFP_KERNEL);
if (new_edid == NULL) {
err = -ENOMEM;
kfree(edid);
goto relfw_out;
}
edid = new_edid;
GFP_KERNEL);
if (new_edid)
edid = new_edid;
}
DRM_INFO("Got %s EDID base block and %d extension%s from "
@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
name, connector_name);
relfw_out:
release_firmware(fw);
out:
if (err)
return ERR_PTR(err);
if (fw)
release_firmware(fw);
return edid;
}


@ -39,10 +39,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
/**
@ -844,7 +840,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
int ret = 0;
int i;
@ -855,8 +850,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
}
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->x = var->xoffset;
@ -1352,7 +1345,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *best_crtc;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@ -1364,7 +1356,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
connector = fb_helper_conn->connector;
best_crtcs[n] = NULL;
best_crtc = NULL;
best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
@ -1413,7 +1404,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
width, height);
if (score > best_score) {
best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
@ -1580,8 +1570,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
u32 max_width, max_height;
if (!fb_helper->fb)
return 0;
@ -1596,10 +1585,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
max_width = fb_helper->fb->width;
max_height = fb_helper->fb->height;
bpp_sel = fb_helper->fb->bits_per_pixel;
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
mutex_unlock(&fb_helper->dev->mode_config.mutex);
drm_modeset_lock_all(dev);


@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(inode, filp, dev);
if (retcode)
goto err_undo;
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
if (need_setup) {
retcode = drm_setup(dev);
if (retcode)
@ -235,7 +234,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->always_authenticated = capable(CAP_SYS_ADMIN);
priv->authenticated = priv->always_authenticated;
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
@ -374,12 +374,79 @@ static void drm_events_release(struct drm_file *file_priv)
}
/* Remove unconsumed events */
list_for_each_entry_safe(e, et, &file_priv->event_list, link)
list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
list_del(&e->link);
e->destroy(e);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
/**
* drm_legacy_dev_reinit
*
* Reinitializes a legacy/ums drm device in its lastclose function.
*/
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->sigdata.lock = NULL;
dev->context_flag = 0;
dev->last_context = 0;
dev->if_version = 0;
}
/**
* Take down the DRM device.
*
* \param dev DRM device structure.
*
* Frees every resource in \p dev.
*
* \sa drm_device
*/
int drm_lastclose(struct drm_device * dev)
{
struct drm_vma_entry *vma, *vma_temp;
DRM_DEBUG("\n");
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
drm_agp_clear(dev);
drm_legacy_sg_cleanup(dev);
/* Clear vma list (only built for debugging) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
kfree(vma);
}
drm_legacy_dma_takedown(dev);
dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
drm_legacy_dev_reinit(dev);
DRM_DEBUG("lastclose completed\n");
return 0;
}
/**
* Release file.
*
@ -449,7 +516,6 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&pos->head);
kfree(pos);
--dev->ctx_count;
}
}
}
@ -463,7 +529,7 @@ int drm_release(struct inode *inode, struct file *filp)
list_for_each_entry(temp, &dev->filelist, lhead) {
if ((temp->master == file_priv->master) &&
(temp != file_priv))
temp->authenticated = 0;
temp->authenticated = temp->always_authenticated;
}
/**
@ -511,7 +577,6 @@ int drm_release(struct inode *inode, struct file *filp)
* End inline drm_release
*/
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count)) {
DRM_ERROR("Device busy: %d\n",


@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
return obj;
fput:
/* Object_init mangles the global counters - readjust them. */
fput(obj->filp);
free:
kfree(obj);
return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{


@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret;
struct drm_global_item *item = &glob[ref->global_type];
void *object;
mutex_lock(&item->mutex);
if (item->refcount == 0) {
@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
}
++item->refcount;
ref->object = item->object;
object = item->object;
mutex_unlock(&item->mutex);
return 0;
out_err:

Просмотреть файл

@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
mutex_lock(&dev->struct_mutex);
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
seq_printf(m, "CRTC %d enable: %d\n",
crtc, atomic_read(&dev->vblank_refcount[crtc]));
crtc, atomic_read(&dev->vblank[crtc].refcount));
seq_printf(m, "CRTC %d counter: %d\n",
crtc, drm_vblank_count(dev, crtc));
seq_printf(m, "CRTC %d last wait: %d\n",
crtc, dev->last_vblank_wait[crtc]);
crtc, dev->vblank[crtc].last_wait);
seq_printf(m, "CRTC %d in modeset: %d\n",
crtc, dev->vblank_inmodeset[crtc]);
crtc, dev->vblank[crtc].inmodeset);
}
mutex_unlock(&dev->struct_mutex);
return 0;


@ -302,6 +302,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return 0;
}
/**
* Set client capabilities
*/
int
drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_set_client_cap *req = data;
switch (req->capability) {
case DRM_CLIENT_CAP_STEREO_3D:
if (req->value > 1)
return -EINVAL;
file_priv->stereo_allowed = req->value;
break;
default:
return -EINVAL;
}
return 0;
}
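Seen from userspace this is an ordinary ioctl; a minimal sketch, assuming
an already-open DRM file descriptor and the drmIoctl() wrapper from libdrm:
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_STEREO_3D,
		.value = 1,	/* opt in to stereo 3D mode listings */
	};
	if (drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
		return;	/* old kernel or unknown capability */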
/**
* Setversion ioctl.
*


@ -43,9 +43,8 @@
#include <linux/export.h>
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, crtc, count) ( \
(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
((count) % DRM_VBLANKTIME_RBSIZE)])
#define vblanktimestamp(dev, crtc, count) \
((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
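The accessors used throughout this file imply per-crtc bookkeeping roughly
along these lines; a reconstruction from the dev->vblank[] users below, not
a quote of the header:
	struct drm_vblank_crtc {
		wait_queue_head_t queue;	/* vblank wait queue */
		struct timeval time[DRM_VBLANKTIME_RBSIZE]; /* timestamps */
		atomic_t count;			/* vblank counter */
		atomic_t refcount;		/* users of vblank interrupts */
		u32 last;			/* hw counter at last disable */
		u32 last_wait;			/* last seqno waited upon */
		int inmodeset;			/* display driver in modeset */
		bool enabled;			/* interrupts enabled */
	};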
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
*/
static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
{
memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
}
/*
@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
dev->driver->disable_vblank(dev, crtc);
dev->vblank_enabled[crtc] = 0;
dev->vblank[crtc].enabled = false;
/* No further vblank irq's will be processed after
* this point. Get current hardware vblank count and
@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* delayed gpu counter increment.
*/
do {
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
} while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
if (!count)
vblrc = 0;
@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
*/
vblcount = atomic_read(&dev->_vblank_count[crtc]);
vblcount = atomic_read(&dev->vblank[crtc].count);
diff_ns = timeval_to_ns(&tvblank) -
timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* hope for the best.
*/
if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->_vblank_count[crtc]);
atomic_inc(&dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
}
@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
for (i = 0; i < dev->num_crtcs; i++) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
if (atomic_read(&dev->vblank[i].refcount) == 0 &&
dev->vblank[i].enabled) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
vblank_disable_and_save(dev, i);
}
@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
vblank_disable_fn((unsigned long)dev);
kfree(dev->vbl_queue);
kfree(dev->_vblank_count);
kfree(dev->vblank_refcount);
kfree(dev->vblank_enabled);
kfree(dev->last_vblank);
kfree(dev->last_vblank_wait);
kfree(dev->vblank_inmodeset);
kfree(dev->_vblank_time);
kfree(dev->vblank);
dev->num_crtcs = 0;
}
@ -221,42 +212,14 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
dev->num_crtcs = num_crtcs;
dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
GFP_KERNEL);
if (!dev->vbl_queue)
dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
if (!dev->vblank)
goto err;
dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
if (!dev->_vblank_count)
goto err;
for (i = 0; i < num_crtcs; i++)
init_waitqueue_head(&dev->vblank[i].queue);
dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
GFP_KERNEL);
if (!dev->vblank_refcount)
goto err;
dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
if (!dev->vblank_enabled)
goto err;
dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
if (!dev->last_vblank)
goto err;
dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
if (!dev->last_vblank_wait)
goto err;
dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
if (!dev->vblank_inmodeset)
goto err;
dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
sizeof(struct timeval), GFP_KERNEL);
if (!dev->_vblank_time)
goto err;
DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
/* Driver specific high-precision vblank timestamping supported? */
if (dev->driver->get_vblank_timestamp)
@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
else
DRM_INFO("No driver support for vblank timestamp query.\n");
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
init_waitqueue_head(&dev->vbl_queue[i]);
atomic_set(&dev->_vblank_count[i], 0);
atomic_set(&dev->vblank_refcount[i], 0);
}
dev->vblank_disable_allowed = false;
dev->vblank_disable_allowed = 0;
return 0;
err:
@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
dev->irq_enabled = 1;
dev->irq_enabled = true;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
return ret;
}
@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
int drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
int irq_enabled, i;
bool irq_enabled;
int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
mutex_lock(&dev->struct_mutex);
irq_enabled = dev->irq_enabled;
dev->irq_enabled = 0;
dev->irq_enabled = false;
mutex_unlock(&dev->struct_mutex);
/*
@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
DRM_WAKEUP(&dev->vbl_queue[i]);
dev->vblank_enabled[i] = 0;
dev->last_vblank[i] =
DRM_WAKEUP(&dev->vblank[i].queue);
dev->vblank[i].enabled = false;
dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@ -628,24 +586,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* code gets preempted or delayed for some reason.
*/
for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
/* Disable preemption to make it very likely to
* succeed in the first iteration even on PREEMPT_RT kernel.
/*
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
preempt_disable();
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
&hpos, &stime, &etime);
/* Get system timestamp before query. */
stime = ktime_get();
/* Get vertical and horizontal scanout pos. vpos, hpos. */
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
/* Get system timestamp after query. */
etime = ktime_get();
/*
* Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
* CLOCK_REALTIME is requested.
*/
if (!drm_timestamp_monotonic)
mono_time_offset = ktime_get_monotonic_offset();
preempt_enable();
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@ -653,6 +607,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
/* Compute uncertainty in timestamp of scanout position query. */
duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
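The call above implies the driver hook now returns the bounding timestamps
itself; a signature reconstructed from the call site (argument types are
inferred, not quoted from the header):
	int (*get_scanout_position)(struct drm_device *dev, int crtc,
				    int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime);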
@ -795,7 +750,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
return atomic_read(&dev->_vblank_count[crtc]);
return atomic_read(&dev->vblank[crtc].count);
}
EXPORT_SYMBOL(drm_vblank_count);
@ -824,10 +779,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
* a seqlock.
*/
do {
cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
cur_vblank = atomic_read(&dev->vblank[crtc].count);
*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
smp_rmb();
} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
} while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
return cur_vblank;
}
@ -914,12 +869,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
/* Deal with counter wrap */
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
diff = cur_vblank - dev->vblank[crtc].last;
if (cur_vblank < dev->vblank[crtc].last) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, dev->last_vblank[crtc], cur_vblank, diff);
crtc, dev->vblank[crtc].last, cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@ -930,12 +885,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* reinitialize delayed at next vblank interrupt in that case.
*/
if (rc) {
tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
tslot = atomic_read(&dev->vblank[crtc].count) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
smp_mb__before_atomic_inc();
atomic_add(diff, &dev->_vblank_count[crtc]);
atomic_add(diff, &dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
}
@ -957,9 +912,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
if (!dev->vblank_enabled[crtc]) {
if (!dev->vblank[crtc].enabled) {
/* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
* until we are done reinitializing master counter and
@ -970,16 +925,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
crtc, ret);
if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
atomic_dec(&dev->vblank[crtc].refcount);
else {
dev->vblank_enabled[crtc] = 1;
dev->vblank[crtc].enabled = true;
drm_update_vblank_count(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
} else {
if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
if (!dev->vblank[crtc].enabled) {
atomic_dec(&dev->vblank[crtc].refcount);
ret = -EINVAL;
}
}
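The usual pairing, as a sketch (the crtc index and the flip-queueing step
are placeholders):
	ret = drm_vblank_get(dev, crtc);
	if (ret)
		return ret;
	/* ... queue the flip, deliver the vblank event ... */
	drm_vblank_put(dev, crtc);	/* last put arms the disable timer */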
@ -999,10 +954,10 @@ EXPORT_SYMBOL(drm_vblank_get);
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
(drm_vblank_offdelay > 0))
mod_timer(&dev->vblank_disable_timer,
jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@ -1025,7 +980,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
DRM_WAKEUP(&dev->vblank[crtc].queue);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
@ -1067,10 +1022,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
* to avoid corrupting the count if multiple, mismatch calls occur),
* so that interrupts remain enabled in the interim.
*/
if (!dev->vblank_inmodeset[crtc]) {
dev->vblank_inmodeset[crtc] = 0x1;
if (!dev->vblank[crtc].inmodeset) {
dev->vblank[crtc].inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
dev->vblank_inmodeset[crtc] |= 0x2;
dev->vblank[crtc].inmodeset |= 0x2;
}
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
@ -1083,15 +1038,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
if (!dev->num_crtcs)
return;
if (dev->vblank_inmodeset[crtc]) {
if (dev->vblank[crtc].inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (dev->vblank_inmodeset[crtc] & 0x2)
if (dev->vblank[crtc].inmodeset & 0x2)
drm_vblank_put(dev, crtc);
dev->vblank_inmodeset[crtc] = 0;
dev->vblank[crtc].inmodeset = 0;
}
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
@ -1288,8 +1243,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
dev->last_vblank_wait[crtc] = vblwait->request.sequence;
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
dev->vblank[crtc].last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->irq_enabled));
@ -1367,7 +1322,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/* Vblank irq handling disabled. Nothing to do. */
if (!dev->vblank_enabled[crtc]) {
if (!dev->vblank[crtc].enabled) {
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
return false;
}
@ -1377,7 +1332,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
*/
/* Get current timestamp and count. */
vblcount = atomic_read(&dev->_vblank_count[crtc]);
vblcount = atomic_read(&dev->vblank[crtc].count);
drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
/* Compute time difference to timestamp of last vblank */
@ -1401,14 +1356,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* the timestamp computed above.
*/
smp_mb__before_atomic_inc();
atomic_inc(&dev->_vblank_count[crtc]);
atomic_inc(&dev->vblank[crtc].count);
smp_mb__after_atomic_inc();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
}
DRM_WAKEUP(&dev->vbl_queue[crtc]);
DRM_WAKEUP(&dev->vblank[crtc].queue);
drm_handle_vblank_events(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);


@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (drm_lock_take(&master->lock, lock->context)) {
master->lock.file_priv = file_priv;
master->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
if (drm_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}


@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
/**
* drm_mode_set_crtcinfo - set CRTC modesetting parameters
* @p: mode
* @adjust_flags: unused? (FIXME)
* @adjust_flags: a combination of adjustment flags
*
* LOCKING:
* None.
*
* Set up the CRTC modesetting parameters for @p, adjusting if necessary.
*
* - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
* interlaced modes.
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
* buffers containing two eyes (only adjust the timings when needed, eg. for
* "frame packing" or "side by side full").
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
return;
p->crtc_clock = p->clock;
p->crtc_hdisplay = p->hdisplay;
p->crtc_hsync_start = p->hsync_start;
p->crtc_hsync_end = p->hsync_end;
@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vtotal *= p->vscan;
}
if (adjust_flags & CRTC_STEREO_DOUBLE) {
unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
switch (layout) {
case DRM_MODE_FLAG_3D_FRAME_PACKING:
p->crtc_clock *= 2;
p->crtc_vdisplay += p->crtc_vtotal;
p->crtc_vsync_start += p->crtc_vtotal;
p->crtc_vsync_end += p->crtc_vtotal;
p->crtc_vtotal += p->crtc_vtotal;
break;
}
}
p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
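As a concrete check of the frame-packing arithmetic (standard CEA 1080p24
timings, not taken from this patch): vtotal is 1125 and the clock 74250 kHz,
so with DRM_MODE_FLAG_3D_FRAME_PACKING set the CRTC ends up with
crtc_clock = 148500, crtc_vdisplay = 1080 + 1125 = 2205 and
crtc_vtotal = 2 * 1125 = 2250, i.e. both eyes plus the original blanking are
scanned out in every frame.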
@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
} else if (mode1->clock != mode2->clock)
return false;
return drm_mode_equal_no_clocks(mode1, mode2);
if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
(mode2->flags & DRM_MODE_FLAG_3D_MASK))
return false;
return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
/**
* drm_mode_equal_no_clocks - test modes for equality
* drm_mode_equal_no_clocks_no_stereo - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
* None.
*
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks.
* don't check the pixel clocks nor the stereo layout.
*
* RETURNS:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
if (mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
mode1->flags == mode2->flags)
(mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
(mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
return true;
return false;
}
EXPORT_SYMBOL(drm_mode_equal_no_clocks);
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
@ -1014,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
mode->type |= pmode->type;
mode->type = pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;


@ -80,7 +80,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
SetPageReserved(virt_to_page((void *)addr));
}
return dmah;
@ -103,7 +103,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
ClearPageReserved(virt_to_page((void *)addr));
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
DRM_DEBUG("\n");
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
dev = drm_dev_alloc(driver, &pdev->dev);
if (!dev)
return -ENOMEM;
ret = pci_enable_device(pdev);
if (ret)
goto err_g1;
goto err_free;
dev->pdev = pdev;
dev->dev = &pdev->dev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
mutex_lock(&drm_global_mutex);
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (drm_core_check_feature(dev, DRIVER_MODESET))
pci_set_drvdata(pdev, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g2;
}
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
if (ret)
goto err_g21;
}
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g3;
if (dev->driver->load) {
ret = dev->driver->load(dev, ent->driver_data);
if (ret)
goto err_g4;
}
/* setup the grouping for the legacy output */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_mode_group_init_legacy_group(dev,
&dev->primary->mode_group);
if (ret)
goto err_g4;
}
list_add_tail(&dev->driver_item, &driver->device_list);
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
goto err_pci;
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
mutex_unlock(&drm_global_mutex);
return 0;
err_g4:
drm_put_minor(&dev->primary);
err_g3:
if (dev->render)
drm_put_minor(&dev->render);
err_g21:
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g2:
err_pci:
pci_disable_device(pdev);
err_g1:
kfree(dev);
mutex_unlock(&drm_global_mutex);
err_free:
drm_dev_free(dev);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);


@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,
DRM_DEBUG("\n");
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
dev = drm_dev_alloc(driver, &platdev->dev);
if (!dev)
return -ENOMEM;
dev->platformdev = platdev;
dev->dev = &platdev->dev;
mutex_lock(&drm_global_mutex);
ret = drm_fill_in_dev(dev, NULL, driver);
if (ret) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g1;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g1;
}
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
if (ret)
goto err_g11;
}
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
ret = drm_dev_register(dev, 0);
if (ret)
goto err_g2;
if (dev->driver->load) {
ret = dev->driver->load(dev, 0);
if (ret)
goto err_g3;
}
/* setup the grouping for the legacy output */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_mode_group_init_legacy_group(dev,
&dev->primary->mode_group);
if (ret)
goto err_g3;
}
list_add_tail(&dev->driver_item, &driver->device_list);
mutex_unlock(&drm_global_mutex);
goto err_free;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
return 0;
err_g3:
drm_put_minor(&dev->primary);
err_g2:
if (dev->render)
drm_put_minor(&dev->render);
err_g11:
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g1:
kfree(dev);
mutex_unlock(&drm_global_mutex);
err_free:
drm_dev_free(dev);
return ret;
}


@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
unsigned count;
struct scatterlist *sg;
struct page *page;
u32 len, offset;
u32 len;
int pg_index;
dma_addr_t addr;
pg_index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
len = sg->length;
offset = sg->offset;
page = sg_page(sg);
addr = sg_dma_address(sg);


@ -254,81 +254,21 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
return 0;
}
int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
int retcode;
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
}
/* the DRM has 6 basic counters */
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
dev->types[1] = _DRM_STAT_OPENS;
dev->types[2] = _DRM_STAT_CLOSES;
dev->types[3] = _DRM_STAT_IOCTLS;
dev->types[4] = _DRM_STAT_LOCKS;
dev->types[5] = _DRM_STAT_UNLOCKS;
dev->driver = driver;
if (dev->driver->bus->agp_init) {
retcode = dev->driver->bus->agp_init(dev);
if (retcode)
goto error_out_unreg;
}
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
goto error_out_unreg;
}
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
if (retcode) {
DRM_ERROR("Cannot initialize graphics execution "
"manager (GEM)\n");
goto error_out_unreg;
}
}
return 0;
error_out_unreg:
drm_lastclose(dev);
return retcode;
}
EXPORT_SYMBOL(drm_fill_in_dev);
/**
* Get a secondary minor number.
* drm_get_minor - Allocate and register new DRM minor
* @dev: DRM device
* @minor: Pointer to where new minor is stored
* @type: Type of minor
*
* \param dev device data structure
* \param sec-minor structure to hold the assigned minor
* \return negative number on failure.
* Allocate a new minor of the given type and register it. A pointer to the new
* minor is returned in @minor.
* Caller must hold the global DRM mutex.
*
* Search an empty entry and initialize it to the given parameters. This
* routine assigns minor numbers to secondary heads of multi-headed cards.
* RETURNS:
* 0 on success, negative error code on failure.
*/
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
int type)
{
struct drm_minor *new_minor;
int ret;
@ -385,37 +325,48 @@ err_idr:
*minor = NULL;
return ret;
}
EXPORT_SYMBOL(drm_get_minor);
/**
* Put a secondary minor number.
* drm_unplug_minor - Unplug DRM minor
* @minor: Minor to unplug
*
* \param sec_minor - structure to be released
* \return always zero
* Unplugs the given DRM minor but keeps the object. After this returns,
* minor->dev is still valid, so existing open-files can still access it to get
* device information from their drm_file objects.
* If the minor is already unplugged or if @minor is NULL, nothing is done.
* The global DRM mutex must be held by the caller.
*/
int drm_put_minor(struct drm_minor **minor_p)
static void drm_unplug_minor(struct drm_minor *minor)
{
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
if (!minor || !device_is_registered(minor->kdev))
return;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_cleanup(minor);
#endif
drm_sysfs_device_remove(minor);
idr_remove(&drm_minors_idr, minor->index);
kfree(minor);
*minor_p = NULL;
return 0;
}
EXPORT_SYMBOL(drm_put_minor);
static void drm_unplug_minor(struct drm_minor *minor)
/**
* drm_put_minor - Destroy DRM minor
* @minor: Minor to destroy
*
* This calls drm_unplug_minor() on the given minor and then frees it. Nothing
* is done if @minor is NULL. It is fine to call this on already unplugged
* minors.
* The global DRM mutex must be held by the caller.
*/
static void drm_put_minor(struct drm_minor *minor)
{
drm_sysfs_device_remove(minor);
if (!minor)
return;
DRM_DEBUG("release secondary minor %d\n", minor->index);
drm_unplug_minor(minor);
kfree(minor);
}
/**
@ -427,47 +378,15 @@ static void drm_unplug_minor(struct drm_minor *minor)
*/
void drm_put_dev(struct drm_device *dev)
{
struct drm_driver *driver;
struct drm_map_list *r_list, *list_temp;
DRM_DEBUG("\n");
if (!dev) {
DRM_ERROR("cleanup called no dev\n");
return;
}
driver = dev->driver;
drm_lastclose(dev);
if (dev->driver->unload)
dev->driver->unload(dev);
if (dev->driver->bus->agp_destroy)
dev->driver->bus->agp_destroy(dev);
drm_vblank_cleanup(dev);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
if (dev->render)
drm_put_minor(&dev->render);
if (driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
kfree(dev->devname);
kfree(dev);
drm_dev_unregister(dev);
drm_dev_free(dev);
}
EXPORT_SYMBOL(drm_put_dev);
@ -490,3 +409,206 @@ void drm_unplug_dev(struct drm_device *dev)
mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
/**
* drm_dev_alloc - Allocate new drm device
* @driver: DRM driver to allocate device for
* @parent: Parent device object
*
* Allocate and initialize a new DRM device. No device registration is done.
* Call drm_dev_register() to advertise the device to user space and register it
* with other core subsystems.
*
* RETURNS:
* Pointer to new DRM device, or NULL if out of memory.
*/
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct device *parent)
{
struct drm_device *dev;
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->dev = parent;
dev->driver = driver;
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
if (drm_ht_create(&dev->map_hash, 12))
goto err_free;
ret = drm_ctxbitmap_init(dev);
if (ret) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
goto err_ht;
}
if (driver->driver_features & DRIVER_GEM) {
ret = drm_gem_init(dev);
if (ret) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
goto err_ctxbitmap;
}
}
return dev;
err_ctxbitmap:
drm_ctxbitmap_cleanup(dev);
err_ht:
drm_ht_remove(&dev->map_hash);
err_free:
kfree(dev);
return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);
/**
* drm_dev_free - Free DRM device
* @dev: DRM device to free
*
* Free a DRM device that has previously been allocated via drm_dev_alloc().
* You must not use kfree() instead or you will leak memory.
*
* This must not be called once the device got registered. Use drm_put_dev()
* instead, which then calls drm_dev_free().
*/
void drm_dev_free(struct drm_device *dev)
{
drm_put_minor(dev->control);
drm_put_minor(dev->render);
drm_put_minor(dev->primary);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
kfree(dev->devname);
kfree(dev);
}
EXPORT_SYMBOL(drm_dev_free);
/**
* drm_dev_register - Register DRM device
* @dev: Device to register
*
* Register the DRM device @dev with the system, advertise device to user-space
* and start normal device operation. @dev must be allocated via drm_dev_alloc()
* previously.
*
* Never call this twice on any device!
*
* RETURNS:
* 0 on success, negative error code on failure.
*/
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
int ret;
mutex_lock(&drm_global_mutex);
if (dev->driver->bus->agp_init) {
ret = dev->driver->bus->agp_init(dev);
if (ret)
goto out_unlock;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_agp;
}
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
if (ret)
goto err_control_node;
}
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
if (ret)
goto err_render_node;
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
goto err_primary_node;
}
/* setup grouping for legacy outputs */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_mode_group_init_legacy_group(dev,
&dev->primary->mode_group);
if (ret)
goto err_unload;
}
list_add_tail(&dev->driver_item, &dev->driver->device_list);
ret = 0;
goto out_unlock;
err_unload:
if (dev->driver->unload)
dev->driver->unload(dev);
err_primary_node:
drm_put_minor(dev->primary);
err_render_node:
drm_put_minor(dev->render);
err_control_node:
drm_put_minor(dev->control);
err_agp:
if (dev->driver->bus->agp_destroy)
dev->driver->bus->agp_destroy(dev);
out_unlock:
mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_register);
/**
* drm_dev_unregister - Unregister DRM device
* @dev: Device to unregister
*
* Unregister the DRM device from the system. This does the reverse of
* drm_dev_register() but does not deallocate the device. The caller must call
* drm_dev_free() to free all resources.
*/
void drm_dev_unregister(struct drm_device *dev)
{
struct drm_map_list *r_list, *list_temp;
drm_lastclose(dev);
if (dev->driver->unload)
dev->driver->unload(dev);
if (dev->driver->bus->agp_destroy)
dev->driver->bus->agp_destroy(dev);
drm_vblank_cleanup(dev);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
drm_unplug_minor(dev->control);
drm_unplug_minor(dev->render);
drm_unplug_minor(dev->primary);
list_del(&dev->driver_item);
}
EXPORT_SYMBOL(drm_dev_unregister);
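The matching teardown mirrors what drm_put_dev() above now reduces to (again an illustrative fragment; it assumes the probe stashed the device with platform_set_drvdata()):
static int my_remove(struct platform_device *pdev)
{
	struct drm_device *ddev = platform_get_drvdata(pdev);
	drm_dev_unregister(ddev);	/* reverse of drm_dev_register() */
	drm_dev_free(ddev);		/* reverse of drm_dev_alloc() */
	return 0;
}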


@ -22,8 +22,8 @@
#include <drm/drm_core.h>
#include <drm/drmP.h>
#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
#define to_drm_minor(d) dev_get_drvdata(d)
#define to_drm_connector(d) dev_get_drvdata(d)
static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
drm_class = NULL;
}
/**
* drm_sysfs_device_release - do nothing
* @dev: Linux device
*
* Normally, this would free the DRM device associated with @dev, along
* with cleaning up any other stuff. But we do that in the DRM core, so
* this function can just return and hope that the core does its job.
*/
static void drm_sysfs_device_release(struct device *dev)
{
memset(dev, 0, sizeof(struct device));
return;
}
/*
* Connector properties
*/
@ -380,11 +366,6 @@ static struct bin_attribute edid_attr = {
* properties (so far, connection status, dpms, mode list & edid) and
* generate a hotplug event so userspace knows there's a new connector
* available.
*
* Note:
* This routine should only be called *once* for each registered connector.
* A second call for an already registered connector will trigger the BUG_ON
* below.
*/
int drm_sysfs_connector_add(struct drm_connector *connector)
{
@ -394,29 +375,25 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
int i;
int ret;
/* We shouldn't get called more than once for the same connector */
BUG_ON(device_is_registered(&connector->kdev));
connector->kdev.parent = &dev->primary->kdev;
connector->kdev.class = drm_class;
connector->kdev.release = drm_sysfs_device_release;
if (connector->kdev)
return 0;
connector->kdev = device_create(drm_class, dev->primary->kdev,
0, connector, "card%d-%s",
dev->primary->index, drm_get_connector_name(connector));
DRM_DEBUG("adding \"%s\" to sysfs\n",
drm_get_connector_name(connector));
dev_set_name(&connector->kdev, "card%d-%s",
dev->primary->index, drm_get_connector_name(connector));
ret = device_register(&connector->kdev);
if (ret) {
DRM_ERROR("failed to register connector device: %d\n", ret);
if (IS_ERR(connector->kdev)) {
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
ret = PTR_ERR(connector->kdev);
goto out;
}
/* Standard attributes */
for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
if (ret)
goto err_out_files;
}
@ -433,7 +410,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_TV:
for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
if (ret)
goto err_out_files;
}
@ -442,7 +419,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
break;
}
ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
if (ret)
goto err_out_files;
@ -453,10 +430,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
err_out_files:
for (i = 0; i < opt_cnt; i++)
device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
for (i = 0; i < attr_cnt; i++)
device_remove_file(&connector->kdev, &connector_attrs[i]);
device_unregister(&connector->kdev);
device_remove_file(connector->kdev, &connector_attrs[i]);
device_unregister(connector->kdev);
out:
return ret;
@ -480,16 +457,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
{
int i;
if (!connector->kdev.parent)
if (!connector->kdev)
return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
drm_get_connector_name(connector));
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
device_remove_file(&connector->kdev, &connector_attrs[i]);
sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
device_unregister(&connector->kdev);
connector->kdev.parent = NULL;
device_remove_file(connector->kdev, &connector_attrs[i]);
sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
device_unregister(connector->kdev);
connector->kdev = NULL;
}
EXPORT_SYMBOL(drm_sysfs_connector_remove);
@ -508,7 +485,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
DRM_DEBUG("generating hotplug event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
@ -523,15 +500,8 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
*/
int drm_sysfs_device_add(struct drm_minor *minor)
{
int err;
char *minor_str;
minor->kdev.parent = minor->dev->dev;
minor->kdev.class = drm_class;
minor->kdev.release = drm_sysfs_device_release;
minor->kdev.devt = minor->device;
minor->kdev.type = &drm_sysfs_device_minor;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
else if (minor->type == DRM_MINOR_RENDER)
@ -539,18 +509,14 @@ int drm_sysfs_device_add(struct drm_minor *minor)
else
minor_str = "card%d";
dev_set_name(&minor->kdev, minor_str, minor->index);
err = device_register(&minor->kdev);
if (err) {
DRM_ERROR("device add failed: %d\n", err);
goto err_out;
minor->kdev = device_create(drm_class, minor->dev->dev,
MKDEV(DRM_MAJOR, minor->index),
minor, minor_str, minor->index);
if (IS_ERR(minor->kdev)) {
DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
return PTR_ERR(minor->kdev);
}
return 0;
err_out:
return err;
}
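Note that device_create() receives the minor as its drvdata argument, which is exactly what the slimmed-down to_drm_minor()/to_drm_connector() macros at the top of this file read back. An illustrative fragment:
/* The drvdata round-trip behind the new accessor macros. */
static struct drm_minor *minor_from_kdev(struct device *kdev)
{
	return dev_get_drvdata(kdev);	/* stored by device_create() above */
}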
/**
@ -562,9 +528,9 @@ err_out:
*/
void drm_sysfs_device_remove(struct drm_minor *minor)
{
if (minor->kdev.parent)
device_unregister(&minor->kdev);
minor->kdev.parent = NULL;
if (minor->kdev)
device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
minor->kdev = NULL;
}


@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
struct drm_driver *driver)
{
struct drm_device *dev;
struct usb_device *usbdev;
int ret;
DRM_DEBUG("\n");
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
dev = drm_dev_alloc(driver, &interface->dev);
if (!dev)
return -ENOMEM;
usbdev = interface_to_usbdev(interface);
dev->usbdev = usbdev;
dev->dev = &interface->dev;
mutex_lock(&drm_global_mutex);
ret = drm_fill_in_dev(dev, NULL, driver);
if (ret) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g1;
}
dev->usbdev = interface_to_usbdev(interface);
usb_set_intfdata(interface, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
ret = drm_dev_register(dev, 0);
if (ret)
goto err_g1;
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
if (ret)
goto err_g11;
}
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
if (ret)
goto err_g2;
if (dev->driver->load) {
ret = dev->driver->load(dev, 0);
if (ret)
goto err_g3;
}
/* setup the grouping for the legacy output */
ret = drm_mode_group_init_legacy_group(dev,
&dev->primary->mode_group);
if (ret)
goto err_g3;
list_add_tail(&dev->driver_item, &driver->device_list);
mutex_unlock(&drm_global_mutex);
goto err_free;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,
return 0;
err_g3:
drm_put_minor(&dev->primary);
err_g2:
if (dev->render)
drm_put_minor(&dev->render);
err_g11:
drm_put_minor(&dev->control);
err_g1:
kfree(dev);
mutex_unlock(&drm_global_mutex);
err_free:
drm_dev_free(dev);
return ret;
}


@ -301,7 +301,7 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
page = virt_to_page((void *)dma->pagelist[page_nr]);
get_page(page);
vmf->page = page;
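The dropped arithmetic only added the offset within the page, which virt_to_page() masks off anyway, so both forms pick the same page. A sketch of the equivalence, assuming pagelist entries are kernel virtual addresses as the surrounding code implies:
unsigned long addr = dma->pagelist[page_nr];
/* (offset & ~PAGE_MASK) stays below PAGE_SIZE, so both expressions
 * resolve to the same struct page. */
struct page *before = virt_to_page(addr + (offset & (~PAGE_MASK)));
struct page *after = virt_to_page((void *)addr);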


@ -2,6 +2,7 @@ config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT


@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
.gem_init_object = exynos_drm_gem_init_object,
.gem_free_object = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,


@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
/*
* enable drm irq mode.
* - with irq_enabled = 1, we can use the vblank feature.
* - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
drm_dev->irq_enabled = 1;
drm_dev->irq_enabled = true;
/*
* with vblank_disable_allowed = 1, vblank interrupt will be disabled
* with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
drm_dev->vblank_disable_allowed = 1;
drm_dev->vblank_disable_allowed = true;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(drm_dev))


@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;


@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv);
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);


@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
{
struct vidi_context *ctx = get_vidi_context(dev);
struct edid *edid;
int edid_len;
/*
* the edid data comes from user side and it would be set
@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
return ERR_PTR(-EFAULT);
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
edid = drm_edid_duplicate(ctx->raw_edid);
if (!edid) {
DRM_DEBUG_KMS("failed to allocate edid\n");
return ERR_PTR(-ENOMEM);
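drm_edid_duplicate() is a new core helper adopted here; presumably it just centralizes the kmemdup pattern this hunk deletes, along the lines of:
struct edid *drm_edid_duplicate(const struct edid *edid)
{
	/* EDID blocks are EDID_LENGTH bytes, plus one per extension */
	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH,
		       GFP_KERNEL);
}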
@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
/*
* enable drm irq mode.
* - with irq_enabled = 1, we can use the vblank feature.
* - with irq_enabled = true, we can use the vblank feature.
*
* P.S. note that we wouldn't use drm irq handler but
* just specific driver own one instead because
* drm framework supports only one irq handler.
*/
drm_dev->irq_enabled = 1;
drm_dev->irq_enabled = true;
/*
* with vblank_disable_allowed = 1, vblank interrupt will be disabled
* with vblank_disable_allowed = true, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
drm_dev->vblank_disable_allowed = 1;
drm_dev->vblank_disable_allowed = true;
return 0;
}
@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct exynos_drm_manager *manager;
struct exynos_drm_display_ops *display_ops;
struct drm_exynos_vidi_connection *vidi = data;
int edid_len;
if (!vidi) {
DRM_DEBUG_KMS("user data for vidi is null.\n");
@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
DRM_DEBUG_KMS("edid data is invalid.\n");
return -EINVAL;
}
edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
ctx->raw_edid = drm_edid_duplicate(raw_edid);
if (!ctx->raw_edid) {
DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
return -ENOMEM;


@ -5,6 +5,7 @@ config DRM_GMA500
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI


@ -634,6 +634,7 @@ const struct psb_ops cdv_chip_ops = {
.crtcs = 2,
.hdmi_mask = (1 << 0) | (1 << 1),
.lvds_mask = (1 << 1),
.sdvo_mask = (1 << 0),
.cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = cdv_chip_setup,


@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = &connector->base.kdev;
intel_dp->adapter.dev.parent = connector->base.kdev;
if (is_edp(encoder))
cdv_intel_edp_panel_vdd_on(encoder);


@ -714,7 +714,7 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_ANALOG);
break;
case INTEL_OUTPUT_SDVO:
crtc_mask = ((1 << 0) | (1 << 1));
crtc_mask = dev_priv->ops->sdvo_mask;
clone_mask = (1 << INTEL_OUTPUT_SDVO);
break;
case INTEL_OUTPUT_LVDS:


@ -29,11 +29,6 @@
#include <drm/drm_vma_manager.h>
#include "psb_drv.h"
int psb_gem_init_object(struct drm_gem_object *obj)
{
return -EINVAL;
}
void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);


@ -51,6 +51,9 @@
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
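These wrappers route every GMBUS access through dev_priv->gmbus_reg, which the load path points at either the auxiliary or the primary VDC BAR (see the psb_drv.c hunk further down). Conceptually, a single assignment at load time redirects the whole file:
/* Illustrative: Moorestown keeps GMBUS on the aux VDC register block. */
dev_priv->gmbus_reg = IS_MRST(dev) ? dev_priv->aux_reg
				   : dev_priv->vdc_reg;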
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 20
@ -71,7 +74,8 @@ struct intel_gpio {
void
gma_intel_i2c_reset(struct drm_device *dev)
{
REG_WRITE(GMBUS0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;
GMBUS_REG_WRITE(GMBUS0, 0);
}
static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
@ -98,11 +102,10 @@ static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gpio *gpio)
{
struct drm_psb_private *dev_priv = gpio->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
reserved = REG_READ(gpio->reg) &
reserved = GMBUS_REG_READ(gpio->reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@ -113,29 +116,26 @@ static int get_clock(void *data)
{
struct intel_gpio *gpio = data;
struct drm_psb_private *dev_priv = gpio->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = get_reserved(gpio);
REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
REG_WRITE(gpio->reg, reserved);
return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
GMBUS_REG_WRITE(gpio->reg, reserved);
return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
struct intel_gpio *gpio = data;
struct drm_psb_private *dev_priv = gpio->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = get_reserved(gpio);
REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
REG_WRITE(gpio->reg, reserved);
return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
GMBUS_REG_WRITE(gpio->reg, reserved);
return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
struct intel_gpio *gpio = data;
struct drm_psb_private *dev_priv = gpio->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = get_reserved(gpio);
u32 clock_bits;
@ -145,15 +145,14 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
REG_WRITE(gpio->reg, reserved | clock_bits);
REG_READ(gpio->reg); /* Posting */
GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
GMBUS_REG_READ(gpio->reg); /* Posting */
}
static void set_data(void *data, int state_high)
{
struct intel_gpio *gpio = data;
struct drm_psb_private *dev_priv = gpio->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = get_reserved(gpio);
u32 data_bits;
@ -163,8 +162,8 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
REG_WRITE(gpio->reg, reserved | data_bits);
REG_READ(gpio->reg);
GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
GMBUS_REG_READ(gpio->reg);
}
static struct i2c_adapter *
@ -251,7 +250,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_psb_private *dev_priv = adapter->algo_data;
struct drm_device *dev = dev_priv->dev;
int i, reg_offset;
if (bus->force_bit)
@ -260,28 +258,30 @@ gmbus_xfer(struct i2c_adapter *adapter,
reg_offset = 0;
REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
u16 len = msgs[i].len;
u8 *buf = msgs[i].buf;
if (msgs[i].flags & I2C_M_RD) {
REG_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
REG_READ(GMBUS2+reg_offset);
GMBUS_REG_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT |
(i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
GMBUS_REG_READ(GMBUS2+reg_offset);
do {
u32 val, loop = 0;
if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
(GMBUS_SATOER | GMBUS_HW_RDY), 50))
goto timeout;
if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
goto clear_err;
val = REG_READ(GMBUS3 + reg_offset);
val = GMBUS_REG_READ(GMBUS3 + reg_offset);
do {
*buf++ = val & 0xff;
val >>= 8;
@ -295,18 +295,20 @@ gmbus_xfer(struct i2c_adapter *adapter,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
REG_WRITE(GMBUS3 + reg_offset, val);
REG_WRITE(GMBUS1 + reg_offset,
GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
GMBUS_REG_WRITE(GMBUS1 + reg_offset,
(i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
REG_READ(GMBUS2+reg_offset);
GMBUS_REG_READ(GMBUS2+reg_offset);
while (len) {
if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
(GMBUS_SATOER | GMBUS_HW_RDY), 50))
goto timeout;
if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
GMBUS_SATOER)
goto clear_err;
val = loop = 0;
@ -314,14 +316,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
REG_WRITE(GMBUS3 + reg_offset, val);
REG_READ(GMBUS2+reg_offset);
GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
GMBUS_REG_READ(GMBUS2+reg_offset);
}
}
if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
if (i + 1 < num && wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
goto timeout;
if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
goto clear_err;
}
@ -332,20 +334,20 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
REG_WRITE(GMBUS1 + reg_offset, 0);
GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
done:
/* Mark the GMBUS interface as disabled. We will re-enable it at the
* start of the next xfer, till then let it sleep.
*/
REG_WRITE(GMBUS0 + reg_offset, 0);
GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
return i;
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
bus->reg0 & 0xff, bus->adapter.name);
REG_WRITE(GMBUS0 + reg_offset, 0);
GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
@ -399,6 +401,11 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
if (dev_priv->gmbus == NULL)
return -ENOMEM;
if (IS_MRST(dev))
dev_priv->gmbus_reg = dev_priv->aux_reg;
else
dev_priv->gmbus_reg = dev_priv->vdc_reg;
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
@ -487,6 +494,7 @@ void gma_intel_teardown_gmbus(struct drm_device *dev)
i2c_del_adapter(&bus->adapter);
}
dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
kfree(dev_priv->gmbus);
dev_priv->gmbus = NULL;
}


@ -26,24 +26,10 @@
#include "gma_display.h"
#include "power.h"
struct psb_intel_range_t {
int min, max;
};
struct oaktrail_limit_t {
struct psb_intel_range_t dot, m, p1;
};
struct oaktrail_clock_t {
/* derived values */
int dot;
int m;
int p1;
};
#define MRST_LIMIT_LVDS_100L 0
#define MRST_LIMIT_LVDS_83 1
#define MRST_LIMIT_LVDS_100 2
#define MRST_LIMIT_LVDS_100L 0
#define MRST_LIMIT_LVDS_83 1
#define MRST_LIMIT_LVDS_100 2
#define MRST_LIMIT_SDVO 3
#define MRST_DOT_MIN 19750
#define MRST_DOT_MAX 120000
@ -57,21 +43,40 @@ struct oaktrail_clock_t {
#define MRST_P1_MAX_0 7
#define MRST_P1_MAX_1 8
static const struct oaktrail_limit_t oaktrail_limits[] = {
static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
int refclk, struct gma_clock_t *best_clock);
static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
int refclk, struct gma_clock_t *best_clock);
static const struct gma_limit_t mrst_limits[] = {
{ /* MRST_LIMIT_LVDS_100L */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
.find_pll = mrst_lvds_find_best_pll,
},
{ /* MRST_LIMIT_LVDS_83L */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
.find_pll = mrst_lvds_find_best_pll,
},
{ /* MRST_LIMIT_LVDS_100 */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
.find_pll = mrst_lvds_find_best_pll,
},
{ /* MRST_LIMIT_SDVO */
.vco = {.min = 1400000, .max = 2800000},
.n = {.min = 3, .max = 7},
.m = {.min = 80, .max = 137},
.p1 = {.min = 1, .max = 2},
.p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
.find_pll = mrst_sdvo_find_best_pll,
},
};
@ -82,9 +87,10 @@ static const u32 oaktrail_m_converts[] = {
0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
};
static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
int refclk)
{
const struct oaktrail_limit_t *limit = NULL;
const struct gma_limit_t *limit = NULL;
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
@ -92,45 +98,100 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
|| gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
switch (dev_priv->core_freq) {
case 100:
limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
break;
case 166:
limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
limit = &mrst_limits[MRST_LIMIT_LVDS_83];
break;
case 200:
limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
limit = &mrst_limits[MRST_LIMIT_LVDS_100];
break;
}
} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &mrst_limits[MRST_LIMIT_SDVO];
} else {
limit = NULL;
dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
dev_err(dev->dev, "mrst_limit Wrong display type.\n");
}
return limit;
}
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
{
clock->dot = (refclk * clock->m) / (14 * clock->p1);
}
static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
static void mrst_print_pll(struct gma_clock_t *clock)
{
pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
prefix, clock->dot, clock->m, clock->p1);
DRM_DEBUG_DRIVER("dotclock=%d, m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
clock->dot, clock->m, clock->m1, clock->m2, clock->n,
clock->p1, clock->p2);
}
static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
int refclk, struct gma_clock_t *best_clock)
{
struct gma_clock_t clock;
u32 target_vco, actual_freq;
s32 freq_error, min_error = 100000;
memset(best_clock, 0, sizeof(*best_clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max;
clock.n++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max; clock.p1++) {
/* p2 value always stored in p2_slow on SDVO */
clock.p = clock.p1 * limit->p2.p2_slow;
target_vco = target * clock.p;
/* VCO will increase at this point so break */
if (target_vco > limit->vco.max)
break;
if (target_vco < limit->vco.min)
continue;
actual_freq = (refclk * clock.m) /
(clock.n * clock.p);
freq_error = 10000 -
((target * 10000) / actual_freq);
if (freq_error < -min_error) {
/* freq_error will start to decrease at
this point so break */
break;
}
if (freq_error < 0)
freq_error = -freq_error;
if (freq_error < min_error) {
min_error = freq_error;
*best_clock = clock;
}
}
}
if (min_error == 0)
break;
}
return min_error == 0;
}
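The search minimizes a relative frequency error in units of 0.01%, so min_error's initial 100000 means any candidate under 1000% error wins at first. A standalone userspace sketch of the same brute force, minus the early-exit shortcuts, for a hypothetical 100 MHz target (all clocks in kHz):
#include <stdio.h>
#include <stdlib.h>
int main(void)
{
	const long refclk = 96000;	/* SDVO refclk used by mode_set */
	const long target = 100000;	/* hypothetical 100 MHz dot clock */
	long m, n, p1, best_m = 0, best_n = 0, best_p1 = 0;
	long min_error = 100000;
	for (m = 80; m <= 137; m++)
		for (n = 3; n <= 7; n++)
			for (p1 = 1; p1 <= 2; p1++) {
				long p = p1 * 10;	/* p2_slow == 10 */
				long vco = target * p;
				long actual, err;
				if (vco > 2800000)	/* VCO only grows; give up on p1 */
					break;
				if (vco < 1400000)
					continue;
				actual = refclk * m / (n * p);
				err = labs(10000 - target * 10000 / actual);
				if (err < min_error) {
					min_error = err;
					best_m = m;
					best_n = n;
					best_p1 = p1;
				}
			}
	printf("m=%ld n=%ld p1=%ld error=%.2f%%\n",
	       best_m, best_n, best_p1, min_error / 100.0);
	return 0;
}
With these limits the search lands on m = 125, n = 6, p1 = 2 exactly (96000 * 125 / (6 * 20) = 100000). Note the raw values are only converted to register fields later in mode_set: p1 becomes 1 << (p1 - 1), m loses 2 and n becomes 1 << (n - 1).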
/**
* Returns a set of divisors for the desired target clock with the given refclk,
* or FALSE. Divisor values are the actual divisors for
*/
static bool
mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
struct oaktrail_clock_t *best_clock)
static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
int refclk, struct gma_clock_t *best_clock)
{
struct oaktrail_clock_t clock;
const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
struct gma_clock_t clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@ -140,7 +201,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
clock.p1++) {
int this_err;
oaktrail_clock(refclk, &clock);
mrst_lvds_clock(refclk, &clock);
this_err = abs(clock.dot - target);
if (this_err < err) {
@ -149,7 +210,6 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
}
}
}
dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
return err != target;
}
@ -167,8 +227,10 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
int i;
int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
if (pipe == 1) {
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
oaktrail_crtc_hdmi_dpms(crtc, mode);
return;
}
@ -183,35 +245,45 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
REG_WRITE(map->dpll, temp);
REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
REG_WRITE(map->base, REG_READ(map->base));
}
for (i = 0; i <= need_aux; i++) {
/* Enable the DPLL */
temp = REG_READ_WITH_AUX(map->dpll, i);
if ((temp & DPLL_VCO_ENABLE) == 0) {
REG_WRITE_WITH_AUX(map->dpll, temp, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE_WITH_AUX(map->dpll,
temp | DPLL_VCO_ENABLE, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE_WITH_AUX(map->dpll,
temp | DPLL_VCO_ENABLE, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
temp = REG_READ_WITH_AUX(map->conf, i);
if ((temp & PIPEACONF_ENABLE) == 0) {
REG_WRITE_WITH_AUX(map->conf,
temp | PIPEACONF_ENABLE, i);
}
/* Enable the plane */
temp = REG_READ_WITH_AUX(map->cntr, i);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
REG_WRITE_WITH_AUX(map->cntr,
temp | DISPLAY_PLANE_ENABLE,
i);
/* Flush the plane changes */
REG_WRITE_WITH_AUX(map->base,
REG_READ_WITH_AUX(map->base, i), i);
}
}
gma_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
@ -223,48 +295,52 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
REG_WRITE(map->base, REG_READ(map->base));
REG_READ(map->base);
}
for (i = 0; i <= need_aux; i++) {
/* Disable the VGA plane that we never use */
REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
/* Disable display plane */
temp = REG_READ_WITH_AUX(map->cntr, i);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
REG_WRITE_WITH_AUX(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE, i);
/* Flush the plane changes */
REG_WRITE_WITH_AUX(map->base,
REG_READ(map->base), i);
REG_READ_WITH_AUX(map->base, i);
}
/* Next, disable display pipes */
temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
REG_READ(map->conf);
}
/* Wait for the pipe disable to take effect. */
gma_wait_for_vblank(dev);
/* Next, disable display pipes */
temp = REG_READ_WITH_AUX(map->conf, i);
if ((temp & PIPEACONF_ENABLE) != 0) {
REG_WRITE_WITH_AUX(map->conf,
temp & ~PIPEACONF_ENABLE, i);
REG_READ_WITH_AUX(map->conf, i);
}
/* Wait for the pipe disable to take effect. */
gma_wait_for_vblank(dev);
temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
REG_READ(map->dpll);
}
temp = REG_READ_WITH_AUX(map->dpll, i);
if ((temp & DPLL_VCO_ENABLE) != 0) {
REG_WRITE_WITH_AUX(map->dpll,
temp & ~DPLL_VCO_ENABLE, i);
REG_READ_WITH_AUX(map->dpll, i);
}
/* Wait for the clocks to turn off. */
udelay(150);
/* Wait for the clocks to turn off. */
udelay(150);
}
break;
}
/*Set FIFO Watermarks*/
REG_WRITE(DSPARB, 0x3FFF);
REG_WRITE(DSPFW1, 0x3F88080A);
REG_WRITE(DSPFW2, 0x0b060808);
/* Set FIFO Watermarks (values taken from EMGD) */
REG_WRITE(DSPARB, 0x3f80);
REG_WRITE(DSPFW1, 0x3f8f0404);
REG_WRITE(DSPFW2, 0x04040f04);
REG_WRITE(DSPFW3, 0x0);
REG_WRITE(DSPFW4, 0x08030404);
REG_WRITE(DSPFW4, 0x04040404);
REG_WRITE(DSPFW5, 0x04040404);
REG_WRITE(DSPFW6, 0x78);
REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
/* Must write Bit 14 of the Chicken Bit Register */
REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);
gma_power_end(dev);
}
@ -297,7 +373,8 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
struct oaktrail_clock_t clock;
struct gma_clock_t clock;
const struct gma_limit_t *limit;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
bool is_lvds = false;
@ -306,8 +383,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct gma_encoder *gma_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
int i;
int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
if (pipe == 1)
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
if (!gma_power_begin(dev, true))
@ -340,15 +419,17 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
}
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
for (i = 0; i <= need_aux; i++)
REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
/* Disable the panel fitter if it was on our pipe */
if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1), i);
}
if (gma_encoder)
drm_object_property_get_value(&connector->base,
@ -365,35 +446,39 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
REG_WRITE(map->hblank,
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
REG_WRITE(map->hsync,
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
REG_WRITE(map->vblank,
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
REG_WRITE(map->vsync,
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16), i);
REG_WRITE_WITH_AUX(map->hblank,
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
REG_WRITE_WITH_AUX(map->hsync,
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vblank,
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vsync,
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
}
} else {
REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16), i);
REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16), i);
REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16), i);
REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16), i);
}
}
/* Flush the plane changes */
@ -418,21 +503,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (is_mipi)
goto oaktrail_crtc_mode_set_exit;
refclk = dev_priv->core_freq * 1000;
dpll = 0; /*BIT16 = 0 for 100MHz reference */
ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
limit = mrst_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
refclk, &clock);
if (!ok) {
dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
} else {
dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
"m = %x, p1 = %x.\n", clock.dot, clock.m,
clock.p1);
if (is_sdvo) {
/* Convert calculated values to register values */
clock.p1 = (1L << (clock.p1 - 1));
clock.m -= 2;
clock.n = (1L << (clock.n - 1));
}
fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
if (!ok)
DRM_ERROR("Failed to find proper PLL settings");
mrst_print_pll(&clock);
if (is_sdvo)
fp = clock.n << 16 | clock.m;
else
fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
dpll |= DPLL_VGA_MODE_DIS;
@ -456,38 +550,43 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
/* compute bitmask from p1 value */
dpll |= (1 << (clock.p1 - 2)) << 17;
if (is_sdvo)
dpll |= clock.p1 << 16; // dpll |= (1 << (clock.p1 - 1)) << 16;
else
dpll |= (1 << (clock.p1 - 2)) << 17;
dpll |= DPLL_VCO_ENABLE;
mrstPrintPll("chosen", &clock);
if (dpll & DPLL_VCO_ENABLE) {
REG_WRITE(map->fp0, fp);
REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
REG_READ(map->dpll);
/* Check the DPLLA lock bit PIPEACONF[29] */
udelay(150);
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->fp0, fp, i);
REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Check the DPLLA lock bit PIPEACONF[29] */
udelay(150);
}
}
REG_WRITE(map->fp0, fp);
REG_WRITE(map->dpll, dpll);
REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
for (i = 0; i <= need_aux; i++) {
REG_WRITE_WITH_AUX(map->fp0, fp, i);
REG_WRITE_WITH_AUX(map->dpll, dpll, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
REG_WRITE(map->dpll, dpll);
REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
REG_WRITE_WITH_AUX(map->dpll, dpll, i);
REG_READ_WITH_AUX(map->dpll, i);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE(map->conf, pipeconf);
REG_READ(map->conf);
gma_wait_for_vblank(dev);
REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
REG_READ_WITH_AUX(map->conf, i);
gma_wait_for_vblank(dev);
REG_WRITE(map->cntr, dspcntr);
gma_wait_for_vblank(dev);
REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
gma_wait_for_vblank(dev);
}
oaktrail_crtc_mode_set_exit:
gma_power_end(dev);
@ -565,3 +664,9 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.commit = gma_crtc_commit,
};
/* Not used yet */
const struct gma_clock_funcs mrst_clock_funcs = {
.clock = mrst_lvds_clock,
.limit = mrst_limit,
.pll_is_valid = gma_pll_is_valid,
};


@ -40,6 +40,9 @@ static int oaktrail_output_init(struct drm_device *dev)
dev_err(dev->dev, "DSI is not supported\n");
if (dev_priv->hdmi_priv)
oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
psb_intel_sdvo_init(dev, SDVOB);
return 0;
}
@ -526,6 +529,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
}
gma_intel_setup_gmbus(dev);
oaktrail_hdmi_setup(dev);
return 0;
}
@ -534,6 +538,7 @@ static void oaktrail_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
gma_intel_teardown_gmbus(dev);
oaktrail_hdmi_teardown(dev);
if (!dev_priv->has_gct)
psb_intel_destroy_bios(dev);
@ -546,6 +551,7 @@ const struct psb_ops oaktrail_chip_ops = {
.crtcs = 2,
.hdmi_mask = (1 << 1),
.lvds_mask = (1 << 0),
.sdvo_mask = (1 << 1),
.cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,


@ -218,30 +218,6 @@ static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
.commit = oaktrail_lvds_commit,
};
static struct drm_display_mode lvds_configuration_modes[] = {
/* hard coded fixed mode for TPO LTPS LPJ040K001A */
{ DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
846, 1056, 0, 480, 489, 491, 525, 0, 0) },
/* hard coded fixed mode for LVDS 800x480 */
{ DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
802, 1024, 0, 480, 481, 482, 525, 0, 0) },
/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
/* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
/* hard coded fixed mode for LVDS 1024x768 */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
/* hard coded fixed mode for LVDS 1366x768 */
{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
};
/* Returns the panel fixed mode from configuration. */
static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
@ -303,10 +279,10 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
mode_dev->panel_fixed_mode =
drm_mode_duplicate(dev,
dev_priv->lfp_lvds_vbt_mode);
/* Then guess */
/* If we still got no mode then bail */
if (mode_dev->panel_fixed_mode == NULL)
mode_dev->panel_fixed_mode
= drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
return;
drm_mode_set_name(mode_dev->panel_fixed_mode);
drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);


@ -373,6 +373,7 @@ const struct psb_ops psb_chip_ops = {
.crtcs = 2,
.hdmi_mask = (1 << 0),
.lvds_mask = (1 << 1),
.sdvo_mask = (1 << 0),
.cursor_needs_phys = 1,
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,


@ -251,6 +251,12 @@ static int psb_driver_unload(struct drm_device *dev)
iounmap(dev_priv->sgx_reg);
dev_priv->sgx_reg = NULL;
}
if (dev_priv->aux_reg) {
iounmap(dev_priv->aux_reg);
dev_priv->aux_reg = NULL;
}
if (dev_priv->aux_pdev)
pci_dev_put(dev_priv->aux_pdev);
/* Destroy VBT data */
psb_intel_destroy_bios(dev);
@ -266,7 +272,7 @@ static int psb_driver_unload(struct drm_device *dev)
static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct drm_psb_private *dev_priv;
unsigned long resource_start;
unsigned long resource_start, resource_len;
unsigned long irqflags;
int ret = -ENOMEM;
struct drm_connector *connector;
@ -296,6 +302,30 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->sgx_reg)
goto out_err;
if (IS_MRST(dev)) {
dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
if (dev_priv->aux_pdev) {
resource_start = pci_resource_start(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
resource_len = pci_resource_len(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
dev_priv->aux_reg = ioremap_nocache(resource_start,
resource_len);
if (!dev_priv->aux_reg)
goto out_err;
DRM_DEBUG_KMS("Found aux vdc");
} else {
/* Couldn't find the aux vdc so map to primary vdc */
dev_priv->aux_reg = dev_priv->vdc_reg;
DRM_DEBUG_KMS("Couldn't find aux pci device");
}
dev_priv->gmbus_reg = dev_priv->aux_reg;
} else {
dev_priv->gmbus_reg = dev_priv->vdc_reg;
}
psb_intel_opregion_setup(dev);
ret = dev_priv->ops->chip_setup(dev);
@ -359,7 +389,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
drm_irq_install(dev);
dev->vblank_disable_allowed = 1;
dev->vblank_disable_allowed = true;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@ -449,7 +479,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
dev_dbg(dev->dev, "Invalid Connector object.\n");
return -EINVAL;
return -ENOENT;
}
connector = obj_to_connector(obj);
@ -491,7 +521,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, obj_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto mode_op_out;
}
@ -646,7 +676,6 @@ static struct drm_driver driver = {
.preclose = psb_driver_preclose,
.postclose = psb_driver_close,
.gem_init_object = psb_gem_init_object,
.gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,


@ -44,10 +44,10 @@ enum {
CHIP_MFLD_0130 = 3, /* Medfield */
};
#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
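Besides switching from the stale dev->pci_device shadow to pdev->device, the IS_MRST mask is widened; a comment-only illustration of the effect:
/*
 * old: (id & 0xfffc) == 0x4100  matches device IDs 0x4100..0x4103
 * new: (id & 0xfff0) == 0x4100  matches device IDs 0x4100..0x410f
 */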
/*
* Driver definitions
@ -75,6 +75,7 @@ enum {
* PCI resource identifiers
*/
#define PSB_MMIO_RESOURCE 0
#define PSB_AUX_RESOURCE 0
#define PSB_GATT_RESOURCE 2
#define PSB_GTT_RESOURCE 3
/*
@ -455,6 +456,7 @@ struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
struct pci_dev *aux_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
const struct psb_offset *regmap;
@ -486,6 +488,7 @@ struct drm_psb_private {
uint8_t __iomem *sgx_reg;
uint8_t __iomem *vdc_reg;
uint8_t __iomem *aux_reg; /* Auxiliary vdc pipe regs */
uint32_t gatt_free_offset;
/*
@ -532,6 +535,7 @@ struct drm_psb_private {
/* gmbus */
struct intel_gmbus *gmbus;
uint8_t __iomem *gmbus_reg;
/* Used by SDVO */
int crt_ddc_pin;
@ -672,6 +676,7 @@ struct psb_ops {
int sgx_offset; /* Base offset of SGX device */
int hdmi_mask; /* Mask of HDMI CRTCs */
int lvds_mask; /* Mask of LVDS CRTCs */
int sdvo_mask; /* Mask of SDVO CRTCs */
int cursor_needs_phys; /* If cursor base reg need physical address */
/* Sub functions */
@ -837,7 +842,6 @@ extern const struct drm_connector_helper_funcs
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
/* gem.c */
extern int psb_gem_init_object(struct drm_gem_object *obj);
extern void psb_gem_free_object(struct drm_gem_object *obj);
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
@ -928,16 +932,58 @@ static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
return ioread32(dev_priv->vdc_reg + reg);
}
static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
{
struct drm_psb_private *dev_priv = dev->dev_private;
return ioread32(dev_priv->aux_reg + reg);
}
#define REG_READ(reg) REGISTER_READ(dev, (reg))
#define REG_READ_AUX(reg) REGISTER_READ_AUX(dev, (reg))
/* Useful for post reads */
static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
uint32_t reg, int aux)
{
uint32_t val;
if (aux)
val = REG_READ_AUX(reg);
else
val = REG_READ(reg);
return val;
}
#define REG_READ_WITH_AUX(reg, aux) REGISTER_READ_WITH_AUX(dev, (reg), (aux))
static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
uint32_t val)
uint32_t val)
{
struct drm_psb_private *dev_priv = dev->dev_private;
iowrite32((val), dev_priv->vdc_reg + (reg));
}
static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
struct drm_psb_private *dev_priv = dev->dev_private;
iowrite32((val), dev_priv->aux_reg + (reg));
}
#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
#define REG_WRITE_AUX(reg, val) REGISTER_WRITE_AUX(dev, (reg), (val))
static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
uint32_t val, int aux)
{
if (aux)
REG_WRITE_AUX(reg, val);
else
REG_WRITE(reg, val);
}
#define REG_WRITE_WITH_AUX(reg, val, aux) REGISTER_WRITE_WITH_AUX(dev, (reg), (val), (aux))
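Callers use the _WITH_AUX variants to replay a register sequence on both VDC instances with one loop, exactly as the Oaktrail CRTC hunks above do. An illustrative fragment:
int i, need_aux = IS_MRST(dev) ? 1 : 0;
for (i = 0; i <= need_aux; i++) {
	/* i == 0 hits the primary VDC, i == 1 the auxiliary one */
	u32 temp = REG_READ_WITH_AUX(map->conf, i);
	REG_WRITE_WITH_AUX(map->conf, temp | PIPEACONF_ENABLE, i);
}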
static inline void REGISTER_WRITE16(struct drm_device *dev,
uint32_t reg, uint32_t val)


@ -572,7 +572,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
if (!drmmode_obj) {
dev_err(dev->dev, "no such CRTC id\n");
return -EINVAL;
return -ENOENT;
}
crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));


@ -228,24 +228,26 @@ static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u3
{
struct drm_device *dev = psb_intel_sdvo->base.base.dev;
u32 bval = val, cval = val;
int i;
int i, j;
int need_aux = IS_MRST(dev) ? 1 : 0;
if (psb_intel_sdvo->sdvo_reg == SDVOB) {
cval = REG_READ(SDVOC);
} else {
bval = REG_READ(SDVOB);
}
/*
* Write the registers twice for luck. Sometimes,
* writing them only once doesn't appear to 'stick'.
* The BIOS does this too. Yay, magic
*/
for (i = 0; i < 2; i++)
{
REG_WRITE(SDVOB, bval);
REG_READ(SDVOB);
REG_WRITE(SDVOC, cval);
REG_READ(SDVOC);
for (j = 0; j <= need_aux; j++) {
if (psb_intel_sdvo->sdvo_reg == SDVOB)
cval = REG_READ_WITH_AUX(SDVOC, j);
else
bval = REG_READ_WITH_AUX(SDVOB, j);
/*
* Write the registers twice for luck. Sometimes,
* writing them only once doesn't appear to 'stick'.
* The BIOS does this too. Yay, magic
*/
for (i = 0; i < 2; i++) {
REG_WRITE_WITH_AUX(SDVOB, bval, j);
REG_READ_WITH_AUX(SDVOB, j);
REG_WRITE_WITH_AUX(SDVOC, cval, j);
REG_READ_WITH_AUX(SDVOC, j);
}
}
}
@ -995,6 +997,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
struct psb_intel_sdvo_dtd input_dtd;
int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
int rate;
int need_aux = IS_MRST(dev) ? 1 : 0;
if (!mode)
return;
@ -1060,7 +1063,11 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
return;
/* Set the SDVO control regs. */
sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
if (need_aux)
sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
else
sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
switch (psb_intel_sdvo->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
@ -1090,6 +1097,8 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
u32 temp;
int i;
int need_aux = IS_MRST(dev) ? 1 : 0;
switch (mode) {
case DRM_MODE_DPMS_ON:
@ -1108,19 +1117,27 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
if (mode == DRM_MODE_DPMS_OFF) {
temp = REG_READ(psb_intel_sdvo->sdvo_reg);
if (need_aux)
temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
else
temp = REG_READ(psb_intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
}
}
} else {
bool input1, input2;
int i;
u8 status;
temp = REG_READ(psb_intel_sdvo->sdvo_reg);
if (need_aux)
temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
else
temp = REG_READ(psb_intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
gma_wait_for_vblank(dev);


@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
if (gma_power_is_on(dev))
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
if (dev->vblank_enabled[0])
if (dev->vblank[0].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
if (dev->vblank_enabled[1])
if (dev->vblank[1].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
/* FIXME: Handle Medfield irq mask
if (dev->vblank_enabled[1])
if (dev->vblank[1].enabled)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
if (dev->vblank_enabled[2])
if (dev->vblank[2].enabled)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
*/
@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
if (dev->vblank_enabled[0])
if (dev->vblank[0].enabled)
psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
if (dev->vblank_enabled[1])
if (dev->vblank[1].enabled)
psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
if (dev->vblank_enabled[2])
if (dev->vblank[2].enabled)
psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
if (dev->vblank_enabled[0])
if (dev->vblank[0].enabled)
psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
if (dev->vblank_enabled[1])
if (dev->vblank[1].enabled)
psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
if (dev->vblank_enabled[2])
if (dev->vblank[2].enabled)
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
{
unsigned int cur_vblank;
int ret = 0;
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
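These hunks track a DRM core change: the parallel per-pipe arrays (dev->vblank_enabled[] plus a shared vbl_queue) become one array of per-CRTC state. A small self-contained model of that data-structure move, with simplified types that only assume the shape visible in the hunks:

#include <stdbool.h>
#include <stdio.h>

struct vblank_state {
	bool enabled;           /* was dev->vblank_enabled[pipe] */
	unsigned int count;     /* other per-pipe state lives alongside it */
};

struct drm_dev_model {
	struct vblank_state vblank[3];  /* one entry per CRTC/pipe */
};

int main(void)
{
	struct drm_dev_model dev = { 0 };

	dev.vblank[1].enabled = true;   /* dev->vblank[1].enabled in the diff */
	for (int pipe = 0; pipe < 3; pipe++)
		printf("pipe %d: %s\n", pipe,
		       dev.vblank[pipe].enabled ? "on" : "off");
	return 0;
}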

View file

@ -17,6 +17,7 @@
#include <linux/hdmi.h>
#include <linux/module.h>
#include <drm/drmP.h>
@ -549,6 +550,8 @@ tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
buf[HB(0)] = 0x82;
buf[HB(1)] = 0x02;
buf[HB(2)] = 13;
buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
buf[PB(4)] = drm_match_cea_mode(mode);
tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
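The payload bytes above are only part of a valid AVI infoframe; per the HDMI/CEA-861 rule, a checksum byte must make the header plus payload sum to zero modulo 256. The driver is assumed to compute that in the write-out path, which this hunk does not show; a standalone illustration of the rule itself:

#include <stdint.h>
#include <stdio.h>

/* Checksum over header + payload, with the checksum slot still zero. */
static uint8_t infoframe_checksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	for (size_t i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)(0x100 - sum);  /* makes the total wrap to zero */
}

int main(void)
{
	/* 3 header bytes + checksum + 13 payload bytes = 17 */
	uint8_t buf[17] = { 0x82, 0x02, 13 };  /* AVI type, version, length */

	buf[3] = infoframe_checksum(buf, sizeof(buf));
	printf("checksum byte: 0x%02x\n", (unsigned)buf[3]); /* 0x6f here */
	return 0;
}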

View file

@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
dma->buflist[vertex->idx],
vertex->discard, vertex->used);
atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
mc->last_render);
atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
/* i810 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
pci_set_master(dev->pdev);
return 0;

View file

@ -0,0 +1,67 @@
config DRM_I915
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
depends on AGP
depends on AGP_INTEL
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
select DRM_KMS_HELPER
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
If M is selected, the module will be called i915. AGP support
is required for this driver to work. This driver is used by
the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
replaces the older i830 module that supported a subset of the
hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
depends on DRM_I915
help
Choose this option if you want kernel modesetting enabled by default,
and you have a new enough userspace to support this. Running old
userspaces with this enabled will cause pain. Note that this causes
the driver to bind to PCI devices, which precludes loading things
like intelfb.
config DRM_I915_FBDEV
bool "Enable legacy fbdev support for the modesettting intel driver"
depends on DRM_I915
select DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
default y
help
Choose this option if you have a need for the legacy fbdev
support. Note that this support also provides the Linux console
support on top of the intel modesetting driver.
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
help
Choose this option if you have prerelease Intel hardware and want the
i915 driver to support it by default. You can enable such support at
runtime with the module option i915.preliminary_hw_support=1; this
option changes the default for that module option.
If in doubt, say "N".
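As the help text says, the build-time default only seeds the module option: booting with i915.preliminary_hw_support=1 on the kernel command line, or loading the driver with "modprobe i915 preliminary_hw_support=1", enables the same support at runtime regardless of how this Kconfig question was answered.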

View file

@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
intel_dsi.o \
intel_dsi_cmd.o \
intel_dsi_pll.o \
intel_bios.o \
intel_ddi.o \
intel_dp.o \
@ -30,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_panel.o \
intel_pm.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
intel_dvo.o \
intel_ringbuffer.o \
@ -51,6 +53,8 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_ACPI) += intel_acpi.o
i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)

View file

@ -76,17 +76,6 @@ struct intel_dvo_dev_ops {
int (*mode_valid)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode);
/*
* Callback to adjust the mode to be set in the CRTC.
*
* This allows an output to adjust the clock or even the entire set of
* timings, which is used for panels with fixed timings or for
* buses with clock limitations.
*/
bool (*mode_fixup)(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/*
* Callback for preparing mode changes on an output
*/

The diff for this file is not shown because of its large size.

View file

@ -52,7 +52,7 @@
intel_ring_emit(LP_RING(dev_priv), x)
#define ADVANCE_LP_RING() \
intel_ring_advance(LP_RING(dev_priv))
__intel_ring_advance(LP_RING(dev_priv))
/**
* Lock test for when it's just for synchronization of ring access.
@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
if (batch->num_cliprects) {
cliprects = kcalloc(batch->num_cliprects,
sizeof(struct drm_clip_rect),
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL)
return -ENOMEM;
@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
sizeof(*cliprects), GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto fail_batch_free;
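Both kcalloc() fixes above switch to the kernel's preferred sizing idiom: take the element size from the pointer being assigned, not from a spelled-out type, so the allocation cannot go stale if the pointer's type later changes. A userspace sketch of the same idiom, with calloc standing in for kcalloc:

#include <stdlib.h>

struct clip_rect { unsigned short x1, y1, x2, y2; };

static int alloc_clips(unsigned int num)
{
	struct clip_rect *cliprects;

	/* sizeof(*cliprects) always matches the pointee, by construction */
	cliprects = calloc(num, sizeof(*cliprects));
	if (!cliprects)
		return -1;      /* kernel code would return -ENOMEM */
	free(cliprects);
	return 0;
}

int main(void)
{
	return alloc_clips(4);
}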
@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = READ_BREADCRUMB(dev_priv);
break;
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
value = dev->pdev->device;
break;
case I915_PARAM_HAS_GEM:
value = 1;
@ -1311,13 +1311,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
intel_power_domains_init_hw(dev);
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
ret = i915_gem_init(dev);
if (ret)
goto cleanup_irq;
goto cleanup_power;
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
@ -1325,9 +1327,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
if (INTEL_INFO(dev)->num_pipes == 0)
dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0) {
intel_display_power_put(dev, POWER_DOMAIN_VGA);
return 0;
}
ret = intel_fbdev_init(dev);
if (ret)
@ -1362,7 +1366,8 @@ cleanup_gem:
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
cleanup_power:
intel_display_power_put(dev, POWER_DOMAIN_VGA);
drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
@ -1398,6 +1403,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
#ifdef CONFIG_DRM_I915_FBDEV
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
@ -1418,6 +1424,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
kfree(ap);
}
#else
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
}
#endif
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
@ -1459,17 +1470,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
info = (struct intel_device_info *) flags;
/* Refuse to load on gen6+ without kms enabled. */
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
return -ENODEV;
}
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
@ -1494,6 +1501,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
intel_display_crc_init(dev);
i915_dump_device_info(dev_priv);
/* Not all pre-production machines fall into this category, only the
@ -1531,19 +1540,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_uncore_early_sanitize(dev);
if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
* NB: We can't write IDICR yet because we do not have gt funcs
* set up */
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
intel_uncore_init(dev);
ret = i915_gem_gtt_init(dev);
if (ret)
goto put_bridge;
goto out_regs;
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@ -1572,7 +1576,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
aperture_size);
if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
goto out_rmmap;
goto out_gtt;
}
dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@ -1598,13 +1602,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
intel_irq_init(dev);
intel_pm_init(dev);
intel_uncore_sanitize(dev);
intel_uncore_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@ -1640,13 +1640,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
if (HAS_POWER_WELL(dev))
i915_init_power_well(dev);
intel_power_domains_init(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_gem_unload;
goto out_power_well;
}
} else {
/* Start out suspended in ums mode. */
@ -1666,6 +1666,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_power_well:
if (HAS_POWER_WELL(dev))
intel_power_domains_remove(dev);
drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@ -1679,12 +1683,18 @@ out_gem_unload:
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
list_del(&dev_priv->gtt.base.global_link);
drm_mm_takedown(&dev_priv->gtt.base.mm);
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_rmmap:
out_regs:
intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
kfree(dev_priv);
return ret;
}
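The relabeled error path above (out_power_well, out_gtt, out_regs, ...) follows the standard kernel unwind idiom: each failure jumps to the label that undoes exactly what has already succeeded, in reverse order. A generic, self-contained sketch with invented setup/teardown steps:

#include <stdio.h>

static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return 0; }
static int setup_c(void) { puts("setup c"); return -1; } /* simulated failure */
static void teardown_b(void) { puts("teardown b"); }
static void teardown_a(void) { puts("teardown a"); }

static int driver_load(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;
	ret = setup_b();
	if (ret)
		goto out_a;
	ret = setup_c();
	if (ret)
		goto out_b;     /* a new step only needs a new label */
	return 0;

out_b:
	teardown_b();           /* undo in reverse order of setup */
out_a:
	teardown_a();
out:
	return ret;
}

int main(void)
{
	return driver_load() ? 1 : 0;
}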
@ -1700,8 +1710,8 @@ int i915_driver_unload(struct drm_device *dev)
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_set_power_well(dev, true);
i915_remove_power_well(dev);
intel_display_set_init_power(dev, true);
intel_power_domains_remove(dev);
}
i915_teardown_sysfs(dev);
@ -1709,15 +1719,9 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
ret = i915_gpu_idle(dev);
ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@ -1774,8 +1778,8 @@ int i915_driver_unload(struct drm_device *dev)
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_mm_takedown(&dev_priv->gtt.base.mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
drm_vblank_cleanup(dev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@ -1785,6 +1789,10 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
intel_uncore_fini(dev);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
@ -1796,19 +1804,11 @@ int i915_driver_unload(struct drm_device *dev)
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
int ret;
DRM_DEBUG_DRIVER("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;
file->driver_priv = file_priv;
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
idr_init(&file_priv->context_idr);
ret = i915_gem_open(dev, file);
if (ret)
return ret;
return 0;
}
@ -1836,7 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
return;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fb_restore_mode(dev);
intel_fbdev_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
}

View file

@ -160,49 +160,58 @@ extern int intel_agp_enabled;
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_845g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i85x_info = {
.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i865g_info = {
.gen = 2, .num_pipes = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i965g_info = {
.gen = 4, .is_broadwater = 1, .num_pipes = 2,
.has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i965gm_info = {
@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = {
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_g33_info = {
.gen = 3, .is_g33 = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_g45_info = {
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_gm45_info = {
@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = {
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.has_bsd_ring = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_pineview_info = {
@ -241,42 +252,36 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_sandybridge_d_info = {
.gen = 6, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
.has_force_wake = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
.has_force_wake = 1,
};
#define GEN7_FEATURES \
.gen = 7, .num_pipes = 3, \
.need_gfx_hws = 1, .has_hotplug = 1, \
.has_bsd_ring = 1, \
.has_blt_ring = 1, \
.has_llc = 1, \
.has_force_wake = 1
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_llc = 1
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_vebox_ring = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
static const struct intel_device_info intel_haswell_m_info = {
@ -328,7 +333,25 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
.has_vebox_ring = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
static const struct intel_device_info intel_broadwell_d_info = {
.is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
};
static const struct intel_device_info intel_broadwell_m_info = {
.is_preliminary = 1,
.gen = 8, .is_mobile = 1, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
};
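All of these device-info updates replace three independent booleans with one ring_mask, so each HAS_*() test later in the diff becomes a bit test. A standalone model using the same RENDER_RING/BSD_RING/BLT_RING/VEBOX_RING encoding that this diff adds to i915_drv.h:

#include <stdio.h>

enum { RCS, VCS, BCS, VECS };
#define RENDER_RING (1 << RCS)
#define BSD_RING    (1 << VCS)
#define BLT_RING    (1 << BCS)
#define VEBOX_RING  (1 << VECS)

struct device_info {
	unsigned char ring_mask;  /* rings supported by the HW */
};

int main(void)
{
	struct device_info hsw = {
		.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	};

	/* the HAS_BSD()/HAS_VEBOX() macros reduce to exactly this */
	printf("has BSD: %d, has VEBOX: %d\n",
	       !!(hsw.ring_mask & BSD_RING), !!(hsw.ring_mask & VEBOX_RING));
	return 0;
}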
/*
@ -362,7 +385,9 @@ static const struct intel_device_info intel_haswell_m_info = {
INTEL_HSW_D_IDS(&intel_haswell_d_info), \
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
INTEL_VLV_D_IDS(&intel_valleyview_d_info)
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
INTEL_BDW_D_IDS(&intel_broadwell_d_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@ -416,13 +441,19 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
DRM_DEBUG_KMS("This is Broadwell, assuming "
"LynxPoint LP PCH\n");
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
@ -447,6 +478,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return 0;
/* Until we get further testing... */
if (IS_GEN8(dev)) {
WARN_ON(!i915_preliminary_hw_support);
return 0;
}
if (i915_semaphores >= 0)
return i915_semaphores;
@ -472,7 +509,7 @@ static int i915_drm_freeze(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
intel_set_power_well(dev, true);
intel_display_set_init_power(dev, true);
drm_kms_helper_poll_disable(dev);
@ -482,9 +519,7 @@ static int i915_drm_freeze(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error;
mutex_lock(&dev->struct_mutex);
error = i915_gem_idle(dev);
mutex_unlock(&dev->struct_mutex);
error = i915_gem_suspend(dev);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
@ -578,11 +613,24 @@ static void intel_resume_hotplug(struct drm_device *dev)
drm_helper_hpd_irq_event(dev);
}
static int __i915_drm_thaw(struct drm_device *dev)
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
intel_uncore_early_sanitize(dev);
intel_uncore_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
restore_gtt_mappings) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
intel_power_domains_init_hw(dev);
i915_restore_state(dev);
intel_opregion_setup(dev);
@ -642,20 +690,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
intel_uncore_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
} else if (drm_core_check_feature(dev, DRIVER_MODESET))
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_check_and_clear_faults(dev);
__i915_drm_thaw(dev);
return error;
return __i915_drm_thaw(dev, true);
}
int i915_resume(struct drm_device *dev)
@ -671,20 +709,12 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
intel_uncore_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need this since the BIOS might clear all our scratch PTEs.
* earlier) need to restore the GTT mappings since the BIOS might clear
* all our scratch PTEs.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
!dev_priv->opregion.header) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
ret = __i915_drm_thaw(dev);
ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
if (ret)
return ret;
@ -722,24 +752,19 @@ int i915_reset(struct drm_device *dev)
simulated = dev_priv->gpu_error.stop_rings != 0;
if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
ret = -ENODEV;
} else {
ret = intel_gpu_reset(dev);
ret = intel_gpu_reset(dev);
/* Also reset the gpu hangman. */
if (simulated) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
} else
dev_priv->gpu_error.last_reset = get_seconds();
/* Also reset the gpu hangman. */
if (simulated) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
}
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
@ -762,30 +787,17 @@ int i915_reset(struct drm_device *dev)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
struct intel_ring_buffer *ring;
int i;
bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
dev_priv->ums.mm_suspended = 0;
i915_gem_init_swizzling(dev);
for_each_ring(ring, dev_priv, i)
ring->init(ring);
i915_gem_context_init(dev);
if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret)
i915_gem_cleanup_aliasing_ppgtt(dev);
}
/*
* It would make sense to re-init all the other hw state, at
* least the rps/rc6/emon init done within modeset_init_hw. For
* some unknown reason, this blows up my ilk, so don't.
*/
ret = i915_gem_init_hw(dev);
if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
DRM_ERROR("HW contexts didn't survive reset\n");
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
return ret;
}
drm_irq_uninstall(dev);
drm_irq_install(dev);
@ -802,6 +814,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
DRM_INFO("This hardware requires preliminary hardware support.\n"
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
return -ENODEV;
}
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
@ -949,7 +967,6 @@ static struct drm_driver driver = {
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,

View file

@ -54,6 +54,7 @@
#define DRIVER_DATE "20080730"
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
PIPE_B,
PIPE_C,
@ -98,13 +99,29 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_VGA,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
};
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
#define POWER_DOMAIN_TRANSCODER(tran) \
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
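The fixed POWER_DOMAIN_TRANSCODER() above maps transcoders A..C linearly onto their domains and special-cases the EDP transcoder, whose enum value is not contiguous with the others, instead of relying on the old "+ 0xF" arithmetic. A compilable model of the mapping; the enum values here are illustrative:

#include <assert.h>

enum transcoder {
	TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
	TRANSCODER_EDP = 0xF,           /* deliberately non-contiguous */
};

enum power_domain {
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
};

#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (enum power_domain)((tran) + POWER_DOMAIN_TRANSCODER_A))

int main(void)
{
	assert(POWER_DOMAIN_TRANSCODER(TRANSCODER_B) == POWER_DOMAIN_TRANSCODER_B);
	assert(POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) == POWER_DOMAIN_TRANSCODER_EDP);
	return 0;
}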
enum hpd_pin {
HPD_NONE = 0,
@ -225,9 +242,12 @@ struct intel_opregion {
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)
@ -285,6 +305,7 @@ struct drm_i915_error_state {
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
u32 bbstate[I915_NUM_RINGS];
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
u32 extra_instdone[I915_NUM_INSTDONE_REG];
@ -321,11 +342,13 @@ struct drm_i915_error_state {
u32 dirty:1;
u32 purgeable:1;
s32 ring:4;
u32 cache_level:2;
u32 cache_level:3;
} **active_bo, **pinned_bo;
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
int hangcheck_score[I915_NUM_RINGS];
enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
struct intel_crtc_config;
@ -357,7 +380,7 @@ struct drm_i915_display_funcs {
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
@ -367,7 +390,6 @@ struct drm_i915_display_funcs {
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
@ -375,7 +397,8 @@ struct drm_i915_display_funcs {
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
struct drm_crtc *crtc,
struct drm_display_mode *mode);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@ -395,6 +418,20 @@ struct drm_i915_display_funcs {
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
uint8_t val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
uint16_t val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
uint32_t val, bool trace);
void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
uint64_t val, bool trace);
};
struct intel_uncore {
@ -404,6 +441,8 @@ struct intel_uncore {
unsigned fifo_count;
unsigned forcewake_count;
struct delayed_work force_wake_work;
};
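The new intel_uncore_funcs table turns every MMIO access into an indirect call, so the right read/write variant (forcewake handling, tracing) can be chosen once at init time instead of branching on every access. A minimal userspace sketch of the same vtable pattern; every name here is invented:

#include <stdint.h>
#include <stdio.h>

struct dev_model;

struct mmio_funcs {
	uint32_t (*readl)(struct dev_model *dev, uint32_t reg);
	void (*writel)(struct dev_model *dev, uint32_t reg, uint32_t val);
};

struct dev_model {
	struct mmio_funcs funcs;  /* filled in once, per platform */
	uint32_t regs[64];
};

static uint32_t plain_readl(struct dev_model *dev, uint32_t reg)
{
	return dev->regs[reg];
}

static void plain_writel(struct dev_model *dev, uint32_t reg, uint32_t val)
{
	dev->regs[reg] = val;
}

/* the I915_READ()/I915_WRITE() macros reduce to this kind of dispatch */
#define DEV_READ(dev, reg)       ((dev)->funcs.readl((dev), (reg)))
#define DEV_WRITE(dev, reg, val) ((dev)->funcs.writel((dev), (reg), (val)))

int main(void)
{
	struct dev_model dev = { .funcs = { plain_readl, plain_writel } };

	DEV_WRITE(&dev, 7, 42);
	printf("%u\n", (unsigned)DEV_READ(&dev, 7));  /* prints 42 */
	return 0;
}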
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@ -420,7 +459,7 @@ struct intel_uncore {
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
func(has_force_wake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
@ -428,9 +467,6 @@ struct intel_uncore {
func(has_overlay) sep \
func(overlay_needs_physical) sep \
func(supports_tv) sep \
func(has_bsd_ring) sep \
func(has_blt_ring) sep \
func(has_vebox_ring) sep \
func(has_llc) sep \
func(has_ddi) sep \
func(has_fpga_dbg)
@ -442,6 +478,7 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
u8 gen;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
@ -542,10 +579,21 @@ struct i915_gtt {
struct i915_hw_ppgtt {
struct i915_address_space base;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
union {
struct page **pt_pages;
struct page *gen8_pt_pages;
};
struct page *pd_pages;
int num_pd_pages;
int num_pt_pages;
union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[4];
};
union {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[4];
};
int (*enable)(struct drm_device *dev);
};
@ -570,6 +618,13 @@ struct i915_vma {
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
};
struct i915_ctx_hang_stats {
@ -578,6 +633,12 @@ struct i915_ctx_hang_stats {
/* This context had batch active when hang was declared */
unsigned batch_active;
/* Time when this context was last blamed for a GPU reset */
unsigned long guilty_ts;
/* This context is banned to submit more work */
bool banned;
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
@ -586,10 +647,13 @@ struct i915_hw_context {
struct kref ref;
int id;
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct list_head link;
};
struct i915_fbc {
@ -623,17 +687,9 @@ struct i915_fbc {
} no_fbc_reason;
};
enum no_psr_reason {
PSR_NO_SOURCE, /* Not supported on platform */
PSR_NO_SINK, /* Not supported by panel */
PSR_MODULE_PARAM,
PSR_CRTC_NOT_ACTIVE,
PSR_PWR_WELL_ENABLED,
PSR_NOT_TILED,
PSR_SPRITE_ENABLED,
PSR_S3D_ENABLED,
PSR_INTERLACED_ENABLED,
PSR_HSW_NOT_DDIA,
struct i915_psr {
bool sink_support;
bool source_ok;
};
enum intel_pch {
@ -704,6 +760,9 @@ struct i915_suspend_saved_registers {
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveBLC_HIST_CTL_B;
u32 saveBLC_PWM_CTL_B;
u32 saveBLC_PWM_CTL2_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@ -823,17 +882,20 @@ struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
/* On vlv we need to manually drop to Vmin with a delayed work. */
struct delayed_work vlv_work;
/* The below variables an all the rps hw state are protected by
* dev->struct mutext. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 rpe_delay;
u8 rp1_delay;
u8 rp0_delay;
u8 hw_max;
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
bool enabled;
struct delayed_work delayed_resume_work;
/*
@ -870,11 +932,21 @@ struct intel_ilk_power_mgmt {
/* Power well structure for haswell */
struct i915_power_well {
struct drm_device *device;
spinlock_t lock;
/* power well enable/disable usage count */
int count;
int i915_request;
};
#define I915_MAX_POWER_WELLS 1
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
* time are on. They are kept on until after the first modeset.
*/
bool init_power_on;
struct mutex lock;
struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
};
struct i915_dri1_state {
@ -902,9 +974,11 @@ struct i915_ums_state {
int mm_suspended;
};
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info;
u32 *remap_info[MAX_L3_SLICES];
struct work_struct error_work;
int which_slice;
};
struct i915_gem_mm {
@ -941,6 +1015,15 @@ struct i915_gem_mm {
*/
struct delayed_work retire_work;
/**
* When we detect an idle GPU, we want to turn on
* powersaving features. So once we see that there
* are no more requests outstanding and no more
* arrive within a small period of time, we fire
* off the idle_work.
*/
struct delayed_work idle_work;
/**
* Are we in a non-interruptible section of code like
* modesetting?
@ -979,6 +1062,9 @@ struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
struct timer_list hangcheck_timer;
/* For reset and error_state handling. */
@ -987,7 +1073,8 @@ struct i915_gpu_error {
struct drm_i915_error_state *first_error;
struct work_struct work;
unsigned long last_reset;
unsigned long missed_irq_rings;
/**
* State variable and reset counter controlling the reset flow
@ -1027,6 +1114,9 @@ struct i915_gpu_error {
/* For gpu hang simulation. */
unsigned int stop_rings;
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
};
enum modeset_restore {
@ -1035,6 +1125,14 @@ enum modeset_restore {
MODESET_SUSPENDED,
};
struct ddi_vbt_port_info {
uint8_t hdmi_level_shift;
uint8_t supports_dvi:1;
uint8_t supports_hdmi:1;
uint8_t supports_dp:1;
};
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@ -1060,10 +1158,17 @@ struct intel_vbt_data {
int edp_bpp;
struct edp_power_seq edp_pps;
/* MIPI DSI */
struct {
u16 panel_id;
} dsi;
int crt_ddc_pin;
int child_dev_num;
struct child_device_config *child_dev;
union child_device_config *child_dev;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};
enum intel_ddb_partitioning {
@ -1079,6 +1184,15 @@ struct intel_wm_level {
uint32_t fbc_val;
};
struct hsw_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
uint32_t wm_lp_spr[3];
uint32_t wm_linetime[3];
bool enable_fbc_wm;
enum intel_ddb_partitioning partitioning;
};
/*
* This struct tracks the state needed for the Package C8+ feature.
*
@ -1148,6 +1262,36 @@ struct i915_package_c8 {
} regsave;
};
enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
INTEL_PIPE_CRC_SOURCE_PLANE2,
INTEL_PIPE_CRC_SOURCE_PF,
INTEL_PIPE_CRC_SOURCE_PIPE,
/* TV/DP on pre-gen5/vlv can't use the pipe source. */
INTEL_PIPE_CRC_SOURCE_TV,
INTEL_PIPE_CRC_SOURCE_DP_B,
INTEL_PIPE_CRC_SOURCE_DP_C,
INTEL_PIPE_CRC_SOURCE_DP_D,
INTEL_PIPE_CRC_SOURCE_AUTO,
INTEL_PIPE_CRC_SOURCE_MAX,
};
struct intel_pipe_crc_entry {
uint32_t frame;
uint32_t crc[5];
};
#define INTEL_PIPE_CRC_ENTRIES_NR 128
struct intel_pipe_crc {
spinlock_t lock;
bool opened; /* exclusive access to the result file */
struct intel_pipe_crc_entry *entries;
enum intel_pipe_crc_source source;
int head, tail;
wait_queue_head_t wq;
};
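struct intel_pipe_crc above implies a classic single-producer/single-consumer ring: the interrupt side advances head as CRC entries arrive and the debugfs reader advances tail. A self-contained sketch of that head/tail discipline, with a simplified entry type and an overflow policy that may differ from the real driver's:

#include <stdio.h>

#define ENTRIES 128

struct crc_ring {
	unsigned int entries[ENTRIES];
	int head, tail;
};

static int ring_push(struct crc_ring *r, unsigned int crc)
{
	int next = (r->head + 1) % ENTRIES;

	if (next == r->tail)
		return -1;              /* full: drop the new entry */
	r->entries[r->head] = crc;
	r->head = next;
	return 0;
}

static int ring_pop(struct crc_ring *r, unsigned int *crc)
{
	if (r->tail == r->head)
		return -1;              /* empty */
	*crc = r->entries[r->tail];
	r->tail = (r->tail + 1) % ENTRIES;
	return 0;
}

int main(void)
{
	struct crc_ring r = { .head = 0, .tail = 0 };
	unsigned int v;

	ring_push(&r, 0xabcd);
	if (!ring_pop(&r, &v))
		printf("0x%x\n", v);
	return 0;
}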
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@ -1193,7 +1337,10 @@ typedef struct drm_i915_private {
struct mutex dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask;
union {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
u32 pm_irq_mask;
@ -1272,6 +1419,10 @@ typedef struct drm_i915_private {
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
@ -1297,17 +1448,18 @@ typedef struct drm_i915_private {
* mchdev_lock in intel_pm.c */
struct intel_ilk_power_mgmt ips;
/* Haswell power well */
struct i915_power_well power_well;
struct i915_power_domains power_domains;
enum no_psr_reason no_psr_reason;
struct i915_psr psr;
struct i915_gpu_error gpu_error;
struct drm_i915_gem_object *vlv_pctx;
#ifdef CONFIG_DRM_I915_FBDEV
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
#endif
/*
* The console may be contended at resume, but we don't
@ -1320,6 +1472,7 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
struct list_head context_list;
u32 fdi_rx_config;
@ -1337,6 +1490,9 @@ typedef struct drm_i915_private {
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
/* current hardware state */
struct hsw_wm_values hw;
} wm;
struct i915_package_c8 pc8;
@ -1400,8 +1556,6 @@ struct drm_i915_gem_object {
struct list_head ring_list;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
* This is set if the object is on the active lists (has pending
@ -1487,13 +1641,6 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
@ -1505,11 +1652,14 @@ struct drm_i915_gem_object {
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
/** References from framebuffers, locks out tiling changes. */
unsigned long framebuffer_references;
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
/** User space pin count and filp owning the pin */
uint32_t user_pin_count;
unsigned long user_pin_count;
struct drm_file *pin_filp;
/** for phy allocated objects */
@ -1560,48 +1710,56 @@ struct drm_i915_gem_request {
};
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct {
spinlock_t lock;
struct list_head request_list;
struct delayed_work idle_work;
} mm;
struct idr context_idr;
struct i915_ctx_hang_stats hang_stats;
atomic_t rps_wait_boost;
};
#define INTEL_INFO(dev) (to_i915(dev)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
(dev)->pci_device == 0x0152 || \
(dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
(dev)->pci_device == 0x0106 || \
(dev)->pci_device == 0x010A)
#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
(dev)->pdev->device == 0x0152 || \
(dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
(dev)->pdev->device == 0x0106 || \
(dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0C00)
((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
/*
* The genX designation typically refers to the render engine, so render
@ -1615,10 +1773,15 @@ struct drm_i915_file_private {
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
@ -1640,7 +1803,6 @@ struct drm_i915_file_private {
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
@ -1648,11 +1810,12 @@ struct drm_i915_file_private {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_IPS(dev) (IS_ULT(dev))
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@ -1668,35 +1831,14 @@ struct drm_i915_file_private {
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
#include "i915_trace.h"
/**
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This
* stage is entered automatically when the GPU is idle when RC6 support is
* enabled, and as soon as a new workload arises the GPU wakes up automatically as well.
*
* There are different RC6 modes available in Intel GPU, which differentiate
* among each other with the latency required to enter and leave RC6 and
* voltage consumed by the GPU in different states.
*
* The combination of the following flags define which states GPU is allowed
* to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
* RC6pp is deepest RC6. Their support by hardware varies according to the
* GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
* which brings the most power savings; deeper states save more power, but
* require higher latency to switch to and wake up.
*/
#define INTEL_RC6_ENABLE (1<<0)
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
@ -1767,12 +1909,13 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@ -1824,14 +1967,11 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@ -1870,9 +2010,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@ -1913,7 +2052,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
}
void i915_gem_retire_requests(struct drm_device *dev);
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
@ -1933,11 +2072,11 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
@ -1964,6 +2103,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
@ -1995,6 +2135,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@ -2031,7 +2174,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
map_and_fenceable, nonblocking);
}
#undef obj_to_ggtt
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
@ -2094,6 +2236,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
unsigned cache_level,
bool mappable,
bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
@ -2133,6 +2276,11 @@ int i915_verify_lists(struct drm_device *dev);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
/* i915_gpu_error.c */
__printf(2, 3)
@ -2186,15 +2334,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
return 0;
}
#endif
/* intel_acpi.c */
@ -2256,8 +2419,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
@ -2266,37 +2437,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
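Note: the rewrite above routes every MMIO access through per-device function pointers (dev_priv->uncore.funcs.mmio_*) instead of the generated i915_read##x()/i915_write##x() helpers, so per-generation dispatch happens once at init time rather than on every register access. A minimal sketch of the dispatch-table shape, with illustrative names (the real definition lives in the i915_drv.h portion of this diff, which is not rendered here):

/* Sketch only -- the field names follow the macro call sites above;
 * the struct and type names here are assumptions, not the kernel's. */
struct mmio_funcs {
	u8   (*mmio_readb)(struct drm_i915_private *dev_priv, u32 reg, bool trace);
	void (*mmio_writeb)(struct drm_i915_private *dev_priv, u32 reg, u8 val, bool trace);
	u32  (*mmio_readl)(struct drm_i915_private *dev_priv, u32 reg, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, u32 reg, u32 val, bool trace);
};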

File diff not shown because it is too large

View file

@ -73,7 +73,7 @@
*
* There are two confusing terms used above:
* The "current context" means the context which is currently running on the
* GPU. The GPU has loaded it's state already and has stored away the gtt
* GPU. The GPU has loaded its state already and has stored away the gtt
* offset of the BO. The GPU is not actively referencing the data at this
* offset, but it will on the next context switch. The only way to avoid this
* is to do a GPU reset.
@ -117,6 +117,9 @@ static int get_context_size(struct drm_device *dev)
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
case 8:
ret = GEN8_CXT_TOTAL_SIZE;
break;
default:
BUG();
}
@ -129,6 +132,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
list_del(&ctx->link);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
}
@ -147,6 +151,7 @@ create_hw_context(struct drm_device *dev,
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
INIT_LIST_HEAD(&ctx->link);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
@ -166,6 +171,7 @@ create_hw_context(struct drm_device *dev,
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
list_add_tail(&ctx->link, &dev_priv->context_list);
/* Default context will never have a file_priv */
if (file_priv == NULL)
@ -178,6 +184,10 @@ create_hw_context(struct drm_device *dev,
ctx->file_priv = file_priv;
ctx->id = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
return ctx;
@ -213,7 +223,6 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* may not be available. To avoid this we always pin the
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@ -226,6 +235,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
goto err_unpin;
}
dev_priv->ring[RCS].default_context = ctx;
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
@ -281,16 +292,24 @@ void i915_gem_context_fini(struct drm_device *dev)
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
i915_gem_object_unpin(dctx->obj);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
drm_gem_object_unreference(&dctx->obj->base);
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->obj->active);
i915_gem_object_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
}
i915_gem_object_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].default_context = NULL;
dev_priv->ring[RCS].last_context = NULL;
}
static int context_idr_cleanup(int id, void *p, void *data)
@ -393,11 +412,11 @@ static int do_switch(struct i915_hw_context *to)
struct intel_ring_buffer *ring = to->ring;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
int ret, i;
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
if (from == to)
if (from == to && !to->remap_slice)
return 0;
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@ -420,8 +439,6 @@ static int do_switch(struct i915_hw_context *to)
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
else if (WARN_ON_ONCE(from == to)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
@ -429,6 +446,18 @@ static int do_switch(struct i915_hw_context *to)
return ret;
}
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
ret = i915_gem_l3_remap(ring, i);
/* If it failed, try again next round */
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
else
to->remap_slice &= ~(1<<i);
}
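Note: remap_slice acts as a per-slice retry bitmask; a failed remap leaves the bit set so the next context switch tries again. An illustrative walk-through with two L3 slices:

/* create_hw_context():  remap_slice = (1 << 2) - 1 = 0b11  (both dirty)
 * switch #1: slice 0 remap fails, slice 1 succeeds -> remap_slice = 0b01
 * switch #2: slice 0 succeeds                      -> remap_slice = 0b00
 * The early return above only skips a switch when from == to AND
 * remap_slice is clear, so pending remaps are never starved.
 */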
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. In fact, the below code
@ -436,11 +465,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
i915_gem_object_move_to_active(from->obj, ring);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@ -451,17 +477,7 @@ static int do_switch(struct i915_hw_context *to)
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
ret = i915_add_request(ring, NULL);
if (ret) {
/* Too late, we've already scheduled a context switch.
* Try to undo the change so that the hw state is
* consistent with our tracking. In case of emergency,
* scream.
*/
WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
return ret;
}
/* obj is kept alive until the next request by its active ref */
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}

View file

@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
if (vma->obj->pin_count)
return false;
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
list_add(&vma->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
@ -113,7 +116,7 @@ none:
}
/* We expect the caller to unpin, evict all and try again, or give up.
* So calling i915_gem_evict_everything() is unnecessary.
* So calling i915_gem_evict_vm() is unnecessary.
*/
return -ENOSPC;
@ -152,12 +155,48 @@ found:
return ret;
}
/**
* i915_gem_evict_vm - Try to free up VM space
*
* @vm: Address space to evict from
* @do_idle: Boolean directing whether to idle first.
*
* VM eviction is about freeing up virtual address space. If one wants
* fine-grained eviction, see i915_gem_evict_something() for more details. In
* terms of freeing up actual system memory, this function may not accomplish
* the desired result: an object may be shared across multiple address spaces,
* and this function does not guarantee that such objects are freed.
*
* Using do_idle will result in a more complete eviction because it retires and
* inactivates current BOs.
*/
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
struct i915_vma *vma, *next;
int ret;
trace_i915_gem_evict_vm(vm);
if (do_idle) {
ret = i915_gpu_idle(vm->dev);
if (ret)
return ret;
i915_gem_retire_requests(vm->dev);
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
if (vma->obj->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
return 0;
}
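Note: a caller that wants the most thorough sweep can combine both modes; a hypothetical helper (not part of this patch) might look like:

/* Illustrative only: cheap pass first, then idle, retire and retry. */
static int evict_vm_thorough(struct i915_address_space *vm)
{
	int ret = i915_gem_evict_vm(vm, false);	/* unbind inactive vmas */
	if (ret)
		return ret;
	return i915_gem_evict_vm(vm, true);	/* idle first, then unbind */
}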
int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm;
struct i915_vma *vma, *next;
bool lists_empty = true;
int ret;
@ -184,11 +223,8 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
if (vma->obj->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
}
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
WARN_ON(i915_gem_evict_vm(vm, false));
return 0;
}

View file

@ -33,35 +33,35 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
struct eb_objects {
struct list_head objects;
struct eb_vmas {
struct list_head vmas;
int and;
union {
struct drm_i915_gem_object *lut[0];
struct i915_vma *lut[0];
struct hlist_head buckets[0];
};
};
static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
{
struct eb_objects *eb = NULL;
struct eb_vmas *eb = NULL;
if (args->flags & I915_EXEC_HANDLE_LUT) {
int size = args->buffer_count;
size *= sizeof(struct drm_i915_gem_object *);
size += sizeof(struct eb_objects);
unsigned size = args->buffer_count;
size *= sizeof(struct i915_vma *);
size += sizeof(struct eb_vmas);
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
}
if (eb == NULL) {
int size = args->buffer_count;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
unsigned size = args->buffer_count;
unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
sizeof(struct eb_vmas),
GFP_TEMPORARY);
if (eb == NULL)
return eb;
@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
} else
eb->and = -args->buffer_count;
INIT_LIST_HEAD(&eb->objects);
INIT_LIST_HEAD(&eb->vmas);
return eb;
}
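Note: when the flat handle lookup table cannot be allocated, eb_create() falls back to a hash table whose bucket count starts at PAGE_SIZE/sizeof(struct hlist_head)/2 and is halved until it is no more than twice the buffer count; eb->and holds the resulting power-of-two mask. A worked example of that sizing (assuming 4 KiB pages and 8-byte hlist heads):

/* Illustrative arithmetic only, mirroring the loop above. */
static unsigned eb_bucket_count(unsigned buffer_count)
{
	unsigned count = 4096 / 8 / 2;		/* 256 buckets to start */

	while (count > 2 * buffer_count)	/* shrink for small batches */
		count >>= 1;
	return count;				/* hash mask = count - 1 */
}
/* eb_bucket_count(10) == 16, eb_bucket_count(100) == 128 */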
static void
eb_reset(struct eb_objects *eb)
eb_reset(struct eb_vmas *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static int
eb_lookup_objects(struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
const struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file)
eb_lookup_vmas(struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
const struct drm_i915_gem_execbuffer2 *args,
struct i915_address_space *vm,
struct drm_file *file)
{
int i;
struct drm_i915_gem_object *obj;
struct list_head objects;
int i, ret = 0;
INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
/* Grab a reference to the object and release the lock so we can lookup
* or create the VMA without using GFP_ATOMIC */
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
return -ENOENT;
ret = -ENOENT;
goto out;
}
if (!list_empty(&obj->exec_list)) {
if (!list_empty(&obj->obj_exec_link)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
return -EINVAL;
ret = -EINVAL;
goto out;
}
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->exec_list, &eb->objects);
obj->exec_entry = &exec[i];
if (eb->and < 0) {
eb->lut[i] = obj;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
obj->exec_handle = handle;
hlist_add_head(&obj->exec_node,
&eb->buckets[handle & eb->and]);
}
list_add_tail(&obj->obj_exec_link, &objects);
}
spin_unlock(&file->table_lock);
return 0;
i = 0;
list_for_each_entry(obj, &objects, obj_exec_link) {
struct i915_vma *vma;
/*
* NOTE: We can leak any vmas created here when something fails
* later on. But that's no issue since vma_unbind can deal with
* vmas which are not actually bound. And since only
* lookup_or_create exists as an interface to get at the vma
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
goto out;
}
list_add_tail(&vma->exec_list, &eb->vmas);
vma->exec_entry = &exec[i];
if (eb->and < 0) {
eb->lut[i] = vma;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
vma->exec_handle = handle;
hlist_add_head(&vma->exec_node,
&eb->buckets[handle & eb->and]);
}
++i;
}
out:
while (!list_empty(&objects)) {
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
if (ret)
drm_gem_object_unreference(&obj->base);
}
return ret;
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
if (eb->and < 0) {
if (handle >= -eb->and)
@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
vma = hlist_entry(node, struct i915_vma, exec_node);
if (vma->exec_handle == handle)
return vma;
}
return NULL;
}
}
static void
eb_destroy(struct eb_objects *eb)
{
while (!list_empty(&eb->objects)) {
struct drm_i915_gem_object *obj;
static void eb_destroy(struct eb_vmas *eb)
{
while (!list_empty(&eb->vmas)) {
struct i915_vma *vma;
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
vma = list_first_entry(&eb->vmas,
struct i915_vma,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
list_del_init(&vma->exec_list);
drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
return (HAS_LLC(obj->base.dev) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
!obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@ -175,17 +212,31 @@ static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
int ret = -EINVAL;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
return ret;
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
if (INTEL_INFO(dev)->gen >= 8) {
page_offset = offset_in_page(page_offset + sizeof(uint32_t));
if (page_offset == 0) {
kunmap_atomic(vaddr);
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
}
*(uint32_t *)(vaddr + page_offset) = 0;
}
kunmap_atomic(vaddr);
return 0;
@ -216,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
reloc_entry = (uint32_t __iomem *)
(reloc_page + offset_in_page(reloc->offset));
iowrite32(reloc->delta, reloc_entry);
if (INTEL_INFO(dev)->gen >= 8) {
reloc_entry += 1;
if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page = io_mapping_map_atomic_wc(
dev_priv->gtt.mappable,
reloc->offset + sizeof(uint32_t));
reloc_entry = reloc_page;
}
iowrite32(0, reloc_entry);
}
io_mapping_unmap_atomic(reloc_page);
return 0;
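/* Note on the gen8+ paths above: relocations are 64 bits wide from Broadwell
 * on, so after the low dword gets reloc->delta the high dword is written as
 * 0, and when reloc->offset sits at the last dword of a page the second
 * write lands on the next page -- hence the kmap_atomic/io_mapping remaps
 * in both the CPU and GTT relocation paths.
 */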
@ -223,22 +289,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
uint32_t target_offset;
int ret = -EINVAL;
/* we already hold a reference to all valid objects */
target_obj = &eb_get_object(eb, reloc->target_handle)->base;
if (unlikely(target_obj == NULL))
target_vma = eb_get_vma(eb, reloc->target_handle);
if (unlikely(target_vma == NULL))
return -ENOENT;
target_i915_obj = target_vma->obj;
target_obj = &target_vma->obj->base;
target_i915_obj = to_intel_bo(target_obj);
target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@ -284,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
if (unlikely(reloc->offset >
obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
@ -320,14 +389,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct i915_address_space *vm)
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
user_relocs = to_user_ptr(entry->relocs_ptr);
@ -346,8 +414,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
vm);
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
vma->vm);
if (ret)
return ret;
@ -368,17 +436,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
}
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs,
struct i915_address_space *vm)
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *relocs)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
vm);
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
vma->vm);
if (ret)
return ret;
}
@ -387,10 +454,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
i915_gem_execbuffer_relocate(struct eb_vmas *eb,
struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
@ -401,8 +468,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
* lockdep complains vehemently.
*/
pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
list_for_each_entry(vma, &eb->vmas, exec_list) {
ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
@ -415,31 +482,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
need_reloc_mappable(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
return entry->relocation_count && !use_cpu_reloc(obj);
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
i915_is_ggtt(vma->vm);
}
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct i915_address_space *vm,
bool *need_reloc)
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_ring_buffer *ring,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
struct drm_i915_gem_object *obj = vma->obj;
int ret;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
false);
if (ret)
return ret;
@ -467,8 +535,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
if (entry->offset != i915_gem_obj_offset(obj, vm)) {
entry->offset = i915_gem_obj_offset(obj, vm);
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start;
*need_reloc = true;
}
@ -485,14 +553,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
}
static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
struct drm_i915_gem_object *obj = vma->obj;
if (!i915_gem_obj_bound_any(obj))
if (!drm_mm_node_allocated(&vma->node))
return;
entry = obj->exec_entry;
entry = vma->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);
@ -505,41 +574,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
struct i915_address_space *vm,
struct list_head *vmas,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
struct i915_vma *vma;
struct i915_address_space *vm;
struct list_head ordered_vmas;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
if (list_empty(vmas))
return 0;
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
INIT_LIST_HEAD(&ordered_vmas);
while (!list_empty(vmas)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
entry = obj->exec_entry;
vma = list_first_entry(vmas, struct i915_vma, exec_list);
obj = vma->obj;
entry = vma->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
list_move(&vma->exec_list, &ordered_vmas);
else
list_move_tail(&obj->exec_list, &ordered_objects);
list_move_tail(&vma->exec_list, &ordered_vmas);
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_objects, objects);
list_splice(&ordered_vmas, vmas);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
@ -558,52 +632,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
int ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool need_fence, need_mappable;
u32 obj_offset;
if (!i915_gem_obj_bound(obj, vm))
obj = vma->obj;
if (!drm_mm_node_allocated(&vma->node))
continue;
obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
WARN_ON((need_mappable || need_fence) &&
!i915_is_ggtt(vm));
!i915_is_ggtt(vma->vm));
if ((entry->alignment &&
obj_offset & (entry->alignment - 1)) ||
vma->node.start & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (i915_gem_obj_bound(obj, vm))
list_for_each_entry(vma, vmas, exec_list) {
if (drm_mm_node_allocated(&vma->node))
continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
err: /* Decrement pin count for bound objects */
list_for_each_entry(obj, objects, exec_list)
i915_gem_execbuffer_unreserve_object(obj);
list_for_each_entry(vma, vmas, exec_list)
i915_gem_execbuffer_unreserve_vma(vma);
if (ret != -ENOSPC || retry++)
return ret;
ret = i915_gem_evict_everything(ring->dev);
ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
} while (1);
@ -614,24 +688,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_vma *vma;
bool need_relocs;
int *reloc_offset;
int i, total, ret;
int count = args->buffer_count;
unsigned count = args->buffer_count;
if (WARN_ON(list_empty(&eb->vmas)))
return 0;
vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
/* We may process another execbuffer during the unlock... */
while (!list_empty(&eb->objects)) {
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
while (!list_empty(&eb->vmas)) {
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list);
drm_gem_object_unreference(&vma->obj->base);
}
mutex_unlock(&dev->struct_mutex);
@ -695,20 +772,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
ret = eb_lookup_objects(eb, exec, args, file);
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset],
vm);
list_for_each_entry(vma, &eb->vmas, exec_list) {
int offset = vma->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
reloc + reloc_offset[offset]);
if (ret)
goto err;
}
@ -727,14 +803,15 @@ err:
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
struct list_head *vmas)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;
list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
@ -771,8 +848,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
int relocs_total = 0;
int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
unsigned relocs_total = 0;
unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
for (i = 0; i < count; i++) {
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@ -809,13 +886,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
}
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct i915_address_space *vm,
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
@ -825,9 +902,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
/* FIXME: This lookup gets fixed later <-- danvet */
list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
i915_gem_object_move_to_active(obj, ring);
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
@ -885,10 +960,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
struct i915_ctx_hang_stats *hs;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;
@ -1000,7 +1076,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
@ -1025,7 +1102,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
eb = eb_create(args);
eb = eb_create(args, vm);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@ -1033,18 +1110,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
ret = eb_lookup_objects(eb, exec, args, file);
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object,
exec_list);
batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
@ -1054,7 +1129,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec, vm);
eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@ -1071,15 +1146,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but let's be paranoid and do it
* unconditionally for now. */
* hsw should have this fixed, but bdw mucks it up again. */
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
if (IS_ERR(hs)) {
ret = PTR_ERR(hs);
goto err;
}
if (hs->banned) {
ret = -EIO;
goto err;
}
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@ -1131,7 +1216,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:

View file

@ -30,6 +30,8 @@
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@ -57,6 +59,41 @@
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
#define GEN8_LEGACY_PDPS 4
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
{
gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
if (level != I915_CACHE_NONE)
pte |= PPAT_CACHED_INDEX;
else
pte |= PPAT_UNCACHED_INDEX;
return pte;
}
static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE_INDEX;
else
pde |= PPAT_UNCACHED_INDEX;
return pde;
}
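Note: both encoders start from a present, writable entry and OR in the PPAT index bits defined above. A worked example, assuming the standard x86 _PAGE_* bit positions (PRESENT = bit 0, RW = bit 1, PWT = bit 3, PCD = bit 4):

/* gen8_pte_encode(0x12345000, I915_CACHE_NONE, true):
 *   _PAGE_PRESENT | _PAGE_RW             = 0x003
 *   PPAT_UNCACHED_INDEX (_PAGE_PWT|PCD)  = 0x018
 *   addr                                 = 0x12345000
 *   ------------------------------------- pte = 0x1234501b
 */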
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
@ -158,6 +195,257 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
uint64_t val)
{
int ret;
BUG_ON(entry >= 4);
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, (u32)(val >> 32));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, (u32)(val));
intel_ring_advance(ring);
return 0;
}
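Note: each MI_LOAD_REGISTER_IMM(1) carries exactly one register/value pair, so programming one PDP entry costs the six dwords reserved by intel_ring_begin(ring, 6) above:

/* Emitted stream for entry n (illustrative layout):
 *   MI_LOAD_REGISTER_IMM(1), PDP_UDW(n), upper 32 bits of val,
 *   MI_LOAD_REGISTER_IMM(1), PDP_LDW(n), lower 32 bits of val
 */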
static int gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i, j, ret;
/* bit of a hack to find the actual last used pd */
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
for_each_ring(ring, dev_priv, j) {
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pd_dma_addr[i];
for_each_ring(ring, dev_priv, j) {
ret = gen8_write_pdp(ring, i, addr);
if (ret)
return ret;
}
}
return 0;
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
unsigned last_pte, i;
scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
last_pte = first_pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE)
last_pte = GEN8_PTES_PER_PAGE;
pt_vaddr = kmap_atomic(page_table);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
kunmap_atomic(pt_vaddr);
num_entries -= last_pte - first_pte;
first_pte = 0;
act_pt++;
}
}
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
struct sg_page_iter sg_iter;
pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
dma_addr_t page_addr;
page_addr = sg_dma_address(sg_iter.sg) +
(sg_iter.sg_pgoffset << PAGE_SHIFT);
pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
true);
if (++act_pte == GEN8_PTES_PER_PAGE) {
kunmap_atomic(pt_vaddr);
act_pt++;
pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
act_pte = 0;
}
}
kunmap_atomic(pt_vaddr);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
int i, j;
for (i = 0; i < ppgtt->num_pd_pages ; i++) {
if (ppgtt->pd_dma_addr[i]) {
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pd_dma_addr[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
if (addr)
pci_unmap_page(ppgtt->base.dev->pdev,
addr,
PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
}
}
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
__free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
__free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
}
/**
* GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
* net effect resembling a 2-level page table in normal x86 terms. Each PDP
* represents 1GB of memory:
* 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
*
* TODO: Do something with the size parameter
**/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
struct page *pt_pages;
int i, j, ret = -ENOMEM;
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
/* FIXME: split allocation into smaller pieces. For now we only ever do
* this once, but with full PPGTT, the multiple contiguous allocations
* will be bad.
*/
ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
if (!ppgtt->pd_pages)
return -ENOMEM;
pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
if (!pt_pages) {
__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
return -ENOMEM;
}
ppgtt->gen8_pt_pages = pt_pages;
ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
ppgtt->enable = gen8_ppgtt_enable;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
/*
* - Create a mapping for the page directories.
* - For each page directory:
* allocate space for page table mappings.
* map each page table
*/
for (i = 0; i < max_pdp; i++) {
dma_addr_t temp;
temp = pci_map_page(ppgtt->base.dev->pdev,
&ppgtt->pd_pages[i], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
goto err_out;
ppgtt->pd_dma_addr[i] = temp;
ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
if (!ppgtt->gen8_pt_dma_addr[i])
goto err_out;
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
temp = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
goto err_out;
ppgtt->gen8_pt_dma_addr[i][j] = temp;
}
}
/* For now, the PPGTT helper functions all require that the PDEs are
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again */
for (i = 0; i < max_pdp; i++) {
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
kunmap_atomic(pd_vaddr);
}
ppgtt->base.clear_range(&ppgtt->base, 0,
ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
true);
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
ppgtt->num_pt_pages,
(ppgtt->num_pt_pages - num_pt_pages) +
size % (1<<30));
return 0;
err_out:
ppgtt->base.cleanup(&ppgtt->base);
return ret;
}
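Note: the layout described in the comment above decomposes a legacy 32-bit GPU address as 2 PDP bits + 9 PDE bits + 9 PTE bits + a 12-bit page offset (4 * 512 * 512 * 4096 bytes = 4 GiB). A hypothetical helper (not part of the patch) makes the split explicit:

/* Illustrative address breakdown for the GEN8 legacy 2-level layout. */
static inline void gen8_addr_decompose(u32 addr, u32 *pdp, u32 *pde,
				       u32 *pte, u32 *offset)
{
	*offset = addr & 0xfff;		/* byte within the 4 KiB page */
	*pte = (addr >> 12) & 0x1ff;	/* entry within the page table */
	*pde = (addr >> 21) & 0x1ff;	/* entry within the page directory */
	*pdp = (addr >> 30) & 0x3;	/* which of the 4 PDP registers */
}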
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@ -342,7 +630,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
if (!ppgtt->pt_pages)
return -ENOMEM;
@ -353,7 +641,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
goto err_pt_alloc;
}
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
@ -410,6 +698,8 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
else if (IS_GEN8(dev))
ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
@ -573,6 +863,57 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
#else
iowrite32((u32)pte, addr);
iowrite32(pte >> 32, addr + 4);
#endif
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen8_gtt_pte_t __iomem *gtt_entries =
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr;
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) +
(sg_iter.sg_pgoffset << PAGE_SHIFT);
gen8_set_pte(&gtt_entries[i],
gen8_pte_encode(addr, level, true));
i++;
}
/*
* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR, they may still be subject
* to different (e.g. NUMA) access patterns. Therefore, even with the way
* we assume the hardware should work, we keep this posting read for
* paranoia.
*/
if (i != 0)
WARN_ON(readq(&gtt_entries[i-1])
!= gen8_pte_encode(addr, level, true));
#if 0 /* TODO: Still needed on GEN8? */
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
#endif
}
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
@ -615,6 +956,30 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = gen8_pte_encode(vm->scratch.addr,
I915_CACHE_LLC,
use_scratch);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
@ -638,7 +1003,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
readl(gtt_base);
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
@ -720,6 +1084,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
*end -= 4096;
}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
@ -817,7 +1182,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
drm_mm_takedown(&dev_priv->gtt.base.mm);
gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
if (INTEL_INFO(dev)->gen < 8)
gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@ -867,6 +1233,15 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 20;
}
static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
return bdw_gmch_ctl << 20;
}
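Note: on BDW the GGMS field is a power-of-two exponent, so the decode above yields (1 << GGMS) MiB of PTE space. Worked values:

/* GGMS = 1 -> 2 MiB of PTEs; GGMS = 2 -> 4 MiB; GGMS = 3 -> 8 MiB.
 * With 8-byte gen8 PTEs, 8 MiB of PTEs maps (8 MiB / 8) * 4 KiB = 4 GiB
 * of global GTT -- matching the *gtt_total computation in
 * gen8_gmch_probe() below.
 */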
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@ -874,6 +1249,108 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 25; /* 32 MB units */
}
static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
return bdw_gmch_ctl << 25; /* 32 MB units */
}
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
ret = setup_scratch_page(dev);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(dev_priv->gtt.gsm);
}
return ret;
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
{
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
/* FIXME(BDW): Bspec is completely confused about cache control bits. */
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
uint64_t pat;
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
* write would work. */
I915_WRITE(GEN8_PRIVATE_PAT, pat);
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}
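Note: each PPAT entry is one byte, and GEN8_PPAT(i, x) shifts the attribute byte into slot i of the 64-bit register pair written above. Decoding entry 2 of the table as a worked example:

/* GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC):
 *   GEN8_PPAT_WT      = 2 << 0 = 0x2   (write-through)
 *   GEN8_PPAT_LLCELLC = 2 << 2 = 0x8   (LLC + eLLC)
 *   byte = 0xa, placed at bits [23:16] of the 64-bit PAT value.
 */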
static int gen8_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
/* TODO: We're not aware of mappable constraints on gen8 yet */
*mappable_base = pci_resource_start(dev->pdev, 2);
*mappable_end = pci_resource_len(dev->pdev, 2);
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
*stolen = gen8_get_stolen_size(snb_gmch_ctl);
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
gen8_setup_private_ppat(dev_priv);
ret = ggtt_probe_common(dev, gtt_size);
dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
return ret;
}
static int gen6_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
@ -881,7 +1358,6 @@ static int gen6_gmch_probe(struct drm_device *dev,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
@ -901,24 +1377,13 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
ret = setup_scratch_page(dev);
if (ret)
DRM_ERROR("Scratch setup failed\n");
ret = ggtt_probe_common(dev, gtt_size);
dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
@ -972,7 +1437,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 5) {
gtt->gtt_probe = i915_gmch_probe;
gtt->base.cleanup = i915_gmch_remove;
} else {
} else if (INTEL_INFO(dev)->gen < 8) {
gtt->gtt_probe = gen6_gmch_probe;
gtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
@ -985,6 +1450,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
gtt->base.pte_encode = ivb_pte_encode;
else
gtt->base.pte_encode = snb_pte_encode;
} else {
dev_priv->gtt.gtt_probe = gen8_gmch_probe;
dev_priv->gtt.base.cleanup = gen6_gmch_remove;
}
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,

View file

@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
vma = i915_gem_vma_create(obj, ggtt);
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;

View file

@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
if (obj->pin_count) {
if (obj->pin_count || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
sizeof(long), GFP_KERNEL);
}
} else {
@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
int i;
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");

View file

@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
}
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
switch (a) {
case HANGCHECK_IDLE:
return "idle";
case HANGCHECK_WAIT:
return "wait";
case HANGCHECK_ACTIVE:
return "active";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
return "hung";
}
return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_state *error,
@ -231,7 +249,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
if (INTEL_INFO(dev)->gen >= 4)
err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
if (INTEL_INFO(dev)->gen >= 4)
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@ -255,6 +274,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(error->hangcheck_action[ring]),
error->hangcheck_score[ring]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@ -283,13 +305,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@ -601,6 +624,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6:
for (i = 0; i < dev_priv->num_fence_regs; i++)
@ -703,6 +727,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
@ -720,6 +745,9 @@ static void i915_record_ring_state(struct drm_device *dev,
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
error->hangcheck_score[ring->id] = ring->hangcheck.score;
error->hangcheck_action[ring->id] = ring->hangcheck.action;
}
@ -769,7 +797,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].num_requests = count;
error->ring[i].requests =
kmalloc(count*sizeof(struct drm_i915_error_request),
kcalloc(count, sizeof(*error->ring[i].requests),
GFP_ATOMIC);
if (error->ring[i].requests == NULL) {
error->ring[i].num_requests = 0;
@ -811,7 +839,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
if (i) {
active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
if (active_bo)
pinned_bo = active_bo + error->active_bo_count[ndx];
}
@ -885,8 +913,12 @@ void i915_capture_error_state(struct drm_device *dev)
return;
}
DRM_INFO("capturing error event; look for more information in "
"/sys/class/drm/card%d/error\n", dev->primary->index);
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
dev->primary->index);
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
kref_init(&error->ref);
error->eir = I915_READ(EIR);
@ -988,6 +1020,7 @@ const char *i915_cache_level_str(int type)
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped or LLC";
case I915_CACHE_L3_LLC: return " L3+LLC";
case I915_CACHE_WT: return " WT";
default: return "";
}
}
@ -1012,6 +1045,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
default:
WARN_ONCE(1, "Unsupported platform\n");
case 7:
case 8:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);

Diff not shown because of its large size.

Diff not shown because of its large size.

View file

@ -214,6 +214,22 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->regfile.saveBLC_PWM_CTL =
I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
dev_priv->regfile.saveBLC_HIST_CTL =
I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
dev_priv->regfile.saveBLC_PWM_CTL2 =
I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
dev_priv->regfile.saveBLC_PWM_CTL_B =
I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
dev_priv->regfile.saveBLC_HIST_CTL_B =
I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
dev_priv->regfile.saveBLC_PWM_CTL2_B =
I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
} else {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@ -302,6 +318,19 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else if (IS_VALLEYVIEW(dev)) {
I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
dev_priv->regfile.saveBLC_HIST_CTL);
I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
dev_priv->regfile.saveBLC_PWM_CTL2);
I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
dev_priv->regfile.saveBLC_PWM_CTL_B);
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
dev_priv->regfile.saveBLC_HIST_CTL_B);
I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
dev_priv->regfile.saveBLC_PWM_CTL2_B);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
@ -340,7 +369,9 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, LBB,
&dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
@ -367,7 +398,8 @@ int i915_save_state(struct drm_device *dev)
intel_disable_gt_powersave(dev);
/* Cache mode state */
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
if (INTEL_INFO(dev)->gen < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@ -390,7 +422,9 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
if (INTEL_INFO(dev)->gen <= 4)
pci_write_config_byte(dev->pdev, LBB,
dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
@ -414,7 +448,9 @@ int i915_restore_state(struct drm_device *dev)
}
/* Cache mode state */
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
if (INTEL_INFO(dev)->gen < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
0xffff0000);
/* Memory arbitration state */
I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
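
The "| 0xffff0000" in both restore writes reflects that CACHE_MODE_0 and MI_ARB_STATE are masked registers: bits 31:16 select which of bits 15:0 actually latch, so forcing the whole mask applies every saved low bit. The driver's helpers for this convention are, roughly:

/* Sketch of the masked-register write convention used by i915. */
#define _MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))  /* unmask bit and set it */
#define _MASKED_BIT_DISABLE(a) ((a) << 16)          /* unmask bit and clear it */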

View file

@ -32,30 +32,50 @@
#include "intel_drv.h"
#include "i915_drv.h"
#define dev_to_drm_minor(d) dev_get_drvdata((d))
#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
if (!intel_enable_rc6(dev))
return 0;
raw_time = I915_READ(reg) * 128ULL;
return DIV_ROUND_UP_ULL(raw_time, 100000);
/* On VLV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
u32 clkctl2;
clkctl2 = I915_READ(VLV_CLK_CTL2) >>
CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (WARN(!clkctl2, "bogus CZ count value"))
return 0;
units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
div = 1000000ULL * bias;
}
raw_time = I915_READ(reg) * units;
return DIV_ROUND_UP_ULL(raw_time, div);
}
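
On everything but Valleyview the defaults above encode counter ticks of 1.28 us reported in milliseconds: units = 128 and div = 100000 give ticks * 1.28 us / 1000 us-per-ms, with bias only mattering on the VLV path to keep the CZ-unit math in integers. A worked example under the non-VLV defaults:

/* Sketch: 1,000,000 ticks * 1.28 us/tick = 1.28 s, reported as ms. */
u64 ticks = 1000000ULL;                               /* hypothetical raw count */
u64 ms = DIV_ROUND_UP_ULL(ticks * 128ULL, 100000ULL); /* == 1280 */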
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *dminor = dev_to_drm_minor(kdev);
return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}
static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
@ -63,16 +83,20 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
if (IS_VALLEYVIEW(dminor->dev))
rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
if (IS_VALLEYVIEW(dminor->dev))
rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
@ -97,7 +121,7 @@ static struct attribute_group rc6_attr_group = {
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
if (!HAS_L3_GPU_CACHE(dev))
if (!HAS_L3_DPF(dev))
return -EPERM;
if (offset % 4 != 0)
@ -115,31 +139,34 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
uint32_t misccpctl;
int i, ret;
int slice = (int)(uintptr_t)attr->private;
int ret;
count = round_down(count, 4);
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
if (dev_priv->l3_parity.remap_info[slice])
memcpy(buf,
dev_priv->l3_parity.remap_info[slice] + (offset/4),
count);
else
memset(buf, 0, count);
mutex_unlock(&drm_dev->struct_mutex);
return i - offset;
return count;
}
static ssize_t
@ -148,21 +175,26 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
struct i915_hw_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
if (dev_priv->hw_contexts_disabled)
return -ENXIO;
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
if (!dev_priv->l3_parity.remap_info) {
if (!dev_priv->l3_parity.remap_info[slice]) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
@ -182,13 +214,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
dev_priv->l3_parity.remap_info = temp;
dev_priv->l3_parity.remap_info[slice] = temp;
memcpy(dev_priv->l3_parity.remap_info + (offset/4),
buf + (offset/4),
count);
memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
i915_gem_l3_remap(drm_dev);
/* NB: We defer the remapping until we switch to the context */
list_for_each_entry(ctx, &dev_priv->context_list, link)
ctx->remap_slice |= (1<<slice);
mutex_unlock(&drm_dev->struct_mutex);
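
Rather than poking the remap registers immediately, the write path now only marks the slice dirty in every context; the context-switch code is then expected to drain those bits. A sketch of that consumer, assuming the per-slice i915_gem_l3_remap(ring, slice) signature this series introduces:

/* Sketch: run when switching to a context with pending L3 remaps. */
for (i = 0; i < NUM_L3_SLICES(dev); i++) {
	if (!(ctx->remap_slice & (1 << i)))
		continue;
	if (i915_gem_l3_remap(ring, i) == 0)   /* assumed signature */
		ctx->remap_slice &= ~(1 << i);
}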
@ -200,17 +232,29 @@ static struct bin_attribute dpf_attrs = {
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
.mmap = NULL
.mmap = NULL,
.private = (void *)0
};
static struct bin_attribute dpf_attrs_1 = {
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
.mmap = NULL,
.private = (void *)1
};
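
Both attributes share the same read/write callbacks; the slice index rides along in .private as a pointer-sized integer and is recovered with the (int)(uintptr_t) casts seen in i915_l3_read() and i915_l3_write() above. The round trip in isolation:

/* Sketch: stashing a small integer in a void * attribute field. */
void *priv  = (void *)1;               /* at attribute definition time */
int   slice = (int)(uintptr_t)priv;    /* in the callback: slice == 1 */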
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
@ -227,7 +271,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -238,11 +282,13 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@ -257,7 +303,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@ -267,6 +313,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
@ -310,11 +358,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@ -329,7 +379,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min;
@ -339,6 +389,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev)) {
@ -388,7 +440,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap;
@ -436,7 +488,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = container_of(kobj, struct device, kobj);
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct i915_error_state_file_priv error_priv;
struct drm_i915_error_state_buf error_str;
@ -471,7 +523,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
int ret;
@ -501,27 +553,34 @@ void i915_setup_sysfs(struct drm_device *dev)
#ifdef CONFIG_PM
if (INTEL_INFO(dev)->gen >= 6) {
ret = sysfs_merge_group(&dev->primary->kdev.kobj,
ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
#endif
if (HAS_L3_GPU_CACHE(dev)) {
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
if (NUM_L3_SLICES(dev) > 1) {
ret = device_create_bin_file(dev->primary->kdev,
&dpf_attrs_1);
if (ret)
DRM_ERROR("l3 parity slice 1 setup failed\n");
}
}
ret = 0;
if (IS_VALLEYVIEW(dev))
ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
else if (INTEL_INFO(dev)->gen >= 6)
ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
&error_state_attr);
if (ret)
DRM_ERROR("error_state sysfs setup failed\n");
@ -529,13 +588,14 @@ void i915_setup_sysfs(struct drm_device *dev)
void i915_teardown_sysfs(struct drm_device *dev)
{
sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
if (IS_VALLEYVIEW(dev))
sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
else
sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
#endif
}

View file

@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
TP_printk("dev=%d", __entry->dev)
);
TRACE_EVENT(i915_gem_evict_vm,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->vm = vm;
),
TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
);
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_ring_buffer *from,
struct intel_ring_buffer *to,
u32 seqno),
TP_ARGS(from, to, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, sync_from)
__field(u32, sync_to)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to->id;
__entry->seqno = seqno;
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
__entry->dev,
__entry->sync_from, __entry->sync_to,
__entry->seqno)
);
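
Each TRACE_EVENT(name, ...) above expands into a trace_name() hook for the driver to call. The call sites these two new events imply look roughly like this (placement is illustrative):

trace_i915_gem_evict_vm(vm);                  /* before evicting a whole address space */
trace_i915_gem_ring_sync_to(from, to, seqno); /* before emitting an inter-ring wait */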
TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno)
TRACE_EVENT(i915_gem_request_complete,
TP_PROTO(struct intel_ring_buffer *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = ring->get_seqno(ring, false);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
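
Because the event now fetches the seqno itself through ring->get_seqno(), its call sites shrink to just the ring pointer:

trace_i915_gem_request_complete(ring);   /* previously took (ring, seqno) */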
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,

Some files were not shown because too many files changed in this diff.