Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel
* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
drm/i915: fix scheduling while holding the new active list spinlock
drm/i915: Allow tiling of objects with bit 17 swizzling by the CPU.
drm/i915: Correctly set the write flag for get_user_pages in pread.
drm/i915: Fix use of uninitialized var in 40a5f0de
drm/i915: indicate framebuffer restore key in SysRq help message
drm/i915: sync hdmi detection by hdmi identifier with 2D
drm/i915: Fix a mismerge of the IGD patch (new .find_pll hooks missed)
drm/i915: Implement batch and ring buffer dumping
This commit is contained in: commit b897e6fbc4
@@ -446,6 +446,9 @@ struct drm_i915_gem_object {
 	uint32_t tiling_mode;
 	uint32_t stride;
 
+	/** Record of address bit 17 of each page at last unbind. */
+	long *bit_17;
+
 	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
 	uint32_t agp_type;
 
@@ -635,9 +638,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_get_pages(struct drm_gem_object *obj);
+void i915_gem_object_put_pages(struct drm_gem_object *obj);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,

@@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 							     uint64_t offset,
 							     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_pages(struct drm_gem_object *obj);
-static void i915_gem_object_put_pages(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
@@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages,
 		int length)
 {
 	char __iomem *vaddr;
-	int ret;
+	int unwritten;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
 	if (vaddr == NULL)
 		return -ENOMEM;
-	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	return ret;
+	if (unwritten)
+		return -EFAULT;
+
+	return 0;
+}
+
+static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = obj->dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+		obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
 static inline int
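Reviewer's note on the rename above: __copy_to_user_inatomic() returns the number of bytes NOT copied (zero on success), not a negative errno, so returning its result directly made a partial copy look like a positive "success" value. A minimal userspace sketch of the convention, with fake_copy_to_user() as a hypothetical stand-in:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for __copy_to_user_inatomic(): returns the
 * number of bytes NOT copied, 0 on success. */
static unsigned long fake_copy_to_user(void *dst, const void *src,
				       unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

/* The pattern the fix adopts: map "bytes remaining" to -EFAULT. */
static int read_chunk(void *dst, const void *src, unsigned long n)
{
	unsigned long unwritten = fake_copy_to_user(dst, src, n);

	if (unwritten)
		return -EFAULT;
	return 0;
}

int main(void)
{
	char src[16] = "hello", dst[16];

	printf("read_chunk: %d\n", read_chunk(dst, src, sizeof(src)));
	return 0;
}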
@@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page,
 	return 0;
 }
 
+static inline int
+slow_shmem_bit17_copy(struct page *gpu_page,
+		      int gpu_offset,
+		      struct page *cpu_page,
+		      int cpu_offset,
+		      int length,
+		      int is_read)
+{
+	char *gpu_vaddr, *cpu_vaddr;
+
+	/* Use the unswizzled path if this page isn't affected. */
+	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+		if (is_read)
+			return slow_shmem_copy(cpu_page, cpu_offset,
+					       gpu_page, gpu_offset, length);
+		else
+			return slow_shmem_copy(gpu_page, gpu_offset,
+					       cpu_page, cpu_offset, length);
+	}
+
+	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
+	if (gpu_vaddr == NULL)
+		return -ENOMEM;
+
+	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
+	if (cpu_vaddr == NULL) {
+		kunmap_atomic(gpu_vaddr, KM_USER0);
+		return -ENOMEM;
+	}
+
+	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
+	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
+	 */
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		if (is_read) {
+			memcpy(cpu_vaddr + cpu_offset,
+			       gpu_vaddr + swizzled_gpu_offset,
+			       this_length);
+		} else {
+			memcpy(gpu_vaddr + swizzled_gpu_offset,
+			       cpu_vaddr + cpu_offset,
+			       this_length);
+		}
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	kunmap_atomic(cpu_vaddr, KM_USER1);
+	kunmap_atomic(gpu_vaddr, KM_USER0);
+
+	return 0;
+}
+
 /**
  * This is the fast shmem pread path, which attempts to copy_from_user directly
  * from the backing pages of the object to the user's address space. On a
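Reviewer's note on the gpu_offset ^ 64 above: when physical address bit 17 of a page differs from what the GPU's swizzle expects, the two 64-byte cachelines within each 128-byte chunk trade places, so the copy compensates by XORing bit 6 of the offset. A small sketch of the offset arithmetic (userspace, illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int offsets[] = { 0, 60, 64, 100, 128, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		unsigned int off = offsets[i];

		/* XORing bit 6 swaps the two 64-byte halves of each
		 * 128-byte chunk: 0<->64, 128<->192, ... */
		printf("offset %3u -> swizzled %3u\n", off, off ^ 64);
	}
	return 0;
}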
@@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	int page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
+	int do_bit17_swizzling;
 
 	remain = args->size;
 
@@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	down_read(&mm->mmap_sem);
 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-				      num_pages, 0, 0, user_pages, NULL);
+				      num_pages, 1, 0, user_pages, NULL);
 	up_read(&mm->mmap_sem);
 	if (pinned_pages < num_pages) {
 		ret = -EFAULT;
 		goto fail_put_user_pages;
 	}
 
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
 	mutex_lock(&dev->struct_mutex);
 
 	ret = i915_gem_object_get_pages(obj);
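Reviewer's note: the fifth argument of get_user_pages() is the write flag. During pread the kernel copies into the pinned user pages (they are the destination), so they must be faulted in writable; passing 0 here was the bug the "Correctly set the write flag" patch fixes. A hedged userspace sketch of the copy direction (names are made up for the example):

#include <string.h>

struct page_buf { char data[4096]; };

/* pread: the object page is the source and the user's page the
 * destination, so the user page gets written -- hence write=1 when
 * pinning it with get_user_pages(). */
static void pread_copy(struct page_buf *user_dst,
		       const struct page_buf *obj_src, size_t len)
{
	memcpy(user_dst->data, obj_src->data, len);
}

int main(void)
{
	static struct page_buf user, obj;

	pread_copy(&user, &obj, sizeof(obj.data));
	return 0;
}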
@@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		ret = slow_shmem_copy(user_pages[data_page_index],
-				      data_page_offset,
-				      obj_priv->pages[shmem_page_index],
-				      shmem_page_offset,
-				      page_length);
+		if (do_bit17_swizzling) {
+			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+						    shmem_page_offset,
+						    user_pages[data_page_index],
+						    data_page_offset,
+						    page_length,
+						    1);
+		} else {
+			ret = slow_shmem_copy(user_pages[data_page_index],
+					      data_page_offset,
+					      obj_priv->pages[shmem_page_index],
+					      shmem_page_offset,
+					      page_length);
+		}
 		if (ret)
 			goto fail_put_pages;
 
@@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-	if (ret != 0)
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+	if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+	} else {
+		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+		if (ret != 0)
+			ret = i915_gem_shmem_pread_slow(dev, obj, args,
+							file_priv);
+	}
 
 	drm_gem_object_unreference(obj);
 
@@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	int page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
+	int do_bit17_swizzling;
 
 	remain = args->size;
 
@@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		goto fail_put_user_pages;
 	}
 
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
 	mutex_lock(&dev->struct_mutex);
 
 	ret = i915_gem_object_get_pages(obj);
@@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-				      shmem_page_offset,
-				      user_pages[data_page_index],
-				      data_page_offset,
-				      page_length);
+		if (do_bit17_swizzling) {
+			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+						    shmem_page_offset,
+						    user_pages[data_page_index],
+						    data_page_offset,
+						    page_length,
+						    0);
+		} else {
+			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+					      shmem_page_offset,
+					      user_pages[data_page_index],
+					      data_page_offset,
+					      page_length);
+		}
 		if (ret)
 			goto fail_put_pages;
 
@@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
 						       file_priv);
 		}
+	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
 	} else {
 		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
 		if (ret == -EFAULT) {
@@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static void
+void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	if (--obj_priv->pages_refcount != 0)
 		return;
 
+	if (obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_save_bit_17_swizzle(obj);
+
 	for (i = 0; i < page_count; i++)
 		if (obj_priv->pages[i] != NULL) {
 			if (obj_priv->dirty)
@@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev,
 
 			if (obj->write_domain != 0)
 				i915_gem_object_move_to_flushing(obj);
-			else
+			else {
+				/* Take a reference on the object so it won't be
+				 * freed while the spinlock is held. The list
+				 * protection for this spinlock is safe when breaking
+				 * the lock like this since the next thing we do
+				 * is just get the head of the list again.
+				 */
+				drm_gem_object_reference(obj);
 				i915_gem_object_move_to_inactive(obj);
+				spin_unlock(&dev_priv->mm.active_list_lock);
+				drm_gem_object_unreference(obj);
+				spin_lock(&dev_priv->mm.active_list_lock);
+			}
 		}
 out:
 	spin_unlock(&dev_priv->mm.active_list_lock);
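Reviewer's note: dropping the last reference can reach paths that may sleep, which is illegal under a spinlock. The fix pins the object with a reference, drops the lock around the unreference, and re-takes it; re-reading the list head afterwards keeps the iteration valid. A hedged userspace sketch of the pattern (pthreads, illustrative names):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct object {
	atomic_int refcount;
};

static pthread_spinlock_t list_lock;

static void object_unref(struct object *obj)
{
	/* The final unreference may free, flush, or otherwise sleep --
	 * it must never run with list_lock held. */
	if (atomic_fetch_sub(&obj->refcount, 1) == 1)
		free(obj);
}

static void retire_one(struct object *obj)
{
	pthread_spin_lock(&list_lock);
	/* ... move obj off the active list under the lock ... */
	atomic_fetch_add(&obj->refcount, 1);	/* keep obj alive */
	pthread_spin_unlock(&list_lock);

	object_unref(obj);			/* safe: lock not held */

	pthread_spin_lock(&list_lock);		/* re-take, then re-read */
	pthread_spin_unlock(&list_lock);	/* the list head */
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	atomic_store(&obj->refcount, 1);
	retire_one(obj);
	return 0;
}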
@@ -1884,7 +1997,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	return ret;
 }
 
-static int
+int
 i915_gem_object_get_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1922,6 +2035,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 		}
 		obj_priv->pages[i] = page;
 	}
+
+	if (obj_priv->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_do_bit_17_swizzle(obj);
+
 	return 0;
 }
 
@@ -3002,13 +3119,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 			drm_free(*relocs, reloc_count * sizeof(**relocs),
 				 DRM_MEM_DRIVER);
 			*relocs = NULL;
-			return ret;
+			return -EFAULT;
 		}
 
 		reloc_index += exec_list[i].relocation_count;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int
@@ -3017,23 +3134,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
 			    struct drm_i915_gem_relocation_entry *relocs)
 {
 	uint32_t reloc_count = 0, i;
-	int ret;
+	int ret = 0;
 
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		int unwritten;
 
 		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
 
-		if (ret == 0) {
-			ret = copy_to_user(user_relocs,
-					   &relocs[reloc_count],
-					   exec_list[i].relocation_count *
-					   sizeof(*relocs));
+		unwritten = copy_to_user(user_relocs,
+					 &relocs[reloc_count],
+					 exec_list[i].relocation_count *
+					 sizeof(*relocs));
+
+		if (unwritten) {
+			ret = -EFAULT;
+			goto err;
 		}
 
 		reloc_count += exec_list[i].relocation_count;
 	}
 
+err:
 	drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
 
 	return ret;
@@ -3243,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec_offset = exec_list[args->buffer_count - 1].offset;
 
 #if WATCH_EXEC
-	i915_gem_dump_object(object_list[args->buffer_count - 1],
+	i915_gem_dump_object(batch_obj,
 			      args->batch_len,
 			      __func__,
 			      ~0);
@@ -3308,10 +3430,12 @@ err:
 				   (uintptr_t) args->buffers_ptr,
 				   exec_list,
 				   sizeof(*exec_list) * args->buffer_count);
-		if (ret)
+		if (ret) {
+			ret = -EFAULT;
 			DRM_ERROR("failed to copy %d exec entries "
 				  "back to user (%d)\n",
 				  args->buffer_count, ret);
+		}
 	}
 
 	/* Copy the updated relocations out regardless of current error
@@ -3593,6 +3717,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	i915_gem_free_mmap_offset(obj);
 
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+	kfree(obj_priv->bit_17);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
 

@@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+{
+	int page, i;
+	uint32_t *mem;
+
+	for (page = 0; page < page_count; page++) {
+		mem = kmap(pages[page]);
+		for (i = 0; i < PAGE_SIZE; i += 4)
+			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
+		kunmap(pages[page]);
+	}
+}
+
+static int i915_batchbuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		obj = obj_priv->obj;
+		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+			ret = i915_gem_object_get_pages(obj);
+			if (ret) {
+				DRM_ERROR("Failed to get pages: %d\n", ret);
+				spin_unlock(&dev_priv->mm.active_list_lock);
+				return ret;
+			}
+
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+			i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+
+			i915_gem_object_put_pages(obj);
+		}
+	}
+
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	return 0;
+}
+
+static int i915_ringbuffer_data(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u8 *virt;
+	uint32_t *ptr, off;
+
+	if (!dev_priv->ring.ring_obj) {
+		seq_printf(m, "No ringbuffer setup\n");
+		return 0;
+	}
+
+	virt = dev_priv->ring.virtual_start;
+
+	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+		ptr = (uint32_t *)(virt + off);
+		seq_printf(m, "%08x : %08x\n", off, *ptr);
+	}
+
+	return 0;
+}
+
+static int i915_ringbuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned int head, tail, mask;
+
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	mask = dev_priv->ring.tail_mask;
+
+	seq_printf(m, "RingHead : %08x\n", head);
+	seq_printf(m, "RingTail : %08x\n", tail);
+	seq_printf(m, "RingMask : %08x\n", mask);
+	seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+
+	return 0;
+}
+
+
 static struct drm_info_list i915_gem_debugfs_list[] = {
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
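Note on the dump format used by the new debugfs entries above: one dword per line, offset and value as eight hex digits each. A trivial userspace rendering of the same format:

#include <stdint.h>
#include <stdio.h>

static void dump_dwords(const uint32_t *buf, size_t bytes)
{
	size_t off;

	for (off = 0; off < bytes; off += 4)
		printf("%08zx : %08x\n", off, buf[off / 4]);
}

int main(void)
{
	uint32_t ring[] = { 0x02000000, 0x00000000, 0x05000000, 0x01000000 };

	dump_dwords(ring, sizeof(ring));
	return 0;
}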
@@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
 	{"i915_gem_hws", i915_hws_info, 0},
+	{"i915_ringbuffer_data", i915_ringbuffer_data, 0},
+	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 };
 #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
 

@@ -25,6 +25,8 @@
  *
  */
 
+#include "linux/string.h"
+#include "linux/bitops.h"
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 			swizzle_y = I915_BIT_6_SWIZZLE_9_11;
 		} else {
 			/* Bit 17 swizzling by the CPU in addition. */
-			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+			swizzle_y = I915_BIT_6_SWIZZLE_9_17;
 		}
 		break;
 	}
@@ -288,6 +290,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
 		else
 			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+		/* Hide bit 17 swizzling from the user. This prevents old Mesa
+		 * from aborting the application on sw fallbacks to bit 17,
+		 * and we use the pread/pwrite bit17 paths to swizzle for it.
+		 * If there was a user that was relying on the swizzle
+		 * information for drm_intel_bo_map()ed reads/writes this would
+		 * break it, but we don't have any of those.
+		 */
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
 		/* If we can't handle the swizzling, make it untiled. */
 		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
 			args->tiling_mode = I915_TILING_NONE;
@@ -354,8 +369,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 		DRM_ERROR("unknown tiling mode\n");
 	}
 
+	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static int
+i915_gem_swizzle_page(struct page *page)
+{
+	char *vaddr;
+	int i;
+	char temp[64];
+
+	vaddr = kmap(page);
+	if (vaddr == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < PAGE_SIZE; i += 128) {
+		memcpy(temp, &vaddr[i], 64);
+		memcpy(&vaddr[i], &vaddr[i + 64], 64);
+		memcpy(&vaddr[i + 64], temp, 64);
+	}
+
+	kunmap(page);
+
+	return 0;
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count = obj->size >> PAGE_SHIFT;
+	int i;
+
+	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+		return;
+
+	if (obj_priv->bit_17 == NULL)
+		return;
+
+	for (i = 0; i < page_count; i++) {
+		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj_priv->bit_17) != 0)) {
+			int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+			if (ret != 0) {
+				DRM_ERROR("Failed to swizzle page\n");
+				return;
+			}
+			set_page_dirty(obj_priv->pages[i]);
+		}
+	}
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count = obj->size >> PAGE_SHIFT;
+	int i;
+
+	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+		return;
+
+	if (obj_priv->bit_17 == NULL) {
+		obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+					   sizeof(long), GFP_KERNEL);
+		if (obj_priv->bit_17 == NULL) {
+			DRM_ERROR("Failed to allocate memory for bit 17 "
+				  "record\n");
+			return;
+		}
+	}
+
+	for (i = 0; i < page_count; i++) {
+		if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
+			__set_bit(i, obj_priv->bit_17);
+		else
+			__clear_bit(i, obj_priv->bit_17);
+	}
+}

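Reviewer's note: i915_gem_swizzle_page() above is its own inverse -- swapping the two 64-byte halves of every 128-byte chunk twice restores the original -- which is why a single pass suffices whenever the recorded bit 17 and the page's current bit 17 disagree. Userspace sketch of the swap:

#include <stdio.h>
#include <string.h>

static void swizzle_page(char *buf, size_t len)
{
	char temp[64];
	size_t i;

	for (i = 0; i + 128 <= len; i += 128) {
		memcpy(temp, &buf[i], 64);
		memcpy(&buf[i], &buf[i + 64], 64);
		memcpy(&buf[i + 64], temp, 64);
	}
}

int main(void)
{
	char page[256];
	int i;

	for (i = 0; i < 256; i++)
		page[i] = "ABCD"[i / 64];

	swizzle_page(page, sizeof(page));
	printf("%c%c%c%c\n", page[0], page[64], page[128], page[192]);
	/* prints "BADC": halves of each 128-byte chunk swapped */

	swizzle_page(page, sizeof(page));
	printf("%c%c%c%c\n", page[0], page[64], page[128], page[192]);
	/* prints "ABCD": applying it twice is the identity */
	return 0;
}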
@@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = {
 	.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
 	.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
 		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+	.find_pll = intel_find_best_PLL,
     },
     { /* INTEL_LIMIT_IGD_LVDS */
 	.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = {
 	/* IGD only supports single-channel mode. */
 	.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
 		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
+	.find_pll = intel_find_best_PLL,
     },
 
 };

@@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
 
 static struct sysrq_key_op sysrq_intelfb_restore_op = {
 	.handler = intelfb_sysrq,
-	.help_msg = "force fb",
-	.action_msg = "force restore of fb console",
+	.help_msg = "force-fb(G)",
+	.action_msg = "Restore framebuffer console",
 };
 
 int intelfb_probe(struct drm_device *dev)

@@ -38,7 +38,7 @@
 struct intel_hdmi_priv {
 	u32 sdvox_reg;
 	u32 save_SDVOX;
-	int has_hdmi_sink;
+	bool has_hdmi_sink;
 };
 
 static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 	return true;
 }
 
+static void
+intel_hdmi_sink_detect(struct drm_connector *connector)
+{
+	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+	struct edid *edid = NULL;
+
+	edid = drm_get_edid(&intel_output->base,
+			    &intel_output->ddc_bus->adapter);
+	if (edid != NULL) {
+		hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+		kfree(edid);
+		intel_output->base.display_info.raw_edid = NULL;
+	}
+}
+
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector)
 {
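Reviewer's note: drm_detect_hdmi_monitor() decides has_hdmi_sink by looking for an HDMI Vendor-Specific Data Block (IEEE OUI 00-0C-03) in the EDID's CEA-861 extension. A simplified, hedged sketch of that scan over a raw EDID blob (real EDID parsing handles more cases):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CEA_EXT_TAG	0x02
#define HDMI_IEEE_OUI	0x000c03

/* Scan one 128-byte CEA-861 extension block for an HDMI
 * Vendor-Specific Data Block (tag 3, OUI 00-0C-03). */
static bool cea_block_has_hdmi_vsdb(const uint8_t *ext)
{
	uint8_t dtd_start = ext[2];	/* where detailed timings begin */
	size_t i = 4;			/* data blocks start at byte 4 */

	while (i < dtd_start) {
		uint8_t tag = ext[i] >> 5;
		uint8_t len = ext[i] & 0x1f;

		if (tag == 3 && len >= 3) {
			uint32_t oui = ext[i + 1] |
				       (ext[i + 2] << 8) |
				       (ext[i + 3] << 16);

			if (oui == HDMI_IEEE_OUI)
				return true;
		}
		i += len + 1;
	}
	return false;
}

static bool edid_is_hdmi(const uint8_t *edid, size_t num_blocks)
{
	size_t b;

	/* Extension blocks follow the 128-byte base block. */
	for (b = 1; b < num_blocks; b++)
		if (edid[b * 128] == CEA_EXT_TAG &&
		    cea_block_has_hdmi_vsdb(edid + b * 128))
			return true;
	return false;
}

int main(void)
{
	uint8_t edid[256] = { 0 };

	/* Forge a minimal CEA extension with one HDMI VSDB. */
	edid[128] = CEA_EXT_TAG;
	edid[130] = 8;			/* DTDs start after one data block */
	edid[132] = (3 << 5) | 3;	/* vendor block, length 3 */
	edid[133] = 0x03;		/* OUI, stored little-endian */
	edid[134] = 0x0c;
	edid[135] = 0x00;

	printf("HDMI sink: %s\n", edid_is_hdmi(edid, 2) ? "yes" : "no");
	return 0;
}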
@@ -158,9 +174,10 @@ intel_hdmi_detect(struct drm_connector *connector)
 		return connector_status_unknown;
 	}
 
-	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
+		intel_hdmi_sink_detect(connector);
 		return connector_status_connected;
-	else
+	} else
 		return connector_status_disconnected;
 }
 

@@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
 	intel_sdvo_read_response(intel_output, &response, 2);
 }
 
+static void
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct edid *edid = NULL;
+
+	intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+	edid = drm_get_edid(&intel_output->base,
+			    &intel_output->ddc_bus->adapter);
+	if (edid != NULL) {
+		sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+		kfree(edid);
+		intel_output->base.display_info.raw_edid = NULL;
+	}
+}
+
 static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
 {
 	u8 response[2];
@@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return connector_status_unknown;
 
-	if ((response[0] != 0) || (response[1] != 0))
+	if ((response[0] != 0) || (response[1] != 0)) {
+		intel_sdvo_hdmi_sink_detect(connector);
 		return connector_status_connected;
-	else
+	} else
 		return connector_status_disconnected;
 }
 

@@ -594,6 +594,9 @@ struct drm_i915_gem_busy {
 #define I915_BIT_6_SWIZZLE_9_10_11	4
 /* Not seen by userland */
 #define I915_BIT_6_SWIZZLE_UNKNOWN	5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17		6
+#define I915_BIT_6_SWIZZLE_9_10_17	7
 
 struct drm_i915_gem_set_tiling {
 	/** Handle of the buffer to have its tiling state updated */