drm: Remove DRM_ERR OS macro.
This was used to make all ioctl handlers return -errno on Linux and errno on *BSD. Instead, just return -errno in shared code, and flip the sign on return from shared code to *BSD code.

Signed-off-by: Dave Airlie <airlied@linux.ie>
Parent: 23fd50450a
Commit: 20caafa6ec
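
To make the new convention concrete, here is a minimal, self-contained sketch of the idea (this is not the kernel's or any *BSD driver's actual code; the function names are made up for illustration). Shared DRM code now always returns -errno, exactly as native Linux ioctl handlers do, and an OS-specific entry point on *BSD, where ioctl handlers are expected to return positive errno values, simply flips the sign of a negative result on the way out:

    /* Sketch only: illustrates the -errno convention this commit adopts. */
    #include <errno.h>
    #include <stdio.h>

    /* A shared ioctl handler written to the new convention. */
    static int shared_drm_handler(int arg)
    {
        if (arg < 0)
            return -EINVAL;    /* previously: return DRM_ERR(EINVAL); */
        return 0;
    }

    /* Hypothetical *BSD-side wrapper: convert -errno back to errno. */
    static int bsd_ioctl_entry(int arg)
    {
        int ret = shared_drm_handler(arg);
        return ret < 0 ? -ret : ret;
    }

    int main(void)
    {
        printf("Linux-style result: %d\n", shared_drm_handler(-1)); /* -22 */
        printf("BSD-style result:   %d\n", bsd_ioctl_entry(-1));    /*  22 */
        return 0;
    }

On Linux nothing extra is needed, since returning -errno from the shared handler is already the right answer for the ioctl path.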
@@ -130,7 +130,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS)
 
 	if (update.num && !rects) {
 		DRM_ERROR("Failed to allocate cliprect memory\n");
-		err = DRM_ERR(ENOMEM);
+		err = -ENOMEM;
 		goto error;
 	}
 
@@ -140,7 +140,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS)
 					     update.num *
 					     sizeof(*rects))) {
 		DRM_ERROR("Failed to copy cliprects from userspace\n");
-		err = DRM_ERR(EFAULT);
+		err = -EFAULT;
 		goto error;
 	}
 
@@ -161,7 +161,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS)
 		break;
 	default:
 		DRM_ERROR("Invalid update type %d\n", update.type);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	return 0;

@@ -123,7 +123,7 @@ int drm_setunique(struct inode *inode, struct file *filp,
 	 */
 	ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
 	if (ret != 3)
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	domain = bus >> 8;
 	bus &= 0xff;
 

@@ -125,7 +125,7 @@ int drm_lock(struct inode *inode, struct file *filp,
 	if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
 		if (dev->driver->dma_quiescent(dev)) {
 			DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context);
-			return DRM_ERR(EBUSY);
+			return -EBUSY;
 		}
 	}
 

@@ -10,7 +10,6 @@
 #define DRMFILE struct file *
 /** Ioctl arguments */
 #define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data
-#define DRM_ERR(d) -(d)
 /** Current process ID */
 #define DRM_CURRENTPID current->pid
 #define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
@@ -380,7 +380,7 @@ static int i810_dma_initialize(struct drm_device * dev,
 		i810_dma_cleanup(dev);
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
-		return DRM_ERR(ENOMEM);
+		return -ENOMEM;
 	}
 
 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

@@ -389,7 +389,7 @@ static int i830_dma_initialize(struct drm_device * dev,
 		i830_dma_cleanup(dev);
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
-		return DRM_ERR(ENOMEM);
+		return -ENOMEM;
 	}
 
 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -70,7 +70,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 		last_head = ring->head;
 	}
 
-	return DRM_ERR(EBUSY);
+	return -EBUSY;
 }
 
 void i915_kernel_lost_context(struct drm_device * dev)
@@ -137,7 +137,7 @@ static int i915_initialize(struct drm_device * dev,
 		DRM_ERROR("can not find sarea!\n");
 		dev->dev_private = (void *)dev_priv;
 		i915_dma_cleanup(dev);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
@@ -145,7 +145,7 @@ static int i915_initialize(struct drm_device * dev,
 		dev->dev_private = (void *)dev_priv;
 		i915_dma_cleanup(dev);
 		DRM_ERROR("can not find mmio map!\n");
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	dev_priv->sarea_priv = (drm_i915_sarea_t *)
@@ -169,7 +169,7 @@ static int i915_initialize(struct drm_device * dev,
 		i915_dma_cleanup(dev);
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
-		return DRM_ERR(ENOMEM);
+		return -ENOMEM;
 	}
 
 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -200,7 +200,7 @@ static int i915_initialize(struct drm_device * dev,
 			dev->dev_private = (void *)dev_priv;
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Can not allocate hardware status page\n");
-			return DRM_ERR(ENOMEM);
+			return -ENOMEM;
 		}
 		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
 		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
@@ -221,24 +221,24 @@ static int i915_dma_resume(struct drm_device * dev)
 
 	if (!dev_priv->sarea) {
 		DRM_ERROR("can not find sarea!\n");
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	if (!dev_priv->mmio_map) {
 		DRM_ERROR("can not find mmio map!\n");
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	if (dev_priv->ring.map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
-		return DRM_ERR(ENOMEM);
+		return -ENOMEM;
 	}
 
 	/* Program Hardware Status Page */
 	if (!dev_priv->hw_status_page) {
 		DRM_ERROR("Can not find hardware status page\n");
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
 
@@ -266,7 +266,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
 		dev_priv = drm_alloc(sizeof(drm_i915_private_t),
 				     DRM_MEM_DRIVER);
 		if (dev_priv == NULL)
-			return DRM_ERR(ENOMEM);
+			return -ENOMEM;
 		retcode = i915_initialize(dev, dev_priv, &init);
 		break;
 	case I915_CLEANUP_DMA:
@@ -276,7 +276,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
 		retcode = i915_dma_resume(dev);
 		break;
 	default:
-		retcode = DRM_ERR(EINVAL);
+		retcode = -EINVAL;
 		break;
 	}
 
@@ -366,7 +366,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
 	RING_LOCALS;
 
 	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 
 	BEGIN_LP_RING((dwords+1)&~1);
 
@@ -374,17 +374,17 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
 		int cmd, sz;
 
 		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
-			return DRM_ERR(EINVAL);
+			return -EINVAL;
 
 		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
-			return DRM_ERR(EINVAL);
+			return -EINVAL;
 
 		OUT_RING(cmd);
 
 		while (++i, --sz) {
 			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
 							 sizeof(cmd))) {
-				return DRM_ERR(EINVAL);
+				return -EINVAL;
 			}
 			OUT_RING(cmd);
 		}
@@ -407,13 +407,13 @@ static int i915_emit_box(struct drm_device * dev,
 	RING_LOCALS;
 
 	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 	}
 
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
 			  box.x1, box.y1, box.x2, box.y2);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	if (IS_I965G(dev)) {
@@ -467,7 +467,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 
 	if (cmd->sz & 0x3) {
 		DRM_ERROR("alignment");
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	i915_kernel_lost_context(dev);
@@ -502,7 +502,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	i915_kernel_lost_context(dev);
@@ -619,7 +619,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS)
 
 	if (!dev_priv->allow_batchbuffer) {
 		DRM_ERROR("Batchbuffer ioctl disabled\n");
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
@@ -633,7 +633,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS)
 	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
 						       batch.num_cliprects *
 						       sizeof(struct drm_clip_rect)))
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 
 	ret = i915_dispatch_batchbuffer(dev, &batch);
 
@@ -664,7 +664,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS)
 						       cmdbuf.num_cliprects *
 						       sizeof(struct drm_clip_rect))) {
 		DRM_ERROR("Fault accessing cliprects\n");
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 	}
 
 	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
@@ -697,7 +697,7 @@ static int i915_getparam(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
@@ -715,12 +715,12 @@ static int i915_getparam(DRM_IOCTL_ARGS)
 		break;
 	default:
 		DRM_ERROR("Unknown parameter %d\n", param.param);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
 		DRM_ERROR("DRM_COPY_TO_USER failed\n");
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 	}
 
 	return 0;
@@ -734,7 +734,7 @@ static int i915_setparam(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
@@ -753,7 +753,7 @@ static int i915_setparam(DRM_IOCTL_ARGS)
 		break;
 	default:
 		DRM_ERROR("unknown parameter %d\n", param.param);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -767,7 +767,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 	DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data,
 				 sizeof(hws));
@@ -788,7 +788,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS)
 		dev_priv->status_gfx_addr = 0;
 		DRM_ERROR("can not ioremap virtual address for"
 			  " G33 hw status page\n");
-		return DRM_ERR(ENOMEM);
+		return -ENOMEM;
 	}
 	dev_priv->hw_status_page = dev_priv->hws_map.handle;
 
@@ -311,7 +311,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
 
-	if (ret == DRM_ERR(EBUSY)) {
+	if (ret == -EBUSY) {
 		DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
 			  __FUNCTION__,
 			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
@@ -330,7 +330,7 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
@@ -366,7 +366,7 @@ int i915_irq_emit(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
@@ -376,7 +376,7 @@ int i915_irq_emit(DRM_IOCTL_ARGS)
 
 	if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 	}
 
 	return 0;
@@ -392,7 +392,7 @@ int i915_irq_wait(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data,
@@ -425,7 +425,7 @@ int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
@@ -434,7 +434,7 @@ int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
 	if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
 		DRM_ERROR("%s called with invalid pipe 0x%x\n",
 			  __FUNCTION__, pipe.pipe);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	dev_priv->vblank_pipe = pipe.pipe;
@@ -453,7 +453,7 @@ int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	flag = I915_READ(I915REG_INT_ENABLE_R);
@@ -482,12 +482,12 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __func__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	if (dev_priv->sarea_priv->rotation) {
 		DRM_DEBUG("Rotation not supported\n");
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
@@ -496,7 +496,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
 	if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
 			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
 		DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
@@ -505,7 +505,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
 
 	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
 		DRM_ERROR("Invalid pipe %d\n", pipe);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	spin_lock_irqsave(&dev->drw_lock, irqflags);
@@ -513,7 +513,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
 	if (!drm_get_drawable_info(dev, swap.drawable)) {
 		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
 		DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
@@ -528,7 +528,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
 			swap.sequence = curseq + 1;
 		} else {
 			DRM_DEBUG("Missed target sequence\n");
-			return DRM_ERR(EINVAL);
+			return -EINVAL;
 		}
 	}
 
@@ -550,14 +550,14 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
 
 	if (dev_priv->swaps_pending >= 100) {
 		DRM_DEBUG("Too many swaps queued\n");
-		return DRM_ERR(EBUSY);
+		return -EBUSY;
 	}
 
 	vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER);
 
 	if (!vbl_swap) {
 		DRM_ERROR("Failed to allocate memory to queue swap\n");
-		return DRM_ERR(ENOMEM);
+		return -ENOMEM;
 	}
 
 	DRM_DEBUG("\n");
@@ -276,7 +276,7 @@ int i915_mem_alloc(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
@@ -284,7 +284,7 @@ int i915_mem_alloc(DRM_IOCTL_ARGS)
 
 	heap = get_heap(dev_priv, alloc.region);
 	if (!heap || !*heap)
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 
 	/* Make things easier on ourselves: all allocations at least
 	 * 4k aligned.
@@ -295,13 +295,13 @@ int i915_mem_alloc(DRM_IOCTL_ARGS)
 	block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
 
 	if (!block)
-		return DRM_ERR(ENOMEM);
+		return -ENOMEM;
 
 	mark_block(dev, block, 1);
 
 	if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 	}
 
 	return 0;
@@ -316,7 +316,7 @@ int i915_mem_free(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
@@ -324,14 +324,14 @@ int i915_mem_free(DRM_IOCTL_ARGS)
 
 	heap = get_heap(dev_priv, memfree.region);
 	if (!heap || !*heap)
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 
 	block = find_block(*heap, memfree.region_offset);
 	if (!block)
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 
 	if (block->filp != filp)
-		return DRM_ERR(EPERM);
+		return -EPERM;
 
 	mark_block(dev, block, 0);
 	free_block(block);
@@ -347,7 +347,7 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS)
 
 	if (!dev_priv) {
 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL(initheap,
@@ -356,11 +356,11 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS)
 
 	heap = get_heap(dev_priv, initheap.region);
 	if (!heap)
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 
 	if (*heap) {
 		DRM_ERROR("heap already initialized?");
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 	}
 
 	return init_heap(heap, initheap.start, initheap.size);
@@ -375,7 +375,7 @@ int i915_mem_destroy_heap( DRM_IOCTL_ARGS )
 
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
-		return DRM_ERR(EINVAL);
+		return -EINVAL;
 	}
 
 	DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data,
@@ -384,12 +384,12 @@ int i915_mem_destroy_heap( DRM_IOCTL_ARGS )
 	heap = get_heap( dev_priv, destroyheap.region );
 	if (!heap) {
 		DRM_ERROR("get_heap failed");
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 	}
 
 	if (!*heap) {
 		DRM_ERROR("heap not initialized?");
-		return DRM_ERR(EFAULT);
+		return -EFAULT;
 	}
 
 	i915_mem_takedown( heap );
@ -71,7 +71,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
|
|||
DRM_ERROR("failed!\n");
|
||||
DRM_INFO(" status=0x%08x\n", status);
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
|
||||
|
@ -256,7 +256,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
|
|||
|
||||
dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv->head == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
|
||||
SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
|
||||
|
@ -267,7 +267,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
|
|||
|
||||
entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
|
||||
if (entry == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
memset(entry, 0, sizeof(drm_mga_freelist_t));
|
||||
|
||||
|
@ -399,7 +399,7 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
|
|||
|
||||
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
|
||||
if (!dev_priv)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
memset(dev_priv, 0, sizeof(drm_mga_private_t));
|
||||
|
@ -578,7 +578,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
|
|||
DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
|
||||
dev_priv->warp->handle, dev_priv->primary->handle,
|
||||
dev->agp_buffer_map->handle);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dev_priv->dma_access = MGA_PAGPXFER;
|
||||
|
@ -622,7 +622,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
|
|||
|
||||
if (dev->dma == NULL) {
|
||||
DRM_ERROR("dev->dma is NULL\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* Make drm_addbufs happy by not trying to create a mapping for less
|
||||
|
@ -656,7 +656,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
|
|||
|
||||
if (err != 0) {
|
||||
DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (dev_priv->primary->size != dma_bs->primary_size) {
|
||||
|
@ -826,7 +826,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
|
|||
dev_priv->sarea = drm_getsarea(dev);
|
||||
if (!dev_priv->sarea) {
|
||||
DRM_ERROR("failed to find sarea!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!dev_priv->used_new_dma_init) {
|
||||
|
@ -837,29 +837,29 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
|
|||
dev_priv->status = drm_core_findmap(dev, init->status_offset);
|
||||
if (!dev_priv->status) {
|
||||
DRM_ERROR("failed to find status page!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if (!dev_priv->mmio) {
|
||||
DRM_ERROR("failed to find mmio region!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
|
||||
if (!dev_priv->warp) {
|
||||
DRM_ERROR("failed to find warp microcode region!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
|
||||
if (!dev_priv->primary) {
|
||||
DRM_ERROR("failed to find primary dma region!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->agp_buffer_token = init->buffers_offset;
|
||||
dev->agp_buffer_map =
|
||||
drm_core_findmap(dev, init->buffers_offset);
|
||||
if (!dev->agp_buffer_map) {
|
||||
DRM_ERROR("failed to find dma buffer region!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drm_core_ioremap(dev_priv->warp, dev);
|
||||
|
@ -877,7 +877,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
|
|||
((dev->agp_buffer_map == NULL) ||
|
||||
(dev->agp_buffer_map->handle == NULL)))) {
|
||||
DRM_ERROR("failed to ioremap agp regions!\n");
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = mga_warp_install_microcode(dev_priv);
|
||||
|
@ -927,7 +927,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
|
|||
|
||||
if (mga_freelist_init(dev, dev_priv) < 0) {
|
||||
DRM_ERROR("could not initialize freelist\n");
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1029,7 +1029,7 @@ int mga_dma_init(DRM_IOCTL_ARGS)
|
|||
return mga_do_cleanup_dma(dev, FULL_CLEANUP);
|
||||
}
|
||||
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
|
@ -1094,16 +1094,16 @@ static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm
|
|||
for (i = d->granted_count; i < d->request_count; i++) {
|
||||
buf = mga_freelist_get(dev);
|
||||
if (!buf)
|
||||
return DRM_ERR(EAGAIN);
|
||||
return -EAGAIN;
|
||||
|
||||
buf->filp = filp;
|
||||
|
||||
if (DRM_COPY_TO_USER(&d->request_indices[i],
|
||||
&buf->idx, sizeof(buf->idx)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
if (DRM_COPY_TO_USER(&d->request_sizes[i],
|
||||
&buf->total, sizeof(buf->total)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
d->granted_count++;
|
||||
}
|
||||
|
@ -1128,7 +1128,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS)
|
|||
if (d.send_count != 0) {
|
||||
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
|
||||
DRM_CURRENTPID, d.send_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* We'll send you buffers.
|
||||
|
@ -1136,7 +1136,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS)
|
|||
if (d.request_count < 0 || d.request_count > dma->buf_count) {
|
||||
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
|
||||
DRM_CURRENTPID, d.request_count, dma->buf_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
WRAP_TEST_WITH_RETURN(dev_priv);
|
||||
|
|
|
@ -245,7 +245,7 @@ do { \
|
|||
dev_priv->prim.high_mark ) { \
|
||||
if ( MGA_DMA_DEBUG ) \
|
||||
DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
|
||||
return DRM_ERR(EBUSY); \
|
||||
return -EBUSY; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
@ -256,7 +256,7 @@ do { \
|
|||
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
|
||||
if ( MGA_DMA_DEBUG ) \
|
||||
DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
|
||||
return DRM_ERR(EBUSY); \
|
||||
return -EBUSY; \
|
||||
} \
|
||||
mga_do_dma_wrap_end( dev_priv ); \
|
||||
} \
|
||||
|
|
|
@ -392,7 +392,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
|
|||
ctx->dstorg, dev_priv->front_offset,
|
||||
dev_priv->back_offset);
|
||||
ctx->dstorg = 0;
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -411,7 +411,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
|
|||
if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
|
||||
DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
|
||||
tex->texorg = 0;
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -453,13 +453,13 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
|
|||
dstorg + length > (dev_priv->texture_offset +
|
||||
dev_priv->texture_size)) {
|
||||
DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (length & MGA_ILOAD_MASK) {
|
||||
DRM_ERROR("*** bad iload length: 0x%x\n",
|
||||
length & MGA_ILOAD_MASK);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -471,7 +471,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
|
|||
if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
|
||||
(dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
|
||||
DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -892,7 +892,7 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS)
|
|||
sizeof(vertex));
|
||||
|
||||
if (vertex.idx < 0 || vertex.idx > dma->buf_count)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
buf = dma->buflist[vertex.idx];
|
||||
buf_priv = buf->dev_private;
|
||||
|
||||
|
@ -906,7 +906,7 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS)
|
|||
buf_priv->dispatched = 0;
|
||||
mga_freelist_put(dev, buf);
|
||||
}
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
WRAP_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -932,7 +932,7 @@ static int mga_dma_indices(DRM_IOCTL_ARGS)
|
|||
sizeof(indices));
|
||||
|
||||
if (indices.idx < 0 || indices.idx > dma->buf_count)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
buf = dma->buflist[indices.idx];
|
||||
buf_priv = buf->dev_private;
|
||||
|
@ -946,7 +946,7 @@ static int mga_dma_indices(DRM_IOCTL_ARGS)
|
|||
buf_priv->dispatched = 0;
|
||||
mga_freelist_put(dev, buf);
|
||||
}
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
WRAP_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -975,18 +975,18 @@ static int mga_dma_iload(DRM_IOCTL_ARGS)
|
|||
if (mga_do_wait_for_idle(dev_priv) < 0) {
|
||||
if (MGA_DMA_DEBUG)
|
||||
DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
#endif
|
||||
if (iload.idx < 0 || iload.idx > dma->buf_count)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
buf = dma->buflist[iload.idx];
|
||||
buf_priv = buf->dev_private;
|
||||
|
||||
if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) {
|
||||
mga_freelist_put(dev, buf);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
WRAP_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -1017,7 +1017,7 @@ static int mga_dma_blit(DRM_IOCTL_ARGS)
|
|||
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
|
||||
|
||||
if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg))
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
WRAP_TEST_WITH_RETURN(dev_priv);
|
||||
|
||||
|
@ -1039,7 +1039,7 @@ static int mga_getparam(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data,
|
||||
|
@ -1055,12 +1055,12 @@ static int mga_getparam(DRM_IOCTL_ARGS)
|
|||
value = dev_priv->chipset;
|
||||
break;
|
||||
default:
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1075,7 +1075,7 @@ static int mga_set_fence(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
|
||||
|
@ -1095,7 +1095,7 @@ static int mga_set_fence(DRM_IOCTL_ARGS)
|
|||
|
||||
if (DRM_COPY_TO_USER((u32 __user *) data, &temp, sizeof(u32))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1109,7 +1109,7 @@ static int mga_wait_fence(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
|
||||
|
@ -1120,7 +1120,7 @@ static int mga_wait_fence(DRM_IOCTL_ARGS)
|
|||
|
||||
if (DRM_COPY_TO_USER((u32 __user *) data, &fence, sizeof(u32))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -141,7 +141,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
|
|||
if (size > dev_priv->warp->size) {
|
||||
DRM_ERROR("microcode too large! (%u > %lu)\n",
|
||||
size, dev_priv->warp->size);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
switch (dev_priv->chipset) {
|
||||
|
@ -151,7 +151,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
|
|||
case MGA_CARD_TYPE_G200:
|
||||
return mga_warp_install_g200_microcode(dev_priv);
|
||||
default:
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -177,7 +177,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
|
|||
MGA_WRITE(MGA_WVRTXSZ, 7);
|
||||
break;
|
||||
default:
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
|
||||
|
@ -186,7 +186,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
|
|||
if (wmisc != WMISC_EXPECTED) {
|
||||
DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
|
||||
wmisc, WMISC_EXPECTED);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -129,7 +129,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
|
|||
#if R128_FIFO_DEBUG
|
||||
DRM_ERROR("failed!\n");
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
|
||||
|
@ -146,7 +146,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
|
|||
#if R128_FIFO_DEBUG
|
||||
DRM_ERROR("failed!\n");
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
|
||||
|
@ -168,7 +168,7 @@ static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
|
|||
#if R128_FIFO_DEBUG
|
||||
DRM_ERROR("failed!\n");
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
|
@ -227,7 +227,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv)
|
|||
DRM_ERROR("failed!\n");
|
||||
r128_status(dev_priv);
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* Start the Concurrent Command Engine.
|
||||
|
@ -355,7 +355,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
|
||||
dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
memset(dev_priv, 0, sizeof(drm_r128_private_t));
|
||||
|
||||
|
@ -365,7 +365,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_ERROR("PCI GART memory not allocated!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->usec_timeout = init->usec_timeout;
|
||||
|
@ -374,7 +374,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_DEBUG("TIMEOUT problem!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->cce_mode = init->cce_mode;
|
||||
|
@ -394,7 +394,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_DEBUG("Bad cce_mode!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (init->cce_mode) {
|
||||
|
@ -461,7 +461,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_ERROR("could not find sarea!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
|
@ -469,21 +469,21 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_ERROR("could not find mmio region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
|
||||
if (!dev_priv->cce_ring) {
|
||||
DRM_ERROR("could not find cce ring region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
|
||||
if (!dev_priv->ring_rptr) {
|
||||
DRM_ERROR("could not find ring read pointer!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->agp_buffer_token = init->buffers_offset;
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
|
@ -491,7 +491,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_ERROR("could not find dma buffer region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!dev_priv->is_pci) {
|
||||
|
@ -501,7 +501,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_ERROR("could not find agp texture region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -520,7 +520,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_ERROR("Could not ioremap agp regions!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
|
@ -567,7 +567,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
|
|||
DRM_ERROR("failed to init PCI GART!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
|
||||
#if __OS_HAS_AGP
|
||||
|
@ -644,7 +644,7 @@ int r128_cce_init(DRM_IOCTL_ARGS)
|
|||
return r128_do_cleanup_cce(dev);
|
||||
}
|
||||
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int r128_cce_start(DRM_IOCTL_ARGS)
|
||||
|
@ -721,7 +721,7 @@ int r128_cce_reset(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_DEBUG("%s called before init done\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
r128_do_cce_reset(dev_priv);
|
||||
|
@ -759,7 +759,7 @@ int r128_engine_reset(DRM_IOCTL_ARGS)
|
|||
|
||||
int r128_fullscreen(DRM_IOCTL_ARGS)
|
||||
{
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
|
@ -780,7 +780,7 @@ static int r128_freelist_init(struct drm_device * dev)
|
|||
|
||||
dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv->head == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
|
||||
dev_priv->head->age = R128_BUFFER_USED;
|
||||
|
@ -791,7 +791,7 @@ static int r128_freelist_init(struct drm_device * dev)
|
|||
|
||||
entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
|
||||
if (!entry)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
entry->age = R128_BUFFER_FREE;
|
||||
entry->buf = buf;
|
||||
|
@ -883,7 +883,7 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
|
|||
|
||||
/* FIXME: This is being ignored... */
|
||||
DRM_ERROR("failed!\n");
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d)
|
||||
|
@ -894,16 +894,16 @@ static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct dr
|
|||
for (i = d->granted_count; i < d->request_count; i++) {
|
||||
buf = r128_freelist_get(dev);
|
||||
if (!buf)
|
||||
return DRM_ERR(EAGAIN);
|
||||
return -EAGAIN;
|
||||
|
||||
buf->filp = filp;
|
||||
|
||||
if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
|
||||
sizeof(buf->idx)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
|
||||
sizeof(buf->total)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
d->granted_count++;
|
||||
}
|
||||
|
@ -927,7 +927,7 @@ int r128_cce_buffers(DRM_IOCTL_ARGS)
|
|||
if (d.send_count != 0) {
|
||||
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
|
||||
DRM_CURRENTPID, d.send_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* We'll send you buffers.
|
||||
|
@ -935,7 +935,7 @@ int r128_cce_buffers(DRM_IOCTL_ARGS)
|
|||
if (d.request_count < 0 || d.request_count > dma->buf_count) {
|
||||
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
|
||||
DRM_CURRENTPID, d.request_count, dma->buf_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
d.granted_count = 0;
|
||||
|
|
|
@ -428,7 +428,7 @@ do { \
|
|||
DRM_UDELAY(1); \
|
||||
} \
|
||||
DRM_ERROR( "ring space check failed!\n" ); \
|
||||
return DRM_ERR(EBUSY); \
|
||||
return -EBUSY; \
|
||||
} \
|
||||
__ring_space_done: \
|
||||
; \
|
||||
|
|
|
@ -809,7 +809,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
|
|||
break;
|
||||
default:
|
||||
DRM_ERROR("invalid blit format %d\n", blit->format);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Flush the pixel cache, and mark the contents as Read Invalid.
|
||||
|
@ -832,11 +832,11 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
|
|||
if (buf->filp != filp) {
|
||||
DRM_ERROR("process %d using buffer owned by %p\n",
|
||||
DRM_CURRENTPID, buf->filp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (buf->pending) {
|
||||
DRM_ERROR("sending pending buffer %d\n", blit->idx);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
buf_priv->discard = 1;
|
||||
|
@ -900,22 +900,22 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
|
|||
|
||||
count = depth->n;
|
||||
if (count > 4096 || count <= 0)
|
||||
return DRM_ERR(EMSGSIZE);
|
||||
return -EMSGSIZE;
|
||||
|
||||
if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
buffer_size = depth->n * sizeof(u32);
|
||||
buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
|
||||
if (buffer == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
|
||||
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
mask_size = depth->n * sizeof(u8);
|
||||
|
@ -923,12 +923,12 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
|
|||
mask = drm_alloc(mask_size, DRM_MEM_BUFS);
|
||||
if (mask == NULL) {
|
||||
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
|
||||
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
|
||||
drm_free(mask, mask_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++, x++) {
|
||||
|
@ -996,28 +996,28 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
|
|||
|
||||
count = depth->n;
|
||||
if (count > 4096 || count <= 0)
|
||||
return DRM_ERR(EMSGSIZE);
|
||||
return -EMSGSIZE;
|
||||
|
||||
xbuf_size = count * sizeof(*x);
|
||||
ybuf_size = count * sizeof(*y);
|
||||
x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
|
||||
if (x == NULL) {
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
|
||||
if (y == NULL) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
drm_free(y, ybuf_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
drm_free(y, ybuf_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
buffer_size = depth->n * sizeof(u32);
|
||||
|
@ -1025,13 +1025,13 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
|
|||
if (buffer == NULL) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
drm_free(y, ybuf_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
drm_free(y, ybuf_size, DRM_MEM_BUFS);
|
||||
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (depth->mask) {
|
||||
|
@ -1041,14 +1041,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
|
|||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
drm_free(y, ybuf_size, DRM_MEM_BUFS);
|
||||
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
drm_free(y, ybuf_size, DRM_MEM_BUFS);
|
||||
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
|
||||
drm_free(mask, mask_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
|
@ -1115,13 +1115,13 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
|
|||
|
||||
count = depth->n;
|
||||
if (count > 4096 || count <= 0)
|
||||
return DRM_ERR(EMSGSIZE);
|
||||
return -EMSGSIZE;
|
||||
|
||||
if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
BEGIN_RING(7);
|
||||
|
@ -1159,7 +1159,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
|
|||
|
||||
count = depth->n;
|
||||
if (count > 4096 || count <= 0)
|
||||
return DRM_ERR(EMSGSIZE);
|
||||
return -EMSGSIZE;
|
||||
|
||||
if (count > dev_priv->depth_pitch) {
|
||||
count = dev_priv->depth_pitch;
|
||||
|
@ -1169,22 +1169,22 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
|
|||
ybuf_size = count * sizeof(*y);
|
||||
x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
|
||||
if (x == NULL) {
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
|
||||
if (y == NULL) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
drm_free(y, ybuf_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
|
||||
drm_free(x, xbuf_size, DRM_MEM_BUFS);
|
||||
drm_free(y, ybuf_size, DRM_MEM_BUFS);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
|
@ -1363,7 +1363,7 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
|
||||
|
@ -1375,12 +1375,12 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS)
|
|||
if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
vertex.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (vertex.prim < 0 ||
|
||||
vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
|
||||
DRM_ERROR("buffer prim %d\n", vertex.prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -1392,11 +1392,11 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS)
|
|||
if (buf->filp != filp) {
|
||||
DRM_ERROR("process %d using buffer owned by %p\n",
|
||||
DRM_CURRENTPID, buf->filp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (buf->pending) {
|
||||
DRM_ERROR("sending pending buffer %d\n", vertex.idx);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
buf->used = vertex.count;
|
||||
|
@ -1423,7 +1423,7 @@ static int r128_cce_indices(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
|
||||
|
@ -1435,11 +1435,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS)
|
|||
if (elts.idx < 0 || elts.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
elts.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
|
||||
DRM_ERROR("buffer prim %d\n", elts.prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -1451,11 +1451,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS)
|
|||
if (buf->filp != filp) {
|
||||
DRM_ERROR("process %d using buffer owned by %p\n",
|
||||
DRM_CURRENTPID, buf->filp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (buf->pending) {
|
||||
DRM_ERROR("sending pending buffer %d\n", elts.idx);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
count = (elts.end - elts.start) / sizeof(u16);
|
||||
|
@ -1463,11 +1463,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS)
|
|||
|
||||
if (elts.start & 0x7) {
|
||||
DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (elts.start < buf->used) {
|
||||
DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
buf->used = elts.end;
|
||||
|
@ -1498,7 +1498,7 @@ static int r128_cce_blit(DRM_IOCTL_ARGS)
|
|||
if (blit.idx < 0 || blit.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
blit.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -1524,7 +1524,7 @@ static int r128_cce_depth(DRM_IOCTL_ARGS)
|
|||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
||||
ret = DRM_ERR(EINVAL);
|
||||
ret = -EINVAL;
|
||||
switch (depth.func) {
|
||||
case R128_WRITE_SPAN:
|
||||
ret = r128_cce_dispatch_write_span(dev, &depth);
|
||||
|
@ -1557,7 +1557,7 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS)
|
|||
sizeof(stipple));
|
||||
|
||||
if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
||||
|
@ -1583,7 +1583,7 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
|
||||
|
@ -1595,7 +1595,7 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS)
|
|||
if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
indirect.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
buf = dma->buflist[indirect.idx];
|
||||
|
@ -1604,17 +1604,17 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS)
|
|||
if (buf->filp != filp) {
|
||||
DRM_ERROR("process %d using buffer owned by %p\n",
|
||||
DRM_CURRENTPID, buf->filp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (buf->pending) {
|
||||
DRM_ERROR("sending pending buffer %d\n", indirect.idx);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (indirect.start < buf->used) {
|
||||
DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
|
||||
indirect.start, buf->used);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -1651,7 +1651,7 @@ static int r128_getparam(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
|
||||
|
@ -1664,12 +1664,12 @@ static int r128_getparam(DRM_IOCTL_ARGS)
|
|||
value = dev->irq;
|
||||
break;
|
||||
default:
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
|
|||
if (DRM_COPY_FROM_USER_UNCHECKED
|
||||
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
|
||||
DRM_ERROR("copy cliprect faulted\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
box.x1 =
|
||||
|
@ -263,7 +263,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
|
|||
DRM_ERROR
|
||||
("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
|
||||
reg, sz);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
for (i = 0; i < sz; i++) {
|
||||
values[i] = ((int *)cmdbuf->buf)[i];
|
||||
|
@ -275,13 +275,13 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
|
|||
DRM_ERROR
|
||||
("Offset failed range check (reg=%04x sz=%d)\n",
|
||||
reg, sz);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Register %04x failed check as flag=%02x\n",
|
||||
reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -317,12 +317,12 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
|
|||
return 0;
|
||||
|
||||
if (sz * 4 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
if (reg + sz * 4 >= 0x10000) {
|
||||
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
|
||||
sz);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (r300_check_range(reg, sz)) {
|
||||
|
@ -362,7 +362,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
|
|||
if (!sz)
|
||||
return 0;
|
||||
if (sz * 16 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
BEGIN_RING(5 + sz * 4);
|
||||
/* Wait for VAP to come to senses.. */
|
||||
|
@ -391,7 +391,7 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
|
|||
RING_LOCALS;
|
||||
|
||||
if (8 * 4 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
BEGIN_RING(10);
|
||||
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
|
||||
|
@ -421,7 +421,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
|
|||
if ((count + 1) > MAX_ARRAY_PACKET) {
|
||||
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
|
||||
count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
memset(payload, 0, MAX_ARRAY_PACKET * 4);
|
||||
memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
|
||||
|
@ -437,7 +437,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
|
|||
DRM_ERROR
|
||||
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
|
||||
k, i);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
k++;
|
||||
i++;
|
||||
|
@ -448,7 +448,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
|
|||
DRM_ERROR
|
||||
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
|
||||
k, i);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
k++;
|
||||
i++;
|
||||
|
@ -458,7 +458,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
|
|||
DRM_ERROR
|
||||
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
|
||||
k, i, narrays, count + 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* all clear, output packet */
|
||||
|
@ -492,7 +492,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
|
|||
ret = !radeon_check_offset(dev_priv, offset);
|
||||
if (ret) {
|
||||
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -502,7 +502,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
|
|||
ret = !radeon_check_offset(dev_priv, offset);
|
||||
if (ret) {
|
||||
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -530,12 +530,12 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
|
|||
|
||||
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
|
||||
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
ret = !radeon_check_offset(dev_priv, cmd[2]);
|
||||
if (ret) {
|
||||
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BEGIN_RING(count+2);
|
||||
|
@ -557,7 +557,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
|
|||
RING_LOCALS;
|
||||
|
||||
if (4 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
/* Fixme !! This simply emits a packet without much checking.
|
||||
We need to be smarter. */
|
||||
|
@ -568,7 +568,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
|
|||
/* Is it packet 3 ? */
|
||||
if ((header >> 30) != 0x3) {
|
||||
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
count = (header >> 16) & 0x3fff;
|
||||
|
@ -578,7 +578,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
|
|||
DRM_ERROR
|
||||
("Expected packet3 of length %d but have only %d bytes left\n",
|
||||
(count + 2) * 4, cmdbuf->bufsz);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Is it a packet type we know about ? */
|
||||
|
@ -600,7 +600,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
|
|||
break;
|
||||
default:
|
||||
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BEGIN_RING(count + 2);
|
||||
|
@ -664,7 +664,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
|
|||
DRM_ERROR("bad packet3 type %i at %p\n",
|
||||
header.packet3.packet,
|
||||
cmdbuf->buf - sizeof(header));
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
n += R300_SIMULTANEOUS_CLIPRECTS;
|
||||
|
@ -726,11 +726,11 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
|
|||
|
||||
if (cmdbuf->bufsz <
|
||||
(sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (header.scratch.reg >= 5) {
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->scratch_ages[header.scratch.reg]++;
|
||||
|
@ -745,21 +745,21 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
|
|||
buf_idx *= 2; /* 8 bytes per buf */
|
||||
|
||||
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (h_pending == 0) {
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
h_pending--;
|
||||
|
||||
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cmdbuf->buf += sizeof(buf_idx);
|
||||
|
@ -879,7 +879,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
|
|||
if (idx < 0 || idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
idx, dma->buf_count - 1);
|
||||
ret = DRM_ERR(EINVAL);
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
|
@ -887,7 +887,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
|
|||
if (buf->filp != filp || buf->pending) {
|
||||
DRM_ERROR("bad buffer %p %p %d\n",
|
||||
buf->filp, filp, buf->pending);
|
||||
ret = DRM_ERR(EINVAL);
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
|
@ -924,7 +924,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
|
|||
DRM_ERROR("bad cmd_type %i at %p\n",
|
||||
header.header.cmd_type,
|
||||
cmdbuf->buf - sizeof(header));
|
||||
ret = DRM_ERR(EINVAL);
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -889,7 +889,7 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
|
|||
DRM_ERROR("failed!\n");
|
||||
radeon_status(dev_priv);
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
|
||||
|
@ -910,7 +910,7 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
|
|||
DRM_ERROR("failed!\n");
|
||||
radeon_status(dev_priv);
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
|
||||
|
@ -936,7 +936,7 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
|
|||
DRM_ERROR("failed!\n");
|
||||
radeon_status(dev_priv);
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
|
@ -1394,7 +1394,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
|
||||
DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
|
||||
|
@ -1409,7 +1409,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
|
||||
DRM_ERROR("PCI GART memory not allocated!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->usec_timeout = init->usec_timeout;
|
||||
|
@ -1417,7 +1417,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
|
||||
DRM_DEBUG("TIMEOUT problem!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Enable vblank on CRTC1 for older X servers
|
||||
|
@ -1446,7 +1446,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
(init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
|
||||
DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (init->fb_bpp) {
|
||||
|
@ -1515,27 +1515,27 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
if (!dev_priv->sarea) {
|
||||
DRM_ERROR("could not find sarea!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
|
||||
if (!dev_priv->cp_ring) {
|
||||
DRM_ERROR("could not find cp ring region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
|
||||
if (!dev_priv->ring_rptr) {
|
||||
DRM_ERROR("could not find ring read pointer!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->agp_buffer_token = init->buffers_offset;
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if (!dev->agp_buffer_map) {
|
||||
DRM_ERROR("could not find dma buffer region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (init->gart_textures_offset) {
|
||||
|
@ -1544,7 +1544,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
if (!dev_priv->gart_textures) {
|
||||
DRM_ERROR("could not find GART texture region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1562,7 +1562,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
!dev->agp_buffer_map->handle) {
|
||||
DRM_ERROR("could not find ioremap agp regions!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
|
@ -1710,14 +1710,14 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
|
|||
DRM_ERROR
|
||||
("Cannot use PCI Express without GART in FB memory\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
|
||||
DRM_ERROR("failed to init PCI GART!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Turn on PCI GART */
|
||||
|
@ -1797,7 +1797,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("Called with no initialization\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_DEBUG("Starting radeon_do_resume_cp()\n");
|
||||
|
@ -1845,7 +1845,7 @@ int radeon_cp_init(DRM_IOCTL_ARGS)
|
|||
return radeon_do_cleanup_cp(dev);
|
||||
}
|
||||
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int radeon_cp_start(DRM_IOCTL_ARGS)
|
||||
|
@ -1973,7 +1973,7 @@ int radeon_cp_reset(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_DEBUG("%s called before init done\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
radeon_do_cp_reset(dev_priv);
|
||||
|
@ -2167,7 +2167,7 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
|
|||
radeon_status(dev_priv);
|
||||
DRM_ERROR("failed!\n");
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev,
|
||||
|
@ -2179,16 +2179,16 @@ static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev,
|
|||
for (i = d->granted_count; i < d->request_count; i++) {
|
||||
buf = radeon_freelist_get(dev);
|
||||
if (!buf)
|
||||
return DRM_ERR(EBUSY); /* NOTE: broken client */
|
||||
return -EBUSY; /* NOTE: broken client */
|
||||
|
||||
buf->filp = filp;
|
||||
|
||||
if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
|
||||
sizeof(buf->idx)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
|
||||
sizeof(buf->total)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
d->granted_count++;
|
||||
}
|
||||
|
@ -2212,7 +2212,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS)
|
|||
if (d.send_count != 0) {
|
||||
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
|
||||
DRM_CURRENTPID, d.send_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* We'll send you buffers.
|
||||
|
@ -2220,7 +2220,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS)
|
|||
if (d.request_count < 0 || d.request_count > dma->buf_count) {
|
||||
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
|
||||
DRM_CURRENTPID, d.request_count, dma->buf_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
d.granted_count = 0;
|
||||
|
@ -2241,7 +2241,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
|
|||
|
||||
dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
memset(dev_priv, 0, sizeof(drm_radeon_private_t));
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
|
|
@ -155,7 +155,7 @@ int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence
|
|||
atomic_t *counter;
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (crtc == DRM_RADEON_VBLANK_CRTC1) {
|
||||
|
@ -165,7 +165,7 @@ int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence
|
|||
counter = &dev->vbl_received2;
|
||||
ack |= RADEON_CRTC2_VBLANK_STAT;
|
||||
} else
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
radeon_acknowledge_irqs(dev_priv, ack);
|
||||
|
||||
|
@ -207,7 +207,7 @@ int radeon_irq_emit(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data,
|
||||
|
@ -217,7 +217,7 @@ int radeon_irq_emit(DRM_IOCTL_ARGS)
|
|||
|
||||
if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -233,7 +233,7 @@ int radeon_irq_wait(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data,
|
||||
|
@ -320,7 +320,7 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
|
|||
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
|
||||
if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
|
||||
DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->vblank_crtc = (unsigned int)value;
|
||||
radeon_enable_interrupt(dev);
|
||||
|
|
|
@ -137,12 +137,12 @@ static int init_heap(struct mem_block **heap, int start, int size)
|
|||
struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
|
||||
|
||||
if (!blocks)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
|
||||
if (!*heap) {
|
||||
drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
blocks->start = start;
|
||||
|
@ -226,7 +226,7 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data,
|
||||
|
@ -234,7 +234,7 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS)
|
|||
|
||||
heap = get_heap(dev_priv, alloc.region);
|
||||
if (!heap || !*heap)
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
/* Make things easier on ourselves: all allocations at least
|
||||
* 4k aligned.
|
||||
|
@ -245,11 +245,11 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS)
|
|||
block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
|
||||
|
||||
if (!block)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -264,7 +264,7 @@ int radeon_mem_free(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data,
|
||||
|
@ -272,14 +272,14 @@ int radeon_mem_free(DRM_IOCTL_ARGS)
|
|||
|
||||
heap = get_heap(dev_priv, memfree.region);
|
||||
if (!heap || !*heap)
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
block = find_block(*heap, memfree.region_offset);
|
||||
if (!block)
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
if (block->filp != filp)
|
||||
return DRM_ERR(EPERM);
|
||||
return -EPERM;
|
||||
|
||||
free_block(block);
|
||||
return 0;
|
||||
|
@ -294,7 +294,7 @@ int radeon_mem_init_heap(DRM_IOCTL_ARGS)
|
|||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(initheap,
|
||||
|
@ -303,11 +303,11 @@ int radeon_mem_init_heap(DRM_IOCTL_ARGS)
|
|||
|
||||
heap = get_heap(dev_priv, initheap.region);
|
||||
if (!heap)
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
if (*heap) {
|
||||
DRM_ERROR("heap already initialized?");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return init_heap(heap, initheap.start, initheap.size);
|
||||
|
|
|
@ -85,7 +85,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
|
|||
*offset = off;
|
||||
return 0;
|
||||
}
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
||||
|
@ -99,7 +99,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
|
||||
DRM_ERROR("Invalid depth buffer offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -107,7 +107,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
|
||||
DRM_ERROR("Invalid colour buffer offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -120,7 +120,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&data[0])) {
|
||||
DRM_ERROR("Invalid R200 texture offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -130,7 +130,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
|
||||
DRM_ERROR("Invalid R100 texture offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -147,7 +147,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
&data[i])) {
|
||||
DRM_ERROR
|
||||
("Invalid R200 cubic texture offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -163,7 +163,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
&data[i])) {
|
||||
DRM_ERROR
|
||||
("Invalid R100 cubic texture offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -256,7 +256,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
|||
|
||||
default:
|
||||
DRM_ERROR("Unknown state packet ID %d\n", id);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -277,12 +277,12 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
|
||||
if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
|
||||
DRM_ERROR("Not a type 3 packet\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (4 * *cmdsz > cmdbuf->bufsz) {
|
||||
DRM_ERROR("Packet size larger than size of data provided\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch(cmd[0] & 0xff00) {
|
||||
|
@ -307,7 +307,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
/* safe but r200 only */
|
||||
if (dev_priv->microcode_version != UCODE_R200) {
|
||||
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -317,7 +317,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
if (count > 18) { /* 12 arrays max */
|
||||
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
|
||||
count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* carefully check packet contents */
|
||||
|
@ -330,7 +330,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
DRM_ERROR
|
||||
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
|
||||
k, i);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
k++;
|
||||
i++;
|
||||
|
@ -341,7 +341,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
DRM_ERROR
|
||||
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
|
||||
k, i);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
k++;
|
||||
i++;
|
||||
|
@ -351,33 +351,33 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
DRM_ERROR
|
||||
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
|
||||
k, i, narrays, count + 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
case RADEON_3D_RNDR_GEN_INDX_PRIM:
|
||||
if (dev_priv->microcode_version != UCODE_R100) {
|
||||
DRM_ERROR("Invalid 3d packet for r200-class chip\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) {
|
||||
DRM_ERROR("Invalid rndr_gen_indx offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
case RADEON_CP_INDX_BUFFER:
|
||||
if (dev_priv->microcode_version != UCODE_R200) {
|
||||
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
|
||||
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) {
|
||||
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -391,7 +391,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
if (radeon_check_and_fixup_offset
|
||||
(dev_priv, filp_priv, &offset)) {
|
||||
DRM_ERROR("Invalid first packet offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
|
||||
}
|
||||
|
@ -402,7 +402,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
if (radeon_check_and_fixup_offset
|
||||
(dev_priv, filp_priv, &offset)) {
|
||||
DRM_ERROR("Invalid second packet offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
|
||||
}
|
||||
|
@ -410,7 +410,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
|
|||
|
||||
default:
|
||||
DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -451,13 +451,13 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
|
|||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&ctx->rb3d_depthoffset)) {
|
||||
DRM_ERROR("Invalid depth buffer offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&ctx->rb3d_coloroffset)) {
|
||||
DRM_ERROR("Invalid depth buffer offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BEGIN_RING(14);
|
||||
|
@ -546,7 +546,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
|
|||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&tex[0].pp_txoffset)) {
|
||||
DRM_ERROR("Invalid texture offset for unit 0\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BEGIN_RING(9);
|
||||
|
@ -566,7 +566,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
|
|||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&tex[1].pp_txoffset)) {
|
||||
DRM_ERROR("Invalid texture offset for unit 1\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BEGIN_RING(9);
|
||||
|
@ -586,7 +586,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
|
|||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
|
||||
&tex[2].pp_txoffset)) {
|
||||
DRM_ERROR("Invalid texture offset for unit 2\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BEGIN_RING(9);
|
||||
|
@ -1668,7 +1668,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
|
|||
|
||||
if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) {
|
||||
DRM_ERROR("Invalid destination offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
|
||||
|
@ -1711,11 +1711,11 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
|
|||
break;
|
||||
default:
|
||||
DRM_ERROR("invalid texture format %d\n", tex->format);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
spitch = blit_width >> 6;
|
||||
if (spitch == 0 && image->height > 1)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
texpitch = tex->pitch;
|
||||
if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
|
||||
|
@ -1760,8 +1760,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
|
|||
if (!buf) {
|
||||
DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
|
||||
if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return DRM_ERR(EAGAIN);
|
||||
return -EFAULT;
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/* Dispatch the indirect buffer.
|
||||
|
@ -1774,7 +1774,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
|
|||
do { \
|
||||
if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
|
||||
DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
|
||||
return DRM_ERR(EFAULT); \
|
||||
return -EFAULT; \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
|
@ -2083,7 +2083,7 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS)
|
|||
sizeof(alloc));
|
||||
|
||||
if (alloc_surface(&alloc, dev_priv, filp) == -1)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
@ -2098,7 +2098,7 @@ static int radeon_surface_free(DRM_IOCTL_ARGS)
|
|||
sizeof(memfree));
|
||||
|
||||
if (free_surface(filp, dev_priv, memfree.address))
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
@ -2124,7 +2124,7 @@ static int radeon_cp_clear(DRM_IOCTL_ARGS)
|
|||
|
||||
if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes,
|
||||
sarea_priv->nbox * sizeof(depth_boxes[0])))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
radeon_cp_dispatch_clear(dev, &clear, depth_boxes);
|
||||
|
||||
|
@ -2226,11 +2226,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
|
|||
if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
vertex.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
|
||||
DRM_ERROR("buffer prim %d\n", vertex.prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -2241,11 +2241,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
|
|||
if (buf->filp != filp) {
|
||||
DRM_ERROR("process %d using buffer owned by %p\n",
|
||||
DRM_CURRENTPID, buf->filp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (buf->pending) {
|
||||
DRM_ERROR("sending pending buffer %d\n", vertex.idx);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Build up a prim_t record:
|
||||
|
@ -2259,7 +2259,7 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
|
|||
sarea_priv->tex_state,
|
||||
sarea_priv->dirty)) {
|
||||
DRM_ERROR("radeon_emit_state failed\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
|
||||
|
@ -2310,11 +2310,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
|
|||
if (elts.idx < 0 || elts.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
elts.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
|
||||
DRM_ERROR("buffer prim %d\n", elts.prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -2325,11 +2325,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
|
|||
if (buf->filp != filp) {
|
||||
DRM_ERROR("process %d using buffer owned by %p\n",
|
||||
DRM_CURRENTPID, buf->filp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (buf->pending) {
|
||||
DRM_ERROR("sending pending buffer %d\n", elts.idx);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
count = (elts.end - elts.start) / sizeof(u16);
|
||||
|
@ -2337,11 +2337,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
|
|||
|
||||
if (elts.start & 0x7) {
|
||||
DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (elts.start < buf->used) {
|
||||
DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
buf->used = elts.end;
|
||||
|
@ -2352,7 +2352,7 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
|
|||
sarea_priv->tex_state,
|
||||
sarea_priv->dirty)) {
|
||||
DRM_ERROR("radeon_emit_state failed\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
|
||||
|
@ -2394,13 +2394,13 @@ static int radeon_cp_texture(DRM_IOCTL_ARGS)
|
|||
|
||||
if (tex.image == NULL) {
|
||||
DRM_ERROR("null texture image!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM_COPY_FROM_USER(&image,
|
||||
(drm_radeon_tex_image_t __user *) tex.image,
|
||||
sizeof(image)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
VB_AGE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -2424,7 +2424,7 @@ static int radeon_cp_stipple(DRM_IOCTL_ARGS)
|
|||
sizeof(stipple));
|
||||
|
||||
if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
||||
|
@ -2455,7 +2455,7 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
|
|||
if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
indirect.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
buf = dma->buflist[indirect.idx];
|
||||
|
@ -2463,17 +2463,17 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
|
|||
if (buf->filp != filp) {
|
||||
DRM_ERROR("process %d using buffer owned by %p\n",
|
||||
DRM_CURRENTPID, buf->filp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (buf->pending) {
|
||||
DRM_ERROR("sending pending buffer %d\n", indirect.idx);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (indirect.start < buf->used) {
|
||||
DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
|
||||
indirect.start, buf->used);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -2528,7 +2528,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
|
|||
if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
vertex.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -2539,23 +2539,23 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
|
|||
if (buf->filp != filp) {
|
||||
DRM_ERROR("process %d using buffer owned by %p\n",
|
||||
DRM_CURRENTPID, buf->filp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (buf->pending) {
|
||||
DRM_ERROR("sending pending buffer %d\n", vertex.idx);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) {
|
||||
drm_radeon_prim_t prim;
|
||||
drm_radeon_tcl_prim_t tclprim;
|
||||
|
||||
if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
if (prim.stateidx != laststate) {
|
||||
drm_radeon_state_t state;
|
||||
|
@ -2563,11 +2563,11 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
|
|||
if (DRM_COPY_FROM_USER(&state,
|
||||
&vertex.state[prim.stateidx],
|
||||
sizeof(state)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
if (radeon_emit_state2(dev_priv, filp_priv, &state)) {
|
||||
DRM_ERROR("radeon_emit_state2 failed\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
laststate = prim.stateidx;
|
||||
|
@ -2613,19 +2613,19 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
|
|||
RING_LOCALS;
|
||||
|
||||
if (id >= RADEON_MAX_STATE_PACKETS)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
sz = packet[id].len;
|
||||
reg = packet[id].start;
|
||||
|
||||
if (sz * sizeof(int) > cmdbuf->bufsz) {
|
||||
DRM_ERROR("Packet size provided larger than data provided\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) {
|
||||
DRM_ERROR("Packet verification failed\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BEGIN_RING(sz + 1);
|
||||
|
@ -2713,7 +2713,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
|
|||
if (!sz)
|
||||
return 0;
|
||||
if (sz * 4 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
BEGIN_RING(5 + sz);
|
||||
OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
|
||||
|
@ -2781,7 +2781,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
|
|||
do {
|
||||
if (i < cmdbuf->nbox) {
|
||||
if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
/* FIXME The second and subsequent times round
|
||||
* this loop, send a WAIT_UNTIL_3D_IDLE before
|
||||
* calling emit_clip_rect(). This fixes a
|
||||
|
@ -2839,7 +2839,7 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
|
|||
ADVANCE_RING();
|
||||
break;
|
||||
default:
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -2870,7 +2870,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
|
|||
VB_AGE_TEST_WITH_RETURN(dev_priv);
|
||||
|
||||
if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) {
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
|
||||
|
@ -2881,11 +2881,11 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
|
|||
if (orig_bufsz != 0) {
|
||||
kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
|
||||
if (kbuf == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
|
||||
cmdbuf.bufsz)) {
|
||||
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
cmdbuf.buf = kbuf;
|
||||
}
|
||||
|
@ -3012,7 +3012,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
|
|||
err:
|
||||
if (orig_bufsz != 0)
|
||||
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int radeon_cp_getparam(DRM_IOCTL_ARGS)
|
||||
|
@ -3074,7 +3074,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
|
|||
break;
|
||||
case RADEON_PARAM_SCRATCH_OFFSET:
|
||||
if (!dev_priv->writeback_works)
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
value = RADEON_SCRATCH_REG_OFFSET;
|
||||
break;
|
||||
case RADEON_PARAM_CARD_TYPE:
|
||||
|
@ -3090,12 +3090,12 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
|
|||
break;
|
||||
default:
|
||||
DRM_DEBUG("Invalid parameter %d\n", param.param);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -3149,7 +3149,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
|
|||
break;
|
||||
default:
|
||||
DRM_DEBUG("Invalid parameter %d\n", sp.param);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -60,7 +60,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
|
|||
DRM_ERROR("failed!\n");
|
||||
DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -81,7 +81,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
|
|||
DRM_ERROR("failed!\n");
|
||||
DRM_INFO(" status=0x%08x\n", status);
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -102,7 +102,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
|
|||
DRM_ERROR("failed!\n");
|
||||
DRM_INFO(" status=0x%08x\n", status);
|
||||
#endif
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -136,7 +136,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
|
|||
DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
|
||||
#endif
|
||||
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -158,7 +158,7 @@ savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
|
|||
DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
|
||||
#endif
|
||||
|
||||
return DRM_ERR(EBUSY);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
|
||||
|
@ -301,7 +301,7 @@ static int savage_dma_init(drm_savage_private_t * dev_priv)
|
|||
dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
|
||||
dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
|
||||
if (dev_priv->dma_pages == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
|
||||
SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
|
||||
|
@ -541,7 +541,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
|
|||
|
||||
dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
memset(dev_priv, 0, sizeof(drm_savage_private_t));
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -682,16 +682,16 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
|
|||
|
||||
if (init->fb_bpp != 16 && init->fb_bpp != 32) {
|
||||
DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (init->depth_bpp != 16 && init->depth_bpp != 32) {
|
||||
DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (init->dma_type != SAVAGE_DMA_AGP &&
|
||||
init->dma_type != SAVAGE_DMA_PCI) {
|
||||
DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->cob_size = init->cob_size;
|
||||
|
@ -715,14 +715,14 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
|
|||
if (!dev_priv->sarea) {
|
||||
DRM_ERROR("could not find sarea!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (init->status_offset != 0) {
|
||||
dev_priv->status = drm_core_findmap(dev, init->status_offset);
|
||||
if (!dev_priv->status) {
|
||||
DRM_ERROR("could not find shadow status region!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
dev_priv->status = NULL;
|
||||
|
@ -734,13 +734,13 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
|
|||
if (!dev->agp_buffer_map) {
|
||||
DRM_ERROR("could not find DMA buffer region!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
drm_core_ioremap(dev->agp_buffer_map, dev);
|
||||
if (!dev->agp_buffer_map) {
|
||||
DRM_ERROR("failed to ioremap DMA buffer region!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
if (init->agp_textures_offset) {
|
||||
|
@ -749,7 +749,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
|
|||
if (!dev_priv->agp_textures) {
|
||||
DRM_ERROR("could not find agp texture region!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
dev_priv->agp_textures = NULL;
|
||||
|
@ -760,39 +760,39 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
|
|||
DRM_ERROR("command DMA not supported on "
|
||||
"Savage3D/MX/IX.\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dev->dma && dev->dma->buflist) {
|
||||
DRM_ERROR("command and vertex DMA not supported "
|
||||
"at the same time.\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
|
||||
if (!dev_priv->cmd_dma) {
|
||||
DRM_ERROR("could not find command DMA region!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
|
||||
if (dev_priv->cmd_dma->type != _DRM_AGP) {
|
||||
DRM_ERROR("AGP command DMA region is not a "
|
||||
"_DRM_AGP map!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
drm_core_ioremap(dev_priv->cmd_dma, dev);
|
||||
if (!dev_priv->cmd_dma->handle) {
|
||||
DRM_ERROR("failed to ioremap command "
|
||||
"DMA region!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
|
||||
DRM_ERROR("PCI command DMA region is not a "
|
||||
"_DRM_CONSISTENT map!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
dev_priv->cmd_dma = NULL;
|
||||
|
@ -809,7 +809,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
|
|||
if (!dev_priv->fake_dma.handle) {
|
||||
DRM_ERROR("could not allocate faked DMA buffer!\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
dev_priv->cmd_dma = &dev_priv->fake_dma;
|
||||
dev_priv->dma_flush = savage_fake_dma_flush;
|
||||
|
@ -886,13 +886,13 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
|
|||
if (savage_freelist_init(dev) < 0) {
|
||||
DRM_ERROR("could not initialize freelist\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (savage_dma_init(dev_priv) < 0) {
|
||||
DRM_ERROR("could not initialize command DMA\n");
|
||||
savage_do_cleanup_bci(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -945,7 +945,7 @@ static int savage_bci_init(DRM_IOCTL_ARGS)
|
|||
return savage_do_cleanup_bci(dev);
|
||||
}
|
||||
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int savage_bci_event_emit(DRM_IOCTL_ARGS)
|
||||
|
@ -1015,16 +1015,16 @@ static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct d
|
|||
for (i = d->granted_count; i < d->request_count; i++) {
|
||||
buf = savage_freelist_get(dev);
|
||||
if (!buf)
|
||||
return DRM_ERR(EAGAIN);
|
||||
return -EAGAIN;
|
||||
|
||||
buf->filp = filp;
|
||||
|
||||
if (DRM_COPY_TO_USER(&d->request_indices[i],
|
||||
&buf->idx, sizeof(buf->idx)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
if (DRM_COPY_TO_USER(&d->request_sizes[i],
|
||||
&buf->total, sizeof(buf->total)))
|
||||
return DRM_ERR(EFAULT);
|
||||
return -EFAULT;
|
||||
|
||||
d->granted_count++;
|
||||
}
|
||||
|
@ -1047,7 +1047,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
|
|||
if (d.send_count != 0) {
|
||||
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
|
||||
DRM_CURRENTPID, d.send_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* We'll send you buffers.
|
||||
|
@ -1055,7 +1055,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
|
|||
if (d.request_count < 0 || d.request_count > dma->buf_count) {
|
||||
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
|
||||
DRM_CURRENTPID, d.request_count, dma->buf_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
d.granted_count = 0;
|
||||
|
|
|
@ -83,7 +83,7 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
|
|||
{
|
||||
if ((addr & 6) != 2) { /* reserved bits */
|
||||
DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!(addr & 1)) { /* local */
|
||||
addr &= ~7;
|
||||
|
@ -92,13 +92,13 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
|
|||
DRM_ERROR
|
||||
("bad texAddr%d %08x (local addr out of range)\n",
|
||||
unit, addr);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else { /* AGP */
|
||||
if (!dev_priv->agp_textures) {
|
||||
DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
|
||||
unit, addr);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
addr &= ~7;
|
||||
if (addr < dev_priv->agp_textures->offset ||
|
||||
|
@ -107,7 +107,7 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
|
|||
DRM_ERROR
|
||||
("bad texAddr%d %08x (AGP addr out of range)\n",
|
||||
unit, addr);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -133,7 +133,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
|
|||
start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
|
||||
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
|
||||
start, start + count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
|
||||
|
@ -165,7 +165,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
|
|||
start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
|
||||
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
|
||||
start, start + count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
|
||||
|
@ -289,7 +289,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
|
|||
|
||||
if (!dmabuf) {
|
||||
DRM_ERROR("called without dma buffers!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!n)
|
||||
|
@ -303,7 +303,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
|
|||
if (n % 3 != 0) {
|
||||
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
|
||||
n);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case SAVAGE_PRIM_TRISTRIP:
|
||||
|
@ -312,18 +312,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
|
|||
DRM_ERROR
|
||||
("wrong number of vertices %u in TRIFAN/STRIP\n",
|
||||
n);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("invalid primitive type %u\n", prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
|
||||
if (skip != 0) {
|
||||
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
|
||||
|
@ -331,18 +331,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
|
|||
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
|
||||
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
|
||||
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (reorder) {
|
||||
DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (start + n > dmabuf->total / 32) {
|
||||
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
|
||||
start, start + n - 1, dmabuf->total / 32);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Vertex DMA doesn't work with command DMA at the same time,
|
||||
|
@ -440,7 +440,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
|
|||
if (n % 3 != 0) {
|
||||
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
|
||||
n);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case SAVAGE_PRIM_TRISTRIP:
|
||||
|
@ -449,24 +449,24 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
|
|||
DRM_ERROR
|
||||
("wrong number of vertices %u in TRIFAN/STRIP\n",
|
||||
n);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("invalid primitive type %u\n", prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
|
||||
if (skip > SAVAGE_SKIP_ALL_S3D) {
|
||||
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
vtx_size = 8; /* full vertex */
|
||||
} else {
|
||||
if (skip > SAVAGE_SKIP_ALL_S4) {
|
||||
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
vtx_size = 10; /* full vertex */
|
||||
}
|
||||
|
@ -478,13 +478,13 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
|
|||
if (vtx_size > vb_stride) {
|
||||
DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
|
||||
vtx_size, vb_stride);
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (start + n > vb_size / (vb_stride * 4)) {
|
||||
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
|
||||
start, start + n - 1, vb_size / (vb_stride * 4));
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
prim <<= 25;
|
||||
|
@ -547,7 +547,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
|
|||
|
||||
if (!dmabuf) {
|
||||
DRM_ERROR("called without dma buffers!\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!n)
|
||||
|
@ -560,7 +560,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
|
|||
case SAVAGE_PRIM_TRILIST:
|
||||
if (n % 3 != 0) {
|
||||
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:

@ -568,18 +568,18 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
if (n < 3) {
DRM_ERROR
("wrong number of indices %u in TRIFAN/STRIP\n", n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
} else {
unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -

@ -587,11 +587,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (reorder) {
DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
}

@ -628,7 +628,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
if (idx[i] > dmabuf->total / 32) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
i, idx[i], dmabuf->total / 32);
return DRM_ERR(EINVAL);
return -EINVAL;
}
}

@ -698,7 +698,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:

@ -706,24 +706,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
if (n < 3) {
DRM_ERROR
("wrong number of indices %u in TRIFAN/STRIP\n", n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip > SAVAGE_SKIP_ALL_S3D) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
vtx_size = 8; /* full vertex */
} else {
if (skip > SAVAGE_SKIP_ALL_S4) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
vtx_size = 10; /* full vertex */
}

@ -735,7 +735,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
if (vtx_size > vb_stride) {
DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
vtx_size, vb_stride);
return DRM_ERR(EINVAL);
return -EINVAL;
}
prim <<= 25;

@ -748,7 +748,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
if (idx[i] > vb_size / (vb_stride * 4)) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
i, idx[i], vb_size / (vb_stride * 4));
return DRM_ERR(EINVAL);
return -EINVAL;
}
}

@ -942,7 +942,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
DRM_ERROR("IMPLEMENTATION ERROR: "
"non-drawing-command %d\n",
cmd_header.cmd.cmd);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (ret != 0)

@ -979,7 +979,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n",
cmdbuf.dma_idx, dma->buf_count - 1);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dmabuf = dma->buflist[cmdbuf.dma_idx];
} else {

@ -994,26 +994,26 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
if (cmdbuf.size) {
kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER);
if (kcmd_addr == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr,
cmdbuf.size * 8))
{
drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
return DRM_ERR(EFAULT);
return -EFAULT;
}
cmdbuf.cmd_addr = kcmd_addr;
}
if (cmdbuf.vb_size) {
kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER);
if (kvb_addr == NULL) {
ret = DRM_ERR(ENOMEM);
ret = -ENOMEM;
goto done;
}
if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr,
cmdbuf.vb_size)) {
ret = DRM_ERR(EFAULT);
ret = -EFAULT;
goto done;
}
cmdbuf.vb_addr = kvb_addr;

@ -1022,13 +1022,13 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
if (kbox_addr == NULL) {
ret = DRM_ERR(ENOMEM);
ret = -ENOMEM;
goto done;
}
if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
cmdbuf.nbox * sizeof(struct drm_clip_rect))) {
ret = DRM_ERR(EFAULT);
ret = -EFAULT;
goto done;
}
cmdbuf.box_addr = kbox_addr;

@ -1061,7 +1061,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
DRM_ERROR("indexed drawing command extends "
"beyond end of command buffer\n");
DMA_FLUSH();
return DRM_ERR(EINVAL);
return -EINVAL;
}
/* fall through */
case SAVAGE_CMD_DMA_PRIM:

@ -1094,7 +1094,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
DRM_ERROR("command SAVAGE_CMD_STATE extends "
"beyond end of command buffer\n");
DMA_FLUSH();
ret = DRM_ERR(EINVAL);
ret = -EINVAL;
goto done;
}
ret = savage_dispatch_state(dev_priv, &cmd_header,

@ -1107,7 +1107,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
"beyond end of command buffer\n");
DMA_FLUSH();
ret = DRM_ERR(EINVAL);
ret = -EINVAL;
goto done;
}
ret = savage_dispatch_clear(dev_priv, &cmd_header,

@ -1123,7 +1123,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
default:
DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
DMA_FLUSH();
ret = DRM_ERR(EINVAL);
ret = -EINVAL;
goto done;
}
@ -42,7 +42,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;

@ -140,7 +140,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file * priv,
dev_priv->agp_initialized)) {
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;

@ -159,7 +159,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file * priv,
mem.offset = 0;
mem.size = 0;
mem.free = 0;
retval = DRM_ERR(ENOMEM);
retval = -ENOMEM;
}
DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem));
@ -175,24 +175,24 @@ static int via_initialize(struct drm_device * dev,
{
if (!dev_priv || !dev_priv->mmio) {
DRM_ERROR("via_dma_init called before via_map_init\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (dev_priv->ring.virtual_start != NULL) {
DRM_ERROR("%s called again without calling cleanup\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (!dev->agp || !dev->agp->base) {
DRM_ERROR("%s called with no agp memory available\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (dev_priv->chipset == VIA_DX9_0) {
DRM_ERROR("AGP DMA is not supported on this chip\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->ring.map.offset = dev->agp->base + init->offset;

@ -207,7 +207,7 @@ static int via_initialize(struct drm_device * dev,
via_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

@ -240,22 +240,22 @@ static int via_dma_init(DRM_IOCTL_ARGS)
switch (init.func) {
case VIA_INIT_DMA:
if (!DRM_SUSER(DRM_CURPROC))
retcode = DRM_ERR(EPERM);
retcode = -EPERM;
else
retcode = via_initialize(dev, dev_priv, &init);
break;
case VIA_CLEANUP_DMA:
if (!DRM_SUSER(DRM_CURPROC))
retcode = DRM_ERR(EPERM);
retcode = -EPERM;
else
retcode = via_dma_cleanup(dev);
break;
case VIA_DMA_INITIALIZED:
retcode = (dev_priv->ring.virtual_start != NULL) ?
0 : DRM_ERR(EFAULT);
0 : -EFAULT;
break;
default:
retcode = DRM_ERR(EINVAL);
retcode = -EINVAL;
break;
}

@ -273,15 +273,15 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("%s called without initializing AGP ring buffer.\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (cmd->size > VIA_PCI_BUF_SIZE) {
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
return -EFAULT;
/*
* Running this function on AGP memory is dead slow. Therefore

@ -297,7 +297,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
if (vb == NULL) {
return DRM_ERR(EAGAIN);
return -EAGAIN;
}
memcpy(vb, dev_priv->pci_buf, cmd->size);

@ -321,7 +321,7 @@ int via_driver_dma_quiescent(struct drm_device * dev)
drm_via_private_t *dev_priv = dev->dev_private;
if (!via_wait_idle(dev_priv)) {
return DRM_ERR(EBUSY);
return -EBUSY;
}
return 0;
}

@ -363,10 +363,10 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
int ret;
if (cmd->size > VIA_PCI_BUF_SIZE) {
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
return -EFAULT;
if ((ret =
via_verify_command_stream((uint32_t *) dev_priv->pci_buf,

@ -669,7 +669,7 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS)
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("%s called without initializing AGP ring buffer.\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
return -EFAULT;
}
DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data,

@ -687,7 +687,7 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS)
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
ret = DRM_ERR(EAGAIN);
ret = -EAGAIN;
}
break;
case VIA_CMDBUF_LAG:

@ -699,11 +699,11 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS)
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
ret = DRM_ERR(EAGAIN);
ret = -EAGAIN;
}
break;
default:
ret = DRM_ERR(EFAULT);
ret = -EFAULT;
}
d_siz.size = tmp_size;
@ -237,7 +237,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
first_pfn + 1;
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
return DRM_ERR(ENOMEM);
return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm,

@ -251,7 +251,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
if (ret < 0)
return ret;
vsg->state = dr_via_pages_locked;
return DRM_ERR(EINVAL);
return -EINVAL;
}
vsg->state = dr_via_pages_locked;
DRM_DEBUG("DMA pages locked\n");

@ -274,13 +274,13 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
vsg->descriptors_per_page;
if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
return DRM_ERR(ENOMEM);
return -ENOMEM;
vsg->state = dr_via_desc_pages_alloc;
for (i=0; i<vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
vsg->num_desc);

@ -593,7 +593,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
DRM_ERROR("Zero size bitblt.\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
/*

@ -606,7 +606,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
DRM_ERROR("Too large system memory stride. Stride: %d, "
"Length: %d\n", xfer->mem_stride, xfer->line_length);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if ((xfer->mem_stride == xfer->line_length) &&

@ -624,7 +624,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
/*

@ -635,7 +635,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (xfer->mem_stride < xfer->line_length ||
abs(xfer->fb_stride) < xfer->line_length) {
DRM_ERROR("Invalid frame-buffer / memory stride.\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
/*

@ -648,7 +648,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
#else
if ((((unsigned long)xfer->mem_addr & 15) ||

@ -656,7 +656,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
((xfer->num_lines > 1) &&
((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
#endif

@ -696,7 +696,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
if (ret) {
return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
return (-EINTR == ret) ? -EAGAIN : ret;
}
spin_lock_irqsave(&blitq->blit_lock, irqsave);

@ -740,7 +740,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
if (dev_priv == NULL) {
DRM_ERROR("Called without initialization.\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
engine = (xfer->to_fb) ? 0 : 1;

@ -750,7 +750,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
}
if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
via_dmablit_release_slot(blitq);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
via_dmablit_release_slot(blitq);

@ -790,12 +790,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
if (sync.engine >= VIA_NUM_BLIT_ENGINES)
return DRM_ERR(EINVAL);
return -EINVAL;
err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);
if (DRM_ERR(EINTR) == err)
err = DRM_ERR(EAGAIN);
if (-EINTR == err)
err = -EAGAIN;
return err;
}
@ -205,13 +205,13 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (irq >= drm_via_irq_num) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irq);
return DRM_ERR(EINVAL);
return -EINVAL;
}
real_irq = dev_priv->irq_map[irq];

@ -219,7 +219,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
if (real_irq < 0) {
DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
__FUNCTION__, irq);
return DRM_ERR(EINVAL);
return -EINVAL;
}
masks = dev_priv->irq_masks;

@ -343,13 +343,13 @@ int via_wait_irq(DRM_IOCTL_ARGS)
int force_sequence;
if (!dev->irq)
return DRM_ERR(EINVAL);
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait));
if (irqwait.request.irq >= dev_priv->num_irqs) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irqwait.request.irq);
return DRM_ERR(EINVAL);
return -EINVAL;
}
cur_irq += irqwait.request.irq;

@ -361,13 +361,13 @@ int via_wait_irq(DRM_IOCTL_ARGS)
case VIA_IRQ_ABSOLUTE:
break;
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (irqwait.request.type & VIA_IRQ_SIGNAL) {
DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
__FUNCTION__);
return DRM_ERR(EINVAL);
return -EINVAL;
}
force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE);
@ -102,7 +102,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
@ -136,7 +136,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
if (mem.type > VIA_MEM_AGP) {
DRM_ERROR("Unknown memory type allocation\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :

@ -144,7 +144,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");
mutex_unlock(&dev->struct_mutex);
return DRM_ERR(EINVAL);
return -EINVAL;
}
tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;

@ -162,7 +162,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
mem.size = 0;
mem.index = 0;
DRM_DEBUG("Video memory allocation failed\n");
retval = DRM_ERR(ENOMEM);
retval = -ENOMEM;
}
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem));
@ -1026,12 +1026,12 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
case state_error:
default:
*hc_state = saved_state;
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
if (state == state_error) {
*hc_state = saved_state;
return DRM_ERR(EINVAL);
return -EINVAL;
}
return 0;
}

@ -1082,11 +1082,11 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
break;
case state_error:
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
if (state == state_error) {
return DRM_ERR(EINVAL);
return -EINVAL;
}
return 0;
}