Merge branch 'drm-nouveau-destage' of git://people.freedesktop.org/~airlied/linux
Pull nouveau destaging + Kepler modesetting support from Dave Airlie:
 "This pull request is unexpected and not something I had mentioned
  previously.

  So NVIDIA announced new Kepler GPUs this morning, and Ben has killed
  himself getting modesetting support for them together to have on
  launch day. Most of the code to support the new chips has already
  gone in, however this pull contains a few more pieces along with the
  final enables so the driver binds to the new Kepler cards.

  It's quite amazing that nouveau can support a GPU on its launch day
  even if it's just unaccelerated modesetting, and I'd like to have
  support in the next kernel.

  In order to sweeten the deal, Ben has also requested nouveau destage
  and become ABI stable; the only change is the version number bump,
  which he prepared userspace for quite a long time ago. The driver
  hasn't broken ABI since that one big break that caused a lot of fuss.

  It's also quite a small set of code, and not likely to break
  anything."

* 'drm-nouveau-destage' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau/dp: support version 4.0 of DP table
  drm/nve0/disp: nvidia randomly decided to move the dithering method
  drm/nve0: initial modesetting support for kepler chipsets
  drm/nouveau: add bios connector type for dms59
  drm/nouveau: move out of staging drivers
  drm/nouveau: bump version to 1.0.0
  drm/nvd0/disp: ignore clock set if no pclk
  drm/nouveau: oops, increase channel dispc_vma to 4
  drm/nouveau: inform userspace of new kernel subchannel requirements
  drm/nouveau: remove m2mf creation on userspace channels
  drm/nvc0-/disp: reimplement flip completion method as fifo method
  drm/nouveau: move fence sequence check to start of loop
  drm/nouveau: remove subchannel names from places where it doesn't matter
  drm/nouveau/ttm: always do buffer moves on kernel channel
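For reference, userspace can confirm which nouveau kernel ABI it is talking to by querying the driver version through libdrm. The sketch below is an illustration, not part of this pull; it assumes libdrm's drmOpen()/drmGetVersion() interfaces and would report 1.0.0 on a kernel containing this merge.

/* Minimal sketch (illustrative, assumes libdrm; build with -ldrm):
 * query the nouveau DRM driver version, e.g. to detect the 1.0.0 bump.
 */
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
	int fd = drmOpen("nouveau", NULL);	/* open the first nouveau device */
	if (fd < 0) {
		fprintf(stderr, "no nouveau device found\n");
		return 1;
	}

	drmVersionPtr v = drmGetVersion(fd);	/* DRIVER_MAJOR/MINOR/PATCHLEVEL */
	if (v) {
		printf("%s %d.%d.%d (%s)\n", v->name,
		       v->version_major, v->version_minor,
		       v->version_patchlevel, v->date);
		drmFreeVersion(v);
	}

	drmClose(fd);
	return 0;
}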
commit ba331d5dec
@@ -88,6 +88,8 @@ config DRM_RADEON
source "drivers/gpu/drm/radeon/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
config DRM_I810
tristate "Intel I810"
# !PREEMPT because of missing ioctl locking

@@ -1144,7 +1144,8 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
break;
case 1:
case 2:
if (!(entry[5] & cond))
if ((table[0] < 0x40 && !(entry[5] & cond)) ||
(table[0] == 0x40 && !(entry[4] & cond)))
iexec->execute = false;
break;
case 5:

@@ -69,6 +69,8 @@ enum dcb_connector_type {
DCB_CONNECTOR_TV_3 = 0x13,
DCB_CONNECTOR_DVI_I = 0x30,
DCB_CONNECTOR_DVI_D = 0x31,
DCB_CONNECTOR_DMS59_0 = 0x38,
DCB_CONNECTOR_DMS59_1 = 0x39,
DCB_CONNECTOR_LVDS = 0x40,
DCB_CONNECTOR_LVDS_SPWG = 0x41,
DCB_CONNECTOR_DP = 0x46,
@@ -693,16 +693,12 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = chan = dev_priv->channel;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
int ret;
chan = nvbo->channel;
if (!chan) {
chan = dev_priv->channel;
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has

@@ -734,8 +730,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
}
out:
if (chan == dev_priv->channel)
mutex_unlock(&chan->mutex);
mutex_unlock(&chan->mutex);
return ret;
}
@@ -122,7 +122,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
unsigned long flags;
int ret;
int ret, i;
/* allocate and lock channel structure */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);

@@ -184,7 +184,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return ret;
}
nouveau_dma_pre_init(chan);
nouveau_dma_init(chan);
chan->user_put = 0x40;
chan->user_get = 0x44;
if (dev_priv->card_type >= NV_50)

@@ -202,9 +202,18 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
pfifo->reassign(dev, true);
ret = nouveau_dma_init(chan);
if (!ret)
ret = nouveau_fence_channel_init(chan);
/* Insert NOPs for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret) {
nouveau_channel_put(&chan);
return ret;
}
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
OUT_RING (chan, 0x00000000);
FIRE_RING(chan);
ret = nouveau_fence_channel_init(chan);
if (ret) {
nouveau_channel_put(&chan);
return ret;
@@ -427,18 +436,11 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
}
if (dev_priv->card_type < NV_C0) {
init->subchan[0].handle = NvM2MF;
if (dev_priv->card_type < NV_50)
init->subchan[0].grclass = 0x0039;
else
init->subchan[0].grclass = 0x5039;
init->subchan[1].handle = NvSw;
init->subchan[1].grclass = NV_SW;
init->nr_subchan = 2;
} else {
init->subchan[0].handle = 0x9039;
init->subchan[0].grclass = 0x9039;
init->subchan[0].handle = NvSw;
init->subchan[0].grclass = NV_SW;
init->nr_subchan = 1;
} else {
init->nr_subchan = 0;
}
/* Named memory object area */
@@ -867,6 +867,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_TV_0 :
case DCB_CONNECTOR_TV_1 :
case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV;
case DCB_CONNECTOR_DMS59_0 :
case DCB_CONNECTOR_DMS59_1 :
case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII;
case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
case DCB_CONNECTOR_LVDS :

@@ -1013,13 +1015,10 @@ nouveau_connector_create(struct drm_device *dev, int index)
/* Add overscan compensation options to digital outputs */
if (disp->underscan_property &&
(nv_connector->type == DCB_CONNECTOR_DVI_D ||
nv_connector->type == DCB_CONNECTOR_DVI_I ||
nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
nv_connector->type == DCB_CONNECTOR_DP ||
nv_connector->type == DCB_CONNECTOR_DMS59_DP0 ||
nv_connector->type == DCB_CONNECTOR_DMS59_DP1)) {
(type == DRM_MODE_CONNECTOR_DVID ||
type == DRM_MODE_CONNECTOR_DVII ||
type == DRM_MODE_CONNECTOR_HDMIA ||
type == DRM_MODE_CONNECTOR_DisplayPort)) {
drm_connector_attach_property(connector,
disp->underscan_property,
UNDERSCAN_OFF);
@@ -441,15 +441,19 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
goto fail;
/* Emit the pageflip */
ret = RING_SPACE(chan, 2);
ret = RING_SPACE(chan, 3);
if (ret)
goto fail;
if (dev_priv->card_type < NV_C0)
if (dev_priv->card_type < NV_C0) {
BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
else
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0500, 1);
OUT_RING (chan, 0);
OUT_RING (chan, 0x00000000);
OUT_RING (chan, 0x00000000);
} else {
BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
OUT_RING (chan, ++chan->fence.sequence);
BEGIN_NVC0(chan, 8, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
}
FIRE_RING (chan);
ret = nouveau_fence_new(chan, pfence, true);
@@ -31,7 +31,7 @@
#include "nouveau_ramht.h"
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_bo *pushbuf = chan->pushbuf_bo;

@@ -54,65 +54,6 @@ nouveau_dma_pre_init(struct nouveau_channel *chan)
chan->dma.free = chan->dma.max - chan->dma.cur;
}
int
nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret, i;
if (dev_priv->card_type >= NV_C0) {
ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
if (ret)
return ret;
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
OUT_RING (chan, 0x00009039);
FIRE_RING (chan);
return 0;
}
/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
0x0039 : 0x5039);
if (ret)
return ret;
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
&chan->m2mf_ntfy);
if (ret)
return ret;
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)
return ret;
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
OUT_RING(chan, 0);
/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
ret = RING_SPACE(chan, 6);
if (ret)
return ret;
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
OUT_RING (chan, NvM2MF);
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
OUT_RING (chan, NvNotify0);
OUT_RING (chan, chan->vram_handle);
OUT_RING (chan, chan->gart_handle);
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
return 0;
}
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
@@ -48,8 +48,8 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
NvSubM2MF = 0,
NvSubSw = 1,
NvSubSw = 0,
NvSubM2MF = 1,
NvSub2D = 2,
NvSubCtxSurf2D = 2,
NvSubGdiRect = 3,
@@ -188,6 +188,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
case 0x20:
case 0x21:
case 0x30:
case 0x40:
break;
default:
NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]);

@@ -366,6 +367,10 @@ dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
if (table[0] >= 0x20 && table[0] <= 0x30) {
if (enable) script = ROM16(entry[12]);
else script = ROM16(entry[14]);
} else
if (table[0] == 0x40) {
if (enable) script = ROM16(entry[11]);
else script = ROM16(entry[13]);
}
}

@@ -380,6 +385,9 @@ dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
if (table) {
if (table[0] >= 0x20 && table[0] <= 0x30)
script = ROM16(entry[6]);
else
if (table[0] == 0x40)
script = ROM16(entry[5]);
}
nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);

@@ -393,6 +401,9 @@ dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
if (table) {
if (table[0] >= 0x20 && table[0] <= 0x30)
script = ROM16(entry[8]);
else
if (table[0] == 0x40)
script = ROM16(entry[7]);
}
nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
@@ -26,15 +26,15 @@
#define __NOUVEAU_DRV_H__
#define DRIVER_AUTHOR "Stephane Marchesin"
#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net"
#define DRIVER_EMAIL "nouveau@lists.freedesktop.org"
#define DRIVER_NAME "nouveau"
#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
#define DRIVER_DATE "20090420"
#define DRIVER_DATE "20120316"
#define DRIVER_MAJOR 0
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 16
#define DRIVER_PATCHLEVEL 0
#define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000

@@ -113,8 +113,6 @@ struct nouveau_bo {
int pbbo_index;
bool validate_mapped;
struct nouveau_channel *channel;
struct list_head vma_list;
unsigned page_shift;

@@ -296,7 +294,7 @@ struct nouveau_channel {
uint32_t sw_subchannel[8];
struct nouveau_vma dispc_vma[2];
struct nouveau_vma dispc_vma[4];
struct {
struct nouveau_gpuobj *vblsem;
uint32_t vblsem_head;

@@ -703,6 +701,7 @@ enum nouveau_card_type {
NV_50 = 0x50,
NV_C0 = 0xc0,
NV_D0 = 0xd0,
NV_E0 = 0xe0,
};
struct drm_nouveau_private {

@@ -1091,8 +1090,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
#endif
/* nouveau_dma.c */
extern void nouveau_dma_pre_init(struct nouveau_channel *);
extern int nouveau_dma_init(struct nouveau_channel *);
extern void nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
/* nouveau_acpi.c */
@@ -1765,13 +1763,27 @@ nv44_graph_class(struct drm_device *dev)
#define NV_MEM_TYPE_VM 0x7f
#define NV_MEM_COMP_VM 0x03
/* FIFO methods */
#define NV01_SUBCHAN_OBJECT 0x00000000
#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010
#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014
#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c
#define NV40_SUBCHAN_YIELD 0x00000080
/* NV_SW object class */
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
#define NV_SW_YIELD 0x00000080
#define NV_SW_DMA_VBLSEM 0x0000018c
#define NV_SW_VBLSEM_OFFSET 0x00000400
#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
@@ -93,18 +93,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
}
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
sequence = fence->sequence;
if (fence->sequence > chan->fence.sequence_ack)
break;
fence->signalled = true;
list_del(&fence->entry);
if (unlikely(fence->work))
if (fence->work)
fence->work(fence->priv, true);
kref_put(&fence->refcount, nouveau_fence_del);
if (sequence == chan->fence.sequence_ack)
break;
}
out:
spin_unlock(&chan->fence.lock);
}

@@ -165,9 +164,9 @@ nouveau_fence_emit(struct nouveau_fence *fence)
if (USE_REFCNT(dev)) {
if (dev_priv->card_type < NV_C0)
BEGIN_RING(chan, NvSubSw, 0x0050, 1);
BEGIN_RING(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
else
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
} else {
BEGIN_RING(chan, NvSubSw, 0x0150, 1);
}
@@ -344,7 +343,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
OUT_RING (chan, NvSema);
OUT_RING (chan, offset);
OUT_RING (chan, 1);

@@ -354,9 +353,9 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);

@@ -366,7 +365,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
if (ret)
return ret;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);

@@ -397,10 +396,10 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvSema);
OUT_RING (chan, offset);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
BEGIN_RING(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {

@@ -408,9 +407,9 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);

@@ -420,7 +419,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
if (ret)
return ret;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);

@@ -510,7 +509,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, 0, 1);
BEGIN_RING(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
OUT_RING (chan, NvSw);
FIRE_RING (chan);
}
@@ -426,9 +426,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
ret = nouveau_bo_validate(nvbo, true, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "fail ttm_validate\n");

@@ -678,19 +676,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return PTR_ERR(bo);
}
/* Mark push buffers as being used on PFIFO, the validation code
* will then make sure that if the pushbuf bo moves, that they
* happen on the kernel channel, which will in turn cause a sync
* to happen before we try and submit the push buffer.
*/
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
NV_ERROR(dev, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
bo[push[i].bo_index].read_domains |= (1 << 31);
}
/* Validate buffer list */
@@ -479,6 +479,47 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
case 0xe0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
engine->instmem.resume = nvc0_instmem_resume;
engine->instmem.get = nv50_instmem_get;
engine->instmem.put = nv50_instmem_put;
engine->instmem.map = nv50_instmem_map;
engine->instmem.unmap = nv50_instmem_unmap;
engine->instmem.flush = nv84_instmem_flush;
engine->mc.init = nv50_mc_init;
engine->mc.takedown = nv50_mc_takedown;
engine->timer.init = nv04_timer_init;
engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
engine->fifo.channels = 0;
engine->fifo.init = nouveau_stub_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nvc0_fifo_disable;
engine->fifo.enable = nvc0_fifo_enable;
engine->fifo.reassign = nvc0_fifo_reassign;
engine->fifo.unload_context = nouveau_stub_init;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
engine->display.destroy = nvd0_display_destroy;
engine->display.init = nvd0_display_init;
engine->display.fini = nvd0_display_fini;
engine->gpio.init = nv50_gpio_init;
engine->gpio.fini = nv50_gpio_fini;
engine->gpio.drive = nvd0_gpio_drive;
engine->gpio.sense = nvd0_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nvc0_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nvc0_vram_flags_valid;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
return 1;

@@ -552,6 +593,75 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
static void
nouveau_card_channel_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (dev_priv->channel)
nouveau_channel_put_unlocked(&dev_priv->channel);
}
static int
nouveau_card_channel_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
int ret, oclass;
ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
dev_priv->channel = chan;
if (ret)
return ret;
mutex_unlock(&dev_priv->channel->mutex);
if (dev_priv->card_type <= NV_50) {
if (dev_priv->card_type < NV_50)
oclass = 0x0039;
else
oclass = 0x5039;
ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
if (ret)
goto error;
ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
&chan->m2mf_ntfy);
if (ret)
goto error;
ret = RING_SPACE(chan, 6);
if (ret)
goto error;
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
OUT_RING (chan, NvM2MF);
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
OUT_RING (chan, NvNotify0);
OUT_RING (chan, chan->vram_handle);
OUT_RING (chan, chan->gart_handle);
} else
if (dev_priv->card_type <= NV_C0) {
ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
if (ret)
goto error;
ret = RING_SPACE(chan, 2);
if (ret)
goto error;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
OUT_RING (chan, 0x00009039);
}
FIRE_RING (chan);
error:
if (ret)
nouveau_card_channel_fini(dev);
return ret;
}
int
nouveau_card_init(struct drm_device *dev)
{
@@ -738,17 +848,14 @@ nouveau_card_init(struct drm_device *dev)
nouveau_backlight_init(dev);
nouveau_pm_init(dev);
if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_fence_init(dev);
if (ret)
goto out_pm;
ret = nouveau_fence_init(dev);
if (ret)
goto out_pm;
ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
NvDmaFB, NvDmaTT);
if (!dev_priv->noaccel) {
ret = nouveau_card_channel_init(dev);
if (ret)
goto out_fence;
mutex_unlock(&dev_priv->channel->mutex);
}
if (dev->mode_config.num_crtc) {

@@ -762,7 +869,7 @@ nouveau_card_init(struct drm_device *dev)
return 0;
out_chan:
nouveau_channel_put_unlocked(&dev_priv->channel);
nouveau_card_channel_fini(dev);
out_fence:
nouveau_fence_fini(dev);
out_pm:

@@ -820,11 +927,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_display_fini(dev);
}
if (dev_priv->channel) {
nouveau_channel_put_unlocked(&dev_priv->channel);
nouveau_fence_fini(dev);
}
nouveau_card_channel_fini(dev);
nouveau_fence_fini(dev);
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
@@ -993,8 +1097,8 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
unsigned long long offset, length;
uint32_t reg0 = ~0, strap;
resource_size_t mmio_start_offs;
int ret;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);

@@ -1048,6 +1152,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
case 0xd0:
dev_priv->card_type = NV_D0;
break;
case 0xe0:
dev_priv->card_type = NV_E0;
break;
default:
break;
}

@@ -1072,17 +1179,20 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
/* map the mmio regs */
mmio_start_offs = pci_resource_start(dev->pdev, 0);
dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
/* map the mmio regs, limiting the amount to preserve vmap space */
offset = pci_resource_start(dev->pdev, 0);
length = pci_resource_len(dev->pdev, 0);
if (dev_priv->card_type < NV_E0)
length = min(length, (unsigned long long)0x00800000);
dev_priv->mmio = ioremap(offset, length);
if (!dev_priv->mmio) {
NV_ERROR(dev, "Unable to initialize the mmio mapping. "
"Please report your setup to " DRIVER_EMAIL "\n");
ret = -EINVAL;
goto err_priv;
}
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", offset);
/* determine frequency of timing crystal */
strap = nv_rd32(dev, 0x101000);

@@ -1140,7 +1250,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
}
} else {
dev_priv->ramin_size = 1 * 1024 * 1024;
dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
dev_priv->ramin = ioremap(offset + NV_RAMIN,
dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
@@ -474,15 +474,15 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
if (dev_priv->chipset < 0xc0) {
BEGIN_RING(chan, NvSubSw, 0x0060, 2);
BEGIN_RING(chan, 0, 0x0060, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, dispc->sem.offset);
BEGIN_RING(chan, NvSubSw, 0x006c, 1);
BEGIN_RING(chan, 0, 0x006c, 1);
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
BEGIN_RING(chan, NvSubSw, 0x0064, 2);
BEGIN_RING(chan, 0, 0x0064, 2);
OUT_RING (chan, dispc->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
BEGIN_RING(chan, NvSubSw, 0x0060, 1);
BEGIN_RING(chan, 0, 0x0060, 1);
if (dev_priv->chipset < 0x84)
OUT_RING (chan, NvSema);
else

@@ -490,12 +490,12 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
} else {
u64 offset = chan->dispc_vma[nv_crtc->index].offset;
offset += dispc->sem.offset;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
OUT_RING (chan, 0x1002);
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
@@ -436,6 +436,24 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
static int
nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) {
chan = dev_priv->channels.ptr[chid];
if (likely(chan))
ret = nouveau_finish_page_flip(chan, NULL);
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
return ret;
}
static void
nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
{

@@ -445,11 +463,21 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000);
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
NV_INFO(dev, "PSUBFIFO %d:", unit);
nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
unit, chid, subc, mthd, data);
if (stat & 0x00200000) {
if (mthd == 0x0054) {
if (!nvc0_fifo_page_flip(dev, chid))
show &= ~0x00200000;
}
}
if (show) {
NV_INFO(dev, "PFIFO%d:", unit);
nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
unit, chid, subc, mthd, data);
}
nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
@@ -333,14 +333,6 @@ nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
return 0;
}
static int
nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
nouveau_finish_page_flip(chan, NULL);
return 0;
}
static void
nvc0_graph_init_obj418880(struct drm_device *dev)
{

@@ -889,7 +881,6 @@ nvc0_graph_create(struct drm_device *dev)
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
if (fermi >= 0x9197)
NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
@@ -303,12 +303,12 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
offset = chan->dispc_vma[nv_crtc->index].offset;
offset += evo->sem.offset;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | evo->sem.value);
OUT_RING (chan, 0x1002);
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);

@@ -363,10 +363,12 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
static int
nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
struct drm_device *dev = nv_crtc->base.dev;
struct nouveau_connector *nv_connector;
struct drm_connector *connector;
u32 *push, mode = 0x00;
u32 mthd;
nv_connector = nouveau_crtc_connector_get(nv_crtc);
connector = &nv_connector->base;

@@ -384,9 +386,14 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
mode |= nv_connector->dithering_depth;
}
if (dev_priv->card_type < NV_E0)
mthd = 0x0490 + (nv_crtc->index * 0x0300);
else
mthd = 0x04a0 + (nv_crtc->index * 0x0300);
push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
evo_mthd(push, mthd, 1);
evo_data(push, mode);
if (update) {
evo_mthd(push, 0x0080, 1);

@@ -1219,6 +1226,11 @@ nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
if (table[0] == 0x30) {
config = entry + table[4];
config += table[5] * preem;
} else
if (table[0] == 0x40) {
config = table + table[1];
config += table[2] * table[3];
config += table[6] * preem;
}
}
@@ -1251,6 +1263,7 @@ nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
table = nouveau_dp_bios_data(dev, dcb, &entry);
if (table) {
if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
else entry = NULL;
while (entry) {

@@ -1661,7 +1674,9 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
}
pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
if (mask & 0x00010000) {
NV_DEBUG_KMS(dev, "PDISP: crtc %d pclk %d mask 0x%08x\n",
crtc, pclk, mask);
if (pclk && (mask & 0x00010000)) {
nv50_crtc_set_clock(dev, crtc, pclk);
}
@@ -64,8 +64,6 @@ source "drivers/staging/phison/Kconfig"
source "drivers/staging/line6/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"