Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (390 commits)
  drm/radeon/kms: disable underscan by default
  drm/radeon/kms: only enable hdmi features if the monitor supports audio
  drm: Restore the old_fb upon modeset failure
  drm/nouveau: fix hwmon device binding
  radeon: consolidate asic-specific function decls for pre-r600
  vga_switcheroo: comparing too few characters in strncmp()
  drm/radeon/kms: add NI pci ids
  drm/radeon/kms: don't enable pcie gen2 on NI yet
  drm/radeon/kms: add radeon_asic struct for NI asics
  drm/radeon/kms/ni: load default sclk/mclk/vddc at pm init
  drm/radeon/kms: add ucode loader for NI
  drm/radeon/kms: add support for DCE5 display LUTs
  drm/radeon/kms: add ni_reg.h
  drm/radeon/kms: add bo blit support for NI
  drm/radeon/kms: always use writeback/events for fences on NI
  drm/radeon/kms: adjust default clock/vddc tracking for pm on DCE5
  drm/radeon/kms: add backend map workaround for barts
  drm/radeon/kms: fill gpu init for NI asics
  drm/radeon/kms: add disabled vbios accessor for NI asics
  drm/radeon/kms: handle NI thermal controller
  ...
Commit: 5b2eef966c
@@ -120,7 +120,6 @@ struct agp_bridge_driver {
	void (*agp_destroy_page)(struct page *, int flags);
	void (*agp_destroy_pages)(struct agp_memory *);
	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
-	void (*chipset_flush)(struct agp_bridge_data *);
};

struct agp_bridge_data {
@@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
		break;

-	case AGPIOC_CHIPSET_FLUSH32:
-		ret_val = agpioc_chipset_flush_wrap(curr_priv);
-		break;
	}
@@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv);

#endif /* _AGP_COMPAT_H */
@@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
	return agp_unbind_memory(memory);
}

-int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
-{
-	DBG("");
-	agp_flush_chipset(agp_bridge);
-	return 0;
-}
-
static long agp_ioctl(struct file *file,
		      unsigned int cmd, unsigned long arg)
{
@@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file,
		break;

-	case AGPIOC_CHIPSET_FLUSH:
-		ret_val = agpioc_chipset_flush_wrap(curr_priv);
-		break;
	}
@@ -81,13 +81,6 @@ static int agp_get_key(void)
	return -1;
}

-void agp_flush_chipset(struct agp_bridge_data *bridge)
-{
-	if (bridge->driver->chipset_flush)
-		bridge->driver->chipset_flush(bridge);
-}
-EXPORT_SYMBOL(agp_flush_chipset);
-
/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr)
}
EXPORT_SYMBOL(agp_unbind_memory);

-/**
- * agp_rebind_emmory - Rewrite the entire GATT, useful on resume
- */
-int agp_rebind_memory(void)
-{
-	struct agp_memory *curr;
-	int ret_val = 0;
-
-	spin_lock(&agp_bridge->mapped_lock);
-	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
-		ret_val = curr->bridge->driver->insert_memory(curr,
-							      curr->pg_start,
-							      curr->type);
-		if (ret_val != 0)
-			break;
-	}
-	spin_unlock(&agp_bridge->mapped_lock);
-	return ret_val;
-}
-EXPORT_SYMBOL(agp_rebind_memory);
-
/* End - Routines for handling swapping of agp_memory into the GATT */
@@ -828,14 +828,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
static int agp_intel_resume(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-	int ret_val;

	bridge->driver->configure();

-	ret_val = agp_rebind_memory();
-	if (ret_val != 0)
-		return ret_val;
-
	return 0;
}
#endif
@@ -75,6 +75,8 @@
#define I810_GMS_DISABLE	0x00000000
#define I810_PGETBL_CTL		0x2020
#define I810_PGETBL_ENABLED	0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2	0x20c4
#define I965_PGETBL_SIZE_MASK	0x0000000e
#define I965_PGETBL_SIZE_512KB	(0 << 1)
#define I965_PGETBL_SIZE_256KB	(1 << 1)
@@ -82,9 +84,15 @@
#define I965_PGETBL_SIZE_1MB	(3 << 1)
#define I965_PGETBL_SIZE_2MB	(4 << 1)
#define I965_PGETBL_SIZE_1_5MB	(5 << 1)
-#define G33_PGETBL_SIZE_MASK	(3 << 8)
-#define G33_PGETBL_SIZE_1M	(1 << 8)
-#define G33_PGETBL_SIZE_2M	(2 << 8)
+#define G33_GMCH_SIZE_MASK	(3 << 8)
+#define G33_GMCH_SIZE_1M	(1 << 8)
+#define G33_GMCH_SIZE_2M	(2 << 8)
+#define G4x_GMCH_SIZE_MASK	(0xf << 8)
+#define G4x_GMCH_SIZE_1M	(0x1 << 8)
+#define G4x_GMCH_SIZE_2M	(0x3 << 8)
+#define G4x_GMCH_SIZE_VT_1M	(0x9 << 8)
+#define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
+#define G4x_GMCH_SIZE_VT_2M	(0xc << 8)

#define I810_DRAM_CTL		0x3000
#define I810_DRAM_ROW_0		0x00000001
(The diff for one file is not shown here because of its size.)
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);

-void drm_agp_chipset_flush(struct drm_device *dev)
-{
-	agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
#endif /* __OS_HAS_AGP */
@@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_encoder_helper_funcs *encoder_funcs;
	int saved_x, saved_y;
@@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
	if (!crtc->enabled)
		return true;

+	saved_hwmode = crtc->hwmode;
	saved_mode = crtc->mode;
	saved_x = crtc->x;
	saved_y = crtc->y;
@@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
	}

+	/* Store real post-adjustment hardware mode. */
+	crtc->hwmode = *adjusted_mode;
+
+	/* Calculate and store various constants which
+	 * are later needed by vblank and swap-completion
+	 * timestamping. They are derived from true hwmode.
+	 */
+	drm_calc_timestamping_constants(crtc);
+
	/* XXX free adjustedmode */
	drm_mode_destroy(dev, adjusted_mode);
	/* FIXME: add subpixel order */
done:
	if (!ret) {
+		crtc->hwmode = saved_hwmode;
		crtc->mode = saved_mode;
		crtc->x = saved_x;
		crtc->y = saved_y;
@@ -650,6 +661,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
				      old_fb)) {
			DRM_ERROR("failed to set mode on [CRTC:%d]\n",
				  set->crtc->base.id);
+			set->crtc->fb = old_fb;
			ret = -EINVAL;
			goto fail;
		}
@@ -664,8 +676,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
		set->crtc->fb = set->fb;
		ret = crtc_funcs->mode_set_base(set->crtc,
						set->x, set->y, old_fb);
-		if (ret != 0)
+		if (ret != 0) {
+			set->crtc->fb = old_fb;
			goto fail;
+		}
	}
	DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
	for (i = 0; i < set->num_connectors; i++) {
@@ -607,6 +607,25 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);

+void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
+{
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+		FB_VISUAL_TRUECOLOR;
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+	info->fix.type_aux = 0;
+	info->fix.xpanstep = 1; /* doing it in hw */
+	info->fix.ypanstep = 1; /* doing it in hw */
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.type_aux = 0;
+
+	info->fix.line_length = fb->pitch;
+	return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
		     u16 blue, u16 regno, struct fb_info *info)
{
@@ -816,6 +835,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
			mutex_unlock(&dev->mode_config.mutex);
			return ret;
		}
+		drm_fb_helper_fill_fix(info, fb_helper->fb);
	}
	mutex_unlock(&dev->mode_config.mutex);

@@ -953,6 +973,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,

	if (new_fb) {
		info->var.pixclock = 0;
+		drm_fb_helper_fill_fix(info, fb_helper->fb);
		if (register_framebuffer(info) < 0) {
			return -EINVAL;
		}
@@ -979,24 +1000,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);

-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
-			    uint32_t depth)
-{
-	info->fix.type = FB_TYPE_PACKED_PIXELS;
-	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
-		FB_VISUAL_TRUECOLOR;
-	info->fix.type_aux = 0;
-	info->fix.xpanstep = 1; /* doing it in hw */
-	info->fix.ypanstep = 1; /* doing it in hw */
-	info->fix.ywrapstep = 0;
-	info->fix.accel = FB_ACCEL_NONE;
-	info->fix.type_aux = 0;
-
-	info->fix.line_length = pitch;
-	return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
			    uint32_t fb_width, uint32_t fb_height)
{
@@ -1005,6 +1008,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
	info->var.xres_virtual = fb->width;
	info->var.yres_virtual = fb->height;
	info->var.bits_per_pixel = fb->bits_per_pixel;
+	info->var.accel_flags = FB_ACCELF_TEXT;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1534,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);

+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+static int __init drm_fb_helper_modinit(void)
+{
+	const char *name = "fbcon";
+	struct module *fbcon;
+
+	mutex_lock(&module_mutex);
+	fbcon = find_module(name);
+	mutex_unlock(&module_mutex);
+
+	if (!fbcon)
+		request_module_nowait(name);
+	return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
@@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);

@@ -40,6 +40,22 @@
#include <linux/slab.h>

#include <linux/vgaarb.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+	((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
/**
 * Get interrupt from bus id.
 *
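As an aside (not part of the commit): the vblanktimestamp() macro above indexes one flat array as a per-crtc ring buffer, so counts that differ by the ring size land in the same slot. A minimal standalone C sketch of the same indexing scheme, with a made-up ring size:

    #include <stdio.h>

    #define RBSIZE 10  /* hypothetical; the kernel uses DRM_VBLANKTIME_RBSIZE */

    /* One flat array holds RBSIZE slots per crtc; the slot for a given
     * (crtc, count) pair is crtc * RBSIZE + (count % RBSIZE). */
    static int slot(int crtc, unsigned count)
    {
        return crtc * RBSIZE + (count % RBSIZE);
    }

    int main(void)
    {
        unsigned count;

        /* Counts 8..12 on crtc 1 wrap within that crtc's ten slots. */
        for (count = 8; count < 13; count++)
            printf("crtc 1, count %u -> slot %d\n", count, slot(1, count));
        return 0;
    }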
@@ -77,6 +93,87 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
	return 0;
}

+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+	       DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	u32 vblcount;
+	s64 diff_ns;
+	int vblrc;
+	struct timeval tvblank;
+
+	/* Prevent vblank irq processing while disabling vblank irqs,
+	 * so no updates of timestamps or count can happen after we've
+	 * disabled. Needed to prevent races in case of delayed irq's.
+	 * Disable preemption, so vblank_time_lock is held as short as
+	 * possible, even under a kernel with PREEMPT_RT patches.
+	 */
+	preempt_disable();
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	dev->driver->disable_vblank(dev, crtc);
+	dev->vblank_enabled[crtc] = 0;
+
+	/* No further vblank irq's will be processed after
+	 * this point. Get current hardware vblank count and
+	 * vblank timestamp, repeat until they are consistent.
+	 *
+	 * FIXME: There is still a race condition here and in
+	 * drm_update_vblank_count() which can cause off-by-one
+	 * reinitialization of software vblank counter. If gpu
+	 * vblank counter doesn't increment exactly at the leading
+	 * edge of a vblank interval, then we can lose 1 count if
+	 * we happen to execute between start of vblank and the
+	 * delayed gpu counter increment.
+	 */
+	do {
+		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+
+	/* Compute time difference to stored timestamp of last vblank
+	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
+	 */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* If there is at least 1 msec difference between the last stored
+	 * timestamp and tvblank, then we are currently executing our
+	 * disable inside a new vblank interval, the tvblank timestamp
+	 * corresponds to this new vblank interval and the irq handler
+	 * for this vblank didn't run yet and won't run due to our disable.
+	 * Therefore we need to do the job of drm_handle_vblank() and
+	 * increment the vblank counter by one to account for this vblank.
+	 *
+	 * Skip this step if there isn't any high precision timestamp
+	 * available. In that case we can't account for this and just
+	 * hope for the best.
+	 */
+	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+		atomic_inc(&dev->_vblank_count[crtc]);
+
+	/* Invalidate all timestamps while vblank irq's are off. */
+	clear_vblank_timestamps(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	preempt_enable();
+}
+
static void vblank_disable_fn(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
@@ -91,10 +188,7 @@ static void vblank_disable_fn(unsigned long arg)
		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
		    dev->vblank_enabled[i]) {
			DRM_DEBUG("disabling vblank on crtc %d\n", i);
-			dev->last_vblank[i] =
-				dev->driver->get_vblank_counter(dev, i);
-			dev->driver->disable_vblank(dev, i);
-			dev->vblank_enabled[i] = 0;
+			vblank_disable_and_save(dev, i);
		}
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
	}
@@ -117,6 +211,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
	kfree(dev->last_vblank);
	kfree(dev->last_vblank_wait);
	kfree(dev->vblank_inmodeset);
+	kfree(dev->_vblank_time);

	dev->num_crtcs = 0;
}
@@ -129,6 +224,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
		    (unsigned long)dev);
	spin_lock_init(&dev->vbl_lock);
+	spin_lock_init(&dev->vblank_time_lock);
+
	dev->num_crtcs = num_crtcs;

	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -161,6 +258,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
	if (!dev->vblank_inmodeset)
		goto err;

+	dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+				    sizeof(struct timeval), GFP_KERNEL);
+	if (!dev->_vblank_time)
+		goto err;
+
+	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+	/* Driver specific high-precision vblank timestamping supported? */
+	if (dev->driver->get_vblank_timestamp)
+		DRM_INFO("Driver supports precise vblank timestamp query.\n");
+	else
+		DRM_INFO("No driver support for vblank timestamp query.\n");
+
	/* Zero per-crtc vblank stuff */
	for (i = 0; i < num_crtcs; i++) {
		init_waitqueue_head(&dev->vbl_queue[i]);
@@ -279,7 +389,7 @@ EXPORT_SYMBOL(drm_irq_install);
 *
 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
 */
-int drm_irq_uninstall(struct drm_device * dev)
+int drm_irq_uninstall(struct drm_device *dev)
{
	unsigned long irqflags;
	int irq_enabled, i;
@@ -335,7 +445,9 @@ int drm_control(struct drm_device *dev, void *data,
{
	struct drm_control *ctl = data;

-	/* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+	/* if we haven't irq we fallback for compatibility reasons -
+	 * this used to be a separate function in drm_dma.h
+	 */

	switch (ctl->func) {
@@ -359,6 +471,287 @@ int drm_control(struct drm_device *dev, void *data,
	}
}

+/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g, by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+	s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+	u64 dotclock;
+
+	/* Dot clock in Hz: */
+	dotclock = (u64) crtc->hwmode.clock * 1000;
+
+	/* Valid dotclock? */
+	if (dotclock > 0) {
+		/* Convert scanline length in pixels and video dot clock to
+		 * line duration, frame duration and pixel duration in
+		 * nanoseconds:
+		 */
+		pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+		linedur_ns  = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+					      1000000000), dotclock);
+		framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+	} else
+		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+			  crtc->base.id);
+
+	crtc->pixeldur_ns = pixeldur_ns;
+	crtc->linedur_ns  = linedur_ns;
+	crtc->framedur_ns = framedur_ns;
+
+	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+		  crtc->base.id, crtc->hwmode.crtc_htotal,
+		  crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+		  crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+		  (int) linedur_ns, (int) pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
+
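A standalone illustration (not from the commit) of the arithmetic drm_calc_timestamping_constants() performs: with the dot clock in Hz, one pixel lasts 1e9/dotclock ns, a line lasts htotal pixels, and a frame lasts vtotal lines. The mode numbers below are made up (roughly a 1024x768@60 mode):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical mode timings. */
        uint64_t clock_khz = 65000;            /* 65 MHz dot clock */
        uint64_t htotal = 1344, vtotal = 806;  /* total pixels/lines incl. blanking */

        uint64_t dotclock = clock_khz * 1000;             /* Hz */
        uint64_t pixeldur_ns = 1000000000ULL / dotclock;  /* ~15 ns */
        uint64_t linedur_ns = htotal * 1000000000ULL / dotclock;
        uint64_t framedur_ns = vtotal * linedur_ns;

        printf("pixel %llu ns, line %llu ns, frame %llu ns (~%.1f Hz)\n",
               (unsigned long long)pixeldur_ns,
               (unsigned long long)linedur_ns,
               (unsigned long long)framedur_ns,
               1e9 / (double)framedur_ns);
        return 0;
    }

With these constants a scanout position (line, pixel) converts directly to a time offset from the start of the frame, which is what the timestamping code below relies on.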
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as no operation if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ *             On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL   - Invalid crtc.
+ * -EAGAIN   - Temporary unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO      - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+					  int *max_error,
+					  struct timeval *vblank_time,
+					  unsigned flags,
+					  struct drm_crtc *refcrtc)
+{
+	struct timeval stime, raw_time;
+	struct drm_display_mode *mode;
+	int vbl_status, vtotal, vdisplay;
+	int vpos, hpos, i;
+	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+	bool invbl;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Scanout position query not supported? Should not happen. */
+	if (!dev->driver->get_scanout_position) {
+		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+		return -EIO;
+	}
+
+	mode = &refcrtc->hwmode;
+	vtotal = mode->crtc_vtotal;
+	vdisplay = mode->crtc_vdisplay;
+
+	/* Durations of frames, lines, pixels in nanoseconds. */
+	framedur_ns = refcrtc->framedur_ns;
+	linedur_ns  = refcrtc->linedur_ns;
+	pixeldur_ns = refcrtc->pixeldur_ns;
+
+	/* If mode timing undefined, just return as no-op:
+	 * Happens during initial modesetting of a crtc.
+	 */
+	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+		return -EAGAIN;
+	}
+
+	/* Don't know yet how to handle interlaced or
+	 * double scan modes. Just no-op for now.
+	 */
+	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
+		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
+		return -ENOTSUPP;
+	}
+
+	/* Get current scanout position with system timestamp.
+	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+	 * if single query takes longer than max_error nanoseconds.
+	 *
+	 * This guarantees a tight bound on maximum error if
+	 * code gets preempted or delayed for some reason.
+	 */
+	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+		/* Disable preemption to make it very likely to
+		 * succeed in the first iteration even on PREEMPT_RT kernel.
+		 */
+		preempt_disable();
+
+		/* Get system timestamp before query. */
+		do_gettimeofday(&stime);
+
+		/* Get vertical and horizontal scanout pos. vpos, hpos. */
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+		/* Get system timestamp after query. */
+		do_gettimeofday(&raw_time);
+
+		preempt_enable();
+
+		/* Return as no-op if scanout query unsupported or failed. */
+		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+				  crtc, vbl_status);
+			return -EIO;
+		}
+
+		duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+		/* Accept result with < max_error nsecs timing uncertainty. */
+		if (duration_ns <= (s64) *max_error)
+			break;
+	}
+
+	/* Noisy system timing? */
+	if (i == DRM_TIMESTAMP_MAXRETRIES) {
+		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+			  crtc, (int) duration_ns/1000, *max_error/1000, i);
+	}
+
+	/* Return upper bound of timestamp precision error. */
+	*max_error = (int) duration_ns;
+
+	/* Check if in vblank area:
+	 * vpos is >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+	invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+	/* Convert scanout position into elapsed time at raw_time query
+	 * since start of scanout at first display scanline. delta_ns
+	 * can be negative if start of scanout hasn't happened yet.
+	 */
+	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+	/* Is vpos outside nominal vblank area, but less than
+	 * 1/100 of a frame height away from start of vblank?
+	 * If so, assume this isn't a massively delayed vblank
+	 * interrupt, but a vblank interrupt that fired a few
+	 * microseconds before true start of vblank. Compensate
+	 * by adding a full frame duration to the final timestamp.
+	 * Happens, e.g., on ATI R500, R600.
+	 *
+	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
+	 */
+	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+	    ((vdisplay - vpos) < vtotal / 100)) {
+		delta_ns = delta_ns - framedur_ns;
+
+		/* Signal this correction as "applied". */
+		vbl_status |= 0x8;
+	}
+
+	/* Subtract time delta from raw timestamp to get final
+	 * vblank_time timestamp for end of vblank.
+	 */
+	*vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+
+	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
+		  crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+		  raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+		  (int) duration_ns/1000, i);
+
+	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+	if (invbl)
+		vbl_status |= DRM_VBLANKTIME_INVBL;
+
+	return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+
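Outside the diff, the retry pattern above can be demonstrated in plain userspace C: bracket a query with two clock reads and accept the sample only if the bracket is tight enough, so the bracket width becomes a hard bound on the timestamp error. A sketch, with clock_gettime() standing in for do_gettimeofday() and the device query elided:

    #include <stdio.h>
    #include <time.h>

    #define MAXRETRIES 3

    static long long ns(const struct timespec *t)
    {
        return (long long)t->tv_sec * 1000000000LL + t->tv_nsec;
    }

    int main(void)
    {
        long long max_error_ns = 20000; /* accept <= 20 us of uncertainty */
        struct timespec before, after;
        long long duration = 0;
        int i, tries;

        for (i = 0; i < MAXRETRIES; i++) {
            clock_gettime(CLOCK_MONOTONIC, &before);
            /* ... query the device here; its result is valid at some
             * instant between 'before' and 'after' ... */
            clock_gettime(CLOCK_MONOTONIC, &after);

            duration = ns(&after) - ns(&before);
            if (duration <= max_error_ns)
                break;  /* tight bracket: error bound is 'duration' */
        }
        tries = (i == MAXRETRIES) ? MAXRETRIES : i + 1;
        printf("timestamp uncertainty %lld ns after %d queries\n",
               duration, tries);
        return 0;
    }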
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+			      struct timeval *tvblank, unsigned flags)
+{
+	int ret = 0;
+
+	/* Define requested maximum error on timestamps (nanoseconds). */
+	int max_error = (int) drm_timestamp_precision * 1000;
+
+	/* Query driver if possible and precision timestamping enabled. */
+	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+							tvblank, flags);
+		if (ret > 0)
+			return (u32) ret;
+	}
+
+	/* GPU high precision timestamp query unsupported or failed.
+	 * Return gettimeofday timestamp as best estimate.
+	 */
+	do_gettimeofday(tvblank);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
/**
 * drm_vblank_count - retrieve "cooked" vblank counter value
 * @dev: DRM device
@@ -374,6 +767,40 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_count);

+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current value vblank counter
+ * value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+			      struct timeval *vblanktime)
+{
+	u32 cur_vblank;
+
+	/* Read timestamp from slot of _vblank_time ringbuffer
+	 * that corresponds to current vblank count. Retry if
+	 * count has incremented during readout. This works like
+	 * a seqlock.
+	 */
+	do {
+		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+		smp_rmb();
+	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+	return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
+
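A userspace sketch (assumed, not from the commit) of the seqlock-like read above: fetch the counter, read the timestamp, then re-read the counter; if it moved, the pair may be torn, so retry. No lock is taken on the reader side:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint vbl_count;
    static long long vbl_time[1]; /* slot(s) written by the "irq" side */

    /* Reader: returns a (count, timestamp) pair that belongs together. */
    static unsigned read_count_and_time(long long *ts)
    {
        unsigned cur;

        do {
            cur = atomic_load(&vbl_count);
            *ts = vbl_time[0];
            /* Pair with the writer's release/store ordering; the kernel
             * code uses smp_rmb()/smp_wmb() for the same purpose. */
            atomic_thread_fence(memory_order_acquire);
        } while (cur != atomic_load(&vbl_count));

        return cur;
    }

    int main(void)
    {
        long long ts;
        unsigned c = read_count_and_time(&ts);
        printf("count %u at %lld\n", c, ts);
        return 0;
    }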
/**
 * drm_update_vblank_count - update the master vblank counter
 * @dev: DRM device
@@ -392,7 +819,8 @@ EXPORT_SYMBOL(drm_vblank_count);
 */
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
-	u32 cur_vblank, diff;
+	u32 cur_vblank, diff, tslot, rc;
+	struct timeval t_vblank;

	/*
	 * Interrupts were disabled prior to this call, so deal with counter
@@ -400,8 +828,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
	 * NOTE! It's possible we lost a full dev->max_vblank_count events
	 * here if the register is small or we had vblank interrupts off for
	 * a long time.
+	 *
+	 * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This to prevent races between gpu
+	 * updating its hardware counter while we are retrieving the
+	 * corresponding vblank timestamp.
	 */
-	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+	do {
+		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));

	/* Deal with counter wrap */
	diff = cur_vblank - dev->last_vblank[crtc];
	if (cur_vblank < dev->last_vblank[crtc]) {
		diff += dev->max_vblank_count;
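For reference (not in the diff): with a full 32-bit free-running counter, unsigned subtraction already handles a single wrap; the explicit max_vblank_count correction above matters for hardware whose counter is narrower than 32 bits. A sketch of both cases:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Full 32-bit counter: modular subtraction is wrap-safe. */
        uint32_t last = 0xfffffffe, cur = 3;
        printf("32-bit diff: %u\n", cur - last);   /* 5 */

        /* Hypothetical 24-bit hardware counter: add its modulus back
         * when the raw value appears to have gone backwards. */
        uint32_t max_count = 1u << 24;
        uint32_t last24 = max_count - 2, cur24 = 3;
        uint32_t diff = cur24 - last24;
        if (cur24 < last24)
            diff += max_count;
        printf("24-bit diff: %u\n", diff);         /* 5 */
        return 0;
    }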
@@ -413,6 +851,16 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
		  crtc, diff);

+	/* Reinitialize corresponding vblank timestamp if high-precision query
+	 * available. Skip this step if query unsupported or failed. Will
+	 * reinitialize delayed at next vblank interrupt in that case.
+	 */
+	if (rc) {
+		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+		vblanktimestamp(dev, crtc, tslot) = t_vblank;
+		smp_wmb();
+	}
+
	atomic_add(diff, &dev->_vblank_count[crtc]);
}

@@ -429,15 +877,27 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 */
int drm_vblank_get(struct drm_device *dev, int crtc)
{
-	unsigned long irqflags;
+	unsigned long irqflags, irqflags2;
	int ret = 0;

	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	/* Going from 0->1 means we have to enable interrupts again */
	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+		/* Disable preemption while holding vblank_time_lock. Do
+		 * it explicitely to guard against PREEMPT_RT kernel.
+		 */
+		preempt_disable();
+		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
		if (!dev->vblank_enabled[crtc]) {
+			/* Enable vblank irqs under vblank_time_lock protection.
+			 * All vblank count & timestamp updates are held off
+			 * until we are done reinitializing master counter and
+			 * timestamps. Filtercode in drm_handle_vblank() will
+			 * prevent double-accounting of same vblank interval.
+			 */
			ret = dev->driver->enable_vblank(dev, crtc);
-			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+				  crtc, ret);
			if (ret)
				atomic_dec(&dev->vblank_refcount[crtc]);
			else {
@@ -445,6 +905,8 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
				drm_update_vblank_count(dev, crtc);
			}
		}
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+		preempt_enable();
	} else {
		if (!dev->vblank_enabled[crtc]) {
			atomic_dec(&dev->vblank_refcount[crtc]);
@@ -463,15 +925,17 @@ EXPORT_SYMBOL(drm_vblank_get);
 * @crtc: which counter to give up
 *
 * Release ownership of a given vblank counter, turning off interrupts
- * if possible.
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
 */
void drm_vblank_put(struct drm_device *dev, int crtc)
{
-	BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);

	/* Last user schedules interrupt disable */
-	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
-		mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+	    (drm_vblank_offdelay > 0))
+		mod_timer(&dev->vblank_disable_timer,
+			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
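Not part of the commit: the new disable path arms the timer drm_vblank_offdelay milliseconds out instead of the old fixed 5 seconds. The conversion is plain ms-to-ticks arithmetic; a sketch with an assumed tick rate:

    #include <stdio.h>

    #define HZ 250  /* assumed tick rate; DRM_HZ is the kernel's real one */

    int main(void)
    {
        unsigned int offdelay_ms = 5000;  /* module parameter default */
        unsigned long ticks = (unsigned long)offdelay_ms * HZ / 1000;

        /* Old behaviour: jiffies + 5*HZ; new: jiffies + offdelay*HZ/1000,
         * identical at the 5000 ms default but now tunable at runtime. */
        printf("%u ms -> %lu ticks\n", offdelay_ms, ticks);
        return 0;
    }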
@@ -480,10 +944,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
	unsigned long irqflags;

	spin_lock_irqsave(&dev->vbl_lock, irqflags);
-	dev->driver->disable_vblank(dev, crtc);
+	vblank_disable_and_save(dev, crtc);
	DRM_WAKEUP(&dev->vbl_queue[crtc]);
-	dev->vblank_enabled[crtc] = 0;
-	dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
@@ -602,7 +1064,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
	e->base.file_priv = file_priv;
	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

-	do_gettimeofday(&now);
	spin_lock_irqsave(&dev->event_lock, flags);

	if (file_priv->event_space < sizeof e->event) {
@@ -611,7 +1072,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
	}

	file_priv->event_space -= sizeof e->event;
-	seq = drm_vblank_count(dev, pipe);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);

	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1 << 23)) {
		vblwait->request.sequence = seq + 1;
@@ -626,15 +1088,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,

	e->event.sequence = vblwait->request.sequence;
	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
		e->event.sequence = seq;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;
		drm_vblank_put(dev, pipe);
		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
		vblwait->reply.sequence = seq;
		trace_drm_vblank_event_delivered(current->pid, pipe,
						 vblwait->request.sequence);
	} else {
		list_add_tail(&e->base.link, &dev->vblank_event_list);
		vblwait->reply.sequence = vblwait->request.sequence;
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -727,11 +1192,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
	if (ret != -EINTR) {
		struct timeval now;

-		do_gettimeofday(&now);
+		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
		vblwait->reply.tval_sec = now.tv_sec;
		vblwait->reply.tval_usec = now.tv_usec;
-		vblwait->reply.sequence = drm_vblank_count(dev, crtc);

		DRM_DEBUG("returning %d to client\n",
			  vblwait->reply.sequence);
	} else {
@@ -750,8 +1214,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
	unsigned long flags;
	unsigned int seq;

-	do_gettimeofday(&now);
-	seq = drm_vblank_count(dev, crtc);
+	seq = drm_vblank_count_and_time(dev, crtc, &now);

	spin_lock_irqsave(&dev->event_lock, flags);

@@ -789,11 +1252,64 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 */
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
+	u32 vblcount;
+	s64 diff_ns;
+	struct timeval tvblank;
+	unsigned long irqflags;
+
	if (!dev->num_crtcs)
		return;

-	atomic_inc(&dev->_vblank_count[crtc]);
+	/* Need timestamp lock to prevent concurrent execution with
+	 * vblank enable/disable, as this would cause inconsistent
+	 * or corrupted timestamps and vblank counts.
+	 */
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	/* Vblank irq handling disabled. Nothing to do. */
+	if (!dev->vblank_enabled[crtc]) {
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+		return;
+	}
+
+	/* Fetch corresponding timestamp for this vblank interval from
+	 * driver and store it in proper slot of timestamp ringbuffer.
+	 */
+
+	/* Get current timestamp and count. */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+	/* Compute time difference to timestamp of last vblank */
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* Update vblank timestamp and count if at least
+	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+	 * difference between last stored timestamp and current
+	 * timestamp. A smaller difference means basically
+	 * identical timestamps. Happens if this vblank has
+	 * been already processed and this is a redundant call,
+	 * e.g., due to spurious vblank interrupts. We need to
+	 * ignore those for accounting.
+	 */
+	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+		/* Store new timestamp in ringbuffer. */
+		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+		smp_wmb();
+
+		/* Increment cooked vblank count. This also atomically commits
+		 * the timestamp computed above.
+		 */
+		atomic_inc(&dev->_vblank_count[crtc]);
+	} else {
+		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+			  crtc, (int) diff_ns);
+	}
+
	DRM_WAKEUP(&dev->vbl_queue[crtc]);
	drm_handle_vblank_events(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
EXPORT_SYMBOL(drm_handle_vblank);
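An illustration (made-up numbers, not from the commit) of the filter above: two timestamps less than DRM_REDUNDANT_VBLIRQ_THRESH_NS apart are treated as the same vblank, so a spurious second interrupt does not bump the count:

    #include <stdio.h>
    #include <stdlib.h>

    #define THRESH_NS 1000000LL /* 1 ms, as in DRM_REDUNDANT_VBLIRQ_THRESH_NS */

    static long long last_ns;
    static unsigned count;

    static void handle_vblank(long long now_ns)
    {
        if (llabs(now_ns - last_ns) > THRESH_NS) {
            last_ns = now_ns;   /* store the timestamp, then commit the count */
            count++;
        } else {
            printf("redundant irq at %lld ns ignored\n", now_ns);
        }
    }

    int main(void)
    {
        handle_vblank(16666667);   /* real vblank at ~16.7 ms */
        handle_vblank(16666900);   /* spurious re-fire ~233 ns later */
        handle_vblank(33333334);   /* next real vblank */
        printf("count = %u\n", count);   /* 2 */
        return 0;
    }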
@@ -392,9 +392,35 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
+	mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);

+/**
+ * Initializa lru scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+				 unsigned alignment,
+				 unsigned long start,
+				 unsigned long end)
+{
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_size = 0;
+	mm->scan_start = start;
+	mm->scan_end = end;
+	mm->scan_check_range = 1;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
+
/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
	struct drm_mm *mm = node->mm;
	struct list_head *prev_free, *next_free;
	struct drm_mm_node *prev_node, *next_node;
+	unsigned long adj_start;
+	unsigned long adj_end;

	mm->scanned_blocks++;

@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
	node->free_stack.prev = prev_free;
	node->free_stack.next = next_free;

-	if (check_free_hole(node->start, node->start + node->size,
+	if (mm->scan_check_range) {
+		adj_start = node->start < mm->scan_start ?
+			mm->scan_start : node->start;
+		adj_end = node->start + node->size > mm->scan_end ?
+			mm->scan_end : node->start + node->size;
+	} else {
+		adj_start = node->start;
+		adj_end = node->start + node->size;
+	}
+
+	if (check_free_hole(adj_start , adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = node->start;
		mm->scan_hit_size = node->size;
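Aside (not from the diff): the range-restricted scan clamps each hole to the [scan_start, scan_end) window before testing whether the request fits. A small sketch of the clamping:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical hole and scan window, in arbitrary units. */
        unsigned long start = 10, size = 100;          /* hole: [10, 110) */
        unsigned long scan_start = 64, scan_end = 96;  /* allowed window */

        unsigned long adj_start = start < scan_start ? scan_start : start;
        unsigned long adj_end = start + size > scan_end ? scan_end
                                                        : start + size;

        /* Only the overlap [64, 96) may satisfy the request. */
        printf("clamped hole: [%lu, %lu), %lu units usable\n",
               adj_start, adj_end, adj_end - adj_start);
        return 0;
    }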
@@ -40,12 +40,22 @@
unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

+unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");

module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);

struct idr drm_minors_idr;

@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
	  i915_gem.o \
	  i915_gem_debug.o \
	  i915_gem_evict.o \
+	  i915_gem_execbuffer.o \
+	  i915_gem_gtt.o \
	  i915_gem_tiling.o \
	  i915_trace_points.o \
	  intel_display.o \
@@ -32,6 +32,7 @@
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
+#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
	B(is_broadwater);
	B(is_crestline);
	B(has_fbc);
-	B(has_rc6);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
	return 0;
}

-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
-	if (obj_priv->user_pin_count > 0)
+	if (obj->user_pin_count > 0)
		return "P";
-	else if (obj_priv->pin_count > 0)
+	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
-	switch (obj_priv->tiling_mode) {
+	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
@@ -109,7 +109,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
-	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
@@ -117,6 +117,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_rendering_seqno,
+		   obj->last_fenced_seqno,
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
@@ -124,7 +125,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
-		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+		seq_printf(m, " (gtt offset: %08x, size: %08x)",
+			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (obj->pin_mappable || obj->fault_mappable) {
+		char s[3], *t = s;
+		if (obj->pin_mappable)
+			*t++ = 'p';
+		if (obj->fault_mappable)
+			*t++ = 'f';
+		*t = '\0';
+		seq_printf(m, " (%s mappable)", s);
+	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
@@ -136,7 +147,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

@@ -171,12 +182,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
	}

	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj_priv, head, mm_list) {
+	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, " ");
-		describe_obj(m, obj_priv);
+		describe_obj(m, obj);
		seq_printf(m, "\n");
-		total_obj_size += obj_priv->base.size;
-		total_gtt_size += obj_priv->gtt_space->size;
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define count_objects(list, member) do { \
|
||||
list_for_each_entry(obj, list, member) { \
|
||||
size += obj->gtt_space->size; \
|
||||
++count; \
|
||||
if (obj->map_and_fenceable) { \
|
||||
mappable_size += obj->gtt_space->size; \
|
||||
++mappable_count; \
|
||||
} \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
static int i915_gem_object_info(struct seq_file *m, void* data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 count, mappable_count;
|
||||
size_t size, mappable_size;
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
|
||||
seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
|
||||
seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
|
||||
seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
|
||||
seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
|
||||
seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
|
||||
seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
|
||||
seq_printf(m, "%u objects, %zu bytes\n",
|
||||
dev_priv->mm.object_count,
|
||||
dev_priv->mm.object_memory);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
count_objects(&dev_priv->mm.gtt_list, gtt_list);
|
||||
seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
|
||||
count, mappable_count, size, mappable_size);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
count_objects(&dev_priv->mm.active_list, mm_list);
|
||||
count_objects(&dev_priv->mm.flushing_list, mm_list);
|
||||
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
|
||||
count, mappable_count, size, mappable_size);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
count_objects(&dev_priv->mm.pinned_list, mm_list);
|
||||
seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
|
||||
count, mappable_count, size, mappable_size);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
count_objects(&dev_priv->mm.inactive_list, mm_list);
|
||||
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
|
||||
count, mappable_count, size, mappable_size);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
count_objects(&dev_priv->mm.deferred_free_list, mm_list);
|
||||
seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
|
||||
count, mappable_count, size, mappable_size);
|
||||
|
||||
size = count = mappable_size = mappable_count = 0;
|
||||
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
|
||||
if (obj->fault_mappable) {
|
||||
size += obj->gtt_space->size;
|
||||
++count;
|
||||
}
|
||||
if (obj->pin_mappable) {
|
||||
mappable_size += obj->gtt_space->size;
|
||||
++mappable_count;
|
||||
}
|
||||
}
|
||||
seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
|
||||
mappable_count, mappable_size);
|
||||
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
|
||||
count, size);
|
||||
|
||||
seq_printf(m, "%zu [%zu] gtt total\n",
|
||||
dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
|
@ -243,14 +309,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
|
|||
seq_printf(m, "%d prepares\n", work->pending);
|
||||
|
||||
if (work->old_fb_obj) {
|
||||
struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
|
||||
if(obj_priv)
|
||||
seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
|
||||
struct drm_i915_gem_object *obj = work->old_fb_obj;
|
||||
if (obj)
|
||||
seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
|
||||
}
|
||||
if (work->pending_flip_obj) {
|
||||
struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
|
||||
if(obj_priv)
|
||||
seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
|
||||
struct drm_i915_gem_object *obj = work->pending_flip_obj;
|
||||
if (obj)
|
||||
seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
@@ -265,44 +331,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
-	int ret;
+	int ret, count;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

-	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
-			list) {
-		seq_printf(m, " %d @ %d\n",
-			   gem_request->seqno,
-			   (int) (jiffies - gem_request->emitted_jiffies));
+	count = 0;
+	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+		seq_printf(m, "Render requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[RCS].request_list,
+				    list) {
+			seq_printf(m, " %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+		seq_printf(m, "BSD requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[VCS].request_list,
+				    list) {
+			seq_printf(m, " %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+		seq_printf(m, "BLT requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[BCS].request_list,
+				    list) {
+			seq_printf(m, " %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
	mutex_unlock(&dev->struct_mutex);

+	if (count == 0)
+		seq_printf(m, "No requests\n");
+
	return 0;
}

+static void i915_ring_seqno_info(struct seq_file *m,
+				 struct intel_ring_buffer *ring)
+{
+	if (ring->get_seqno) {
+		seq_printf(m, "Current sequence (%s): %d\n",
+			   ring->name, ring->get_seqno(ring));
+		seq_printf(m, "Waiter sequence (%s): %d\n",
+			   ring->name, ring->waiting_seqno);
+		seq_printf(m, "IRQ sequence (%s): %d\n",
+			   ring->name, ring->irq_seqno);
+	}
+}
+
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence: %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence: hws uninitialized\n");
-	}
-	seq_printf(m, "Waiter sequence: %d\n",
-		   dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

@ -315,7 +417,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
|
|||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
int ret, i;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
|
@ -354,16 +456,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
|
|||
}
|
||||
seq_printf(m, "Interrupts received: %d\n",
|
||||
atomic_read(&dev_priv->irq_received));
|
||||
if (dev_priv->render_ring.status_page.page_addr != NULL) {
|
||||
seq_printf(m, "Current sequence: %d\n",
|
||||
dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
|
||||
} else {
|
||||
seq_printf(m, "Current sequence: hws uninitialized\n");
|
||||
}
|
||||
seq_printf(m, "Waiter sequence: %d\n",
|
||||
dev_priv->mm.waiting_gem_seqno);
|
||||
seq_printf(m, "IRQ sequence: %d\n",
|
||||
dev_priv->mm.irq_gem_seqno);
|
||||
for (i = 0; i < I915_NUM_RINGS; i++)
|
||||
i915_ring_seqno_info(m, &dev_priv->ring[i]);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
|
@ -383,29 +477,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
|
|||
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
|
||||
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
|
||||
for (i = 0; i < dev_priv->num_fence_regs; i++) {
|
||||
struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
|
||||
struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
|
||||
|
||||
if (obj == NULL) {
|
||||
seq_printf(m, "Fenced object[%2d] = unused\n", i);
|
||||
} else {
|
||||
struct drm_i915_gem_object *obj_priv;
|
||||
|
||||
obj_priv = to_intel_bo(obj);
|
||||
seq_printf(m, "Fenced object[%2d] = %p: %s "
|
||||
"%08x %08zx %08x %s %08x %08x %d",
|
||||
i, obj, get_pin_flag(obj_priv),
|
||||
obj_priv->gtt_offset,
|
||||
obj->size, obj_priv->stride,
|
||||
get_tiling_flag(obj_priv),
|
||||
obj->read_domains, obj->write_domain,
|
||||
obj_priv->last_rendering_seqno);
|
||||
if (obj->name)
|
||||
seq_printf(m, " (name: %d)", obj->name);
|
||||
seq_printf(m, "\n");
|
||||
}
|
||||
seq_printf(m, "Fenced object[%2d] = ", i);
|
||||
if (obj == NULL)
|
||||
seq_printf(m, "unused");
|
||||
else
|
||||
describe_obj(m, obj);
|
||||
seq_printf(m, "\n");
|
||||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -414,10 +496,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
|
|||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
struct intel_ring_buffer *ring;
|
||||
volatile u32 *hws;
|
||||
int i;
|
||||
|
||||
hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
|
||||
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
|
||||
hws = (volatile u32 *)ring->status_page.page_addr;
|
||||
if (hws == NULL)
|
||||
return 0;
|
||||
|
||||
|
@ -431,14 +515,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
|
|||
|
||||
static void i915_dump_object(struct seq_file *m,
|
||||
struct io_mapping *mapping,
|
||||
struct drm_i915_gem_object *obj_priv)
|
||||
struct drm_i915_gem_object *obj)
|
||||
{
|
||||
int page, page_count, i;
|
||||
|
||||
page_count = obj_priv->base.size / PAGE_SIZE;
|
||||
page_count = obj->base.size / PAGE_SIZE;
|
||||
for (page = 0; page < page_count; page++) {
|
||||
u32 *mem = io_mapping_map_wc(mapping,
|
||||
obj_priv->gtt_offset + page * PAGE_SIZE);
|
||||
obj->gtt_offset + page * PAGE_SIZE);
|
||||
for (i = 0; i < PAGE_SIZE; i += 4)
|
||||
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
|
||||
io_mapping_unmap(mem);
|
||||
|
@ -450,25 +534,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
|
|||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct drm_gem_object *obj;
|
||||
struct drm_i915_gem_object *obj_priv;
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
|
||||
obj = &obj_priv->base;
|
||||
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
|
||||
seq_printf(m, "--- gtt_offset = 0x%08x\n",
|
||||
obj_priv->gtt_offset);
|
||||
i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
|
||||
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
|
||||
if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
|
||||
seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
|
||||
i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -477,19 +557,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
|
|||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
int ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!dev_priv->render_ring.gem_object) {
|
||||
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
|
||||
if (!ring->obj) {
|
||||
seq_printf(m, "No ringbuffer setup\n");
|
||||
} else {
|
||||
u8 *virt = dev_priv->render_ring.virtual_start;
|
||||
u8 *virt = ring->virtual_start;
|
||||
uint32_t off;
|
||||
|
||||
for (off = 0; off < dev_priv->render_ring.size; off += 4) {
|
||||
for (off = 0; off < ring->size; off += 4) {
|
||||
uint32_t *ptr = (uint32_t *)(virt + off);
|
||||
seq_printf(m, "%08x : %08x\n", off, *ptr);
|
||||
}
|
||||
|
@ -504,19 +586,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
|
|||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
unsigned int head, tail;
|
||||
struct intel_ring_buffer *ring;
|
||||
|
||||
head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
|
||||
tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
|
||||
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
|
||||
if (ring->size == 0)
|
||||
return 0;
|
||||
|
||||
seq_printf(m, "RingHead : %08x\n", head);
|
||||
seq_printf(m, "RingTail : %08x\n", tail);
|
||||
seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
|
||||
seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
|
||||
seq_printf(m, "Ring %s:\n", ring->name);
|
||||
seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
|
||||
seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
|
||||
seq_printf(m, " Size : %08x\n", ring->size);
|
||||
seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
|
||||
seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
|
||||
if (IS_GEN6(dev)) {
|
||||
seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
|
||||
seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
|
||||
}
|
||||
seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
|
||||
seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const char *ring_str(int ring)
|
||||
{
|
||||
switch (ring) {
|
||||
case RING_RENDER: return " render";
|
||||
case RING_BSD: return " bsd";
|
||||
case RING_BLT: return " blt";
|
||||
default: return "";
|
||||
}
|
||||
}
|
||||
|
||||
static const char *pin_flag(int pinned)
|
||||
{
|
||||
if (pinned > 0)
|
||||
|
@ -547,6 +648,36 @@ static const char *purgeable_flag(int purgeable)
|
|||
return purgeable ? " purgeable" : "";
|
||||
}
|
||||
|
||||
static void print_error_buffers(struct seq_file *m,
|
||||
const char *name,
|
||||
struct drm_i915_error_buffer *err,
|
||||
int count)
|
||||
{
|
||||
seq_printf(m, "%s [%d]:\n", name, count);
|
||||
|
||||
while (count--) {
|
||||
seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s",
|
||||
err->gtt_offset,
|
||||
err->size,
|
||||
err->read_domains,
|
||||
err->write_domain,
|
||||
err->seqno,
|
||||
pin_flag(err->pinned),
|
||||
tiling_flag(err->tiling),
|
||||
dirty_flag(err->dirty),
|
||||
purgeable_flag(err->purgeable),
|
||||
ring_str(err->ring));
|
||||
|
||||
if (err->name)
|
||||
seq_printf(m, " (name: %d)", err->name);
|
||||
if (err->fence_reg != I915_FENCE_REG_NONE)
|
||||
seq_printf(m, " (fence: %d)", err->fence_reg);
|
||||
|
||||
seq_printf(m, "\n");
|
||||
err++;
|
||||
}
|
||||
}
|
||||
|
||||
static int i915_error_state(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
|
@ -568,41 +699,46 @@ static int i915_error_state(struct seq_file *m, void *unused)
|
|||
error->time.tv_usec);
|
||||
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
|
||||
seq_printf(m, "EIR: 0x%08x\n", error->eir);
|
||||
seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
|
||||
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
|
||||
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
seq_printf(m, "ERROR: 0x%08x\n", error->error);
|
||||
seq_printf(m, "Blitter command stream:\n");
|
||||
seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
|
||||
seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
|
||||
seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
|
||||
seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
|
||||
seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
|
||||
seq_printf(m, "Video (BSD) command stream:\n");
|
||||
seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
|
||||
seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
|
||||
seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
|
||||
seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
|
||||
seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
|
||||
}
|
||||
seq_printf(m, "Render command stream:\n");
|
||||
seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
|
||||
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
|
||||
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
|
||||
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
|
||||
seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
|
||||
if (INTEL_INFO(dev)->gen >= 4) {
|
||||
seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
|
||||
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
|
||||
seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
|
||||
}
|
||||
seq_printf(m, "seqno: 0x%08x\n", error->seqno);
|
||||
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
|
||||
seq_printf(m, " seqno: 0x%08x\n", error->seqno);
|
||||
|
||||
if (error->active_bo_count) {
|
||||
seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
|
||||
for (i = 0; i < 16; i++)
|
||||
seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
|
||||
|
||||
for (i = 0; i < error->active_bo_count; i++) {
|
||||
seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
|
||||
error->active_bo[i].gtt_offset,
|
||||
error->active_bo[i].size,
|
||||
error->active_bo[i].read_domains,
|
||||
error->active_bo[i].write_domain,
|
||||
error->active_bo[i].seqno,
|
||||
pin_flag(error->active_bo[i].pinned),
|
||||
tiling_flag(error->active_bo[i].tiling),
|
||||
dirty_flag(error->active_bo[i].dirty),
|
||||
purgeable_flag(error->active_bo[i].purgeable));
|
||||
if (error->active_bo)
|
||||
print_error_buffers(m, "Active",
|
||||
error->active_bo,
|
||||
error->active_bo_count);
|
||||
|
||||
if (error->active_bo[i].name)
|
||||
seq_printf(m, " (name: %d)", error->active_bo[i].name);
|
||||
if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
|
||||
seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
|
||||
|
||||
seq_printf(m, "\n");
|
||||
}
|
||||
}
|
||||
if (error->pinned_bo)
|
||||
print_error_buffers(m, "Pinned",
|
||||
error->pinned_bo,
|
||||
error->pinned_bo_count);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
|
||||
if (error->batchbuffer[i]) {
|
||||
|
@ -635,6 +771,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
|
|||
if (error->overlay)
|
||||
intel_overlay_print_error_state(m, error->overlay);
|
||||
|
||||
if (error->display)
|
||||
intel_display_print_error_state(m, dev, error->display);
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
|
||||
|
||||
|
@ -658,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
|
|||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
u16 rgvswctl = I915_READ16(MEMSWCTL);
|
||||
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
|
||||
|
||||
seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
|
||||
seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
|
||||
seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
|
||||
MEMSTAT_VID_SHIFT);
|
||||
seq_printf(m, "Current P-state: %d\n",
|
||||
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
|
||||
if (IS_GEN5(dev)) {
|
||||
u16 rgvswctl = I915_READ16(MEMSWCTL);
|
||||
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
|
||||
|
||||
seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
|
||||
seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
|
||||
seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
|
||||
MEMSTAT_VID_SHIFT);
|
||||
seq_printf(m, "Current P-state: %d\n",
|
||||
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
|
||||
} else if (IS_GEN6(dev)) {
|
||||
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
|
||||
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
|
||||
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
|
||||
int max_freq;
|
||||
|
||||
/* RPSTAT1 is in the GT power well */
|
||||
__gen6_force_wake_get(dev_priv);
|
||||
|
||||
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
|
||||
seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
|
||||
seq_printf(m, "Render p-state ratio: %d\n",
|
||||
(gt_perf_status & 0xff00) >> 8);
|
||||
seq_printf(m, "Render p-state VID: %d\n",
|
||||
gt_perf_status & 0xff);
|
||||
seq_printf(m, "Render p-state limit: %d\n",
|
||||
rp_state_limits & 0xff);
|
||||
|
||||
max_freq = (rp_state_cap & 0xff0000) >> 16;
|
||||
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
|
||||
max_freq * 100);
|
||||
|
||||
max_freq = (rp_state_cap & 0xff00) >> 8;
|
||||
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
|
||||
max_freq * 100);
|
||||
|
||||
max_freq = rp_state_cap & 0xff;
|
||||
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
|
||||
max_freq * 100);
|
||||
|
||||
__gen6_force_wake_put(dev_priv);
|
||||
} else {
|
||||
seq_printf(m, "no P-state info available\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
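
GEN6_RP_STATE_CAP packs three 8-bit frequency ratios, decoded above with masks and shifts. A worked example with a made-up register value — decode_rp_state_cap and the value 0x00060c12 are illustrative only:

static void decode_rp_state_cap(u32 rp_state_cap)	/* e.g. 0x00060c12 */
{
	int rpn = (rp_state_cap & 0xff0000) >> 16;	/* 0x06 -> RPN, lowest */
	int rp1 = (rp_state_cap & 0xff00) >> 8;		/* 0x0c -> RP1, nominal */
	int rp0 = rp_state_cap & 0xff;			/* 0x12 -> RP0, maximum */

	/* The debugfs code prints ratio * 100 as MHz, so this sample
	 * would read back as 600, 1200 and 1800 MHz. */
	printk(KERN_INFO "RPN %d RP1 %d RP0 %d\n",
	       rpn * 100, rp1 * 100, rp0 * 100);
}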

@@ -794,7 +969,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (IS_GEN5(dev))
	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

@@ -886,7 +1061,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, to_intel_bo(fb->obj));
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {

@@ -898,7 +1073,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, to_intel_bo(fb->obj));
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

@@ -943,7 +1118,6 @@ i915_wedged_write(struct file *filp,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1;

@@ -959,12 +1133,7 @@ i915_wedged_write(struct file *filp,
	}

	DRM_INFO("Manually setting wedged to %d\n", val);

	atomic_set(&dev_priv->mm.wedged, val);
	if (val) {
		wake_up_all(&dev_priv->irq_queue);
		queue_work(dev_priv->wq, &dev_priv->error_work);
	}
	i915_handle_error(dev, val);

	return cnt;
}

@@ -1028,9 +1197,15 @@ static struct drm_info_list i915_debugfs_list[] = {
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
	{"i915_batchbuffers", i915_batchbuffer_info, 0},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},

(The next file's diff is not shown here because of its large size.)

@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,

@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0, /* disabled due to buggy hardware */
	.has_bsd_ring = 1,
};

@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
};

@@ -244,10 +245,34 @@ void intel_detect_pch (struct drm_device *dev)
	}
}

void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
{
	int count;

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
		udelay(10);

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	count = 0;
	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
		udelay(10);
}

void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	POSTING_READ(FORCEWAKE);
}
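
__gen6_force_wake_get spins until any previous owner's ACK clears, requests the wake bit, then spins until the hardware acknowledges it; the put side simply releases the bit. The intended calling pattern looks like this — read_gt_reg is a sketch, mirroring the i915_safe_read helper later in this diff and the RPSTAT1 read in the debugfs code above:

static u32 read_gt_reg(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	__gen6_force_wake_get(dev_priv);	/* GT awake, values fresh */
	val = I915_READ(reg);
	__gen6_force_wake_put(dev_priv);	/* allow power-down again */

	return val;
}

Note the helpers keep no reference count, so callers must not nest get/put pairs.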

static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */

@@ -284,7 +309,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	drm_kms_helper_poll_disable(dev);

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)

@@ -304,6 +331,12 @@ static int i915_drm_thaw(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

@@ -332,6 +365,9 @@ int i915_resume(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

@@ -405,6 +441,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
	return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
}

/**
 * i965_reset - reset chip after a hang
 * @dev: drm device to reset

@@ -431,7 +475,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
	bool need_display = true;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (!mutex_trylock(&dev->struct_mutex))
		return -EBUSY;

	i915_gem_reset(dev);

@@ -439,6 +484,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else switch (INTEL_INFO(dev)->gen) {
	case 6:
		ret = gen6_do_reset(dev, flags);
		break;
	case 5:
		ret = ironlake_do_reset(dev, flags);
		break;

@@ -472,9 +520,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring = &dev_priv->render_ring;
		dev_priv->mm.suspended = 0;
		ring->init(dev, ring);

		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
		if (HAS_BSD(dev))
			dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
		if (HAS_BLT(dev))
			dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);

		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_irq_install(dev);

@@ -523,6 +576,9 @@ static int i915_pm_suspend(struct device *dev)
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(drm_dev);
	if (error)
		return error;

@@ -606,6 +662,8 @@ static struct drm_driver driver = {
	.device_is_agp = i915_driver_device_is_agp,
	.enable_vblank = i915_enable_vblank,
	.disable_vblank = i915_disable_vblank,
	.get_vblank_timestamp = i915_get_vblank_timestamp,
	.get_scanout_position = i915_get_crtc_scanoutpos,
	.irq_preinstall = i915_driver_irq_preinstall,
	.irq_postinstall = i915_driver_irq_postinstall,
	.irq_uninstall = i915_driver_irq_uninstall,

@@ -661,8 +719,6 @@ static int __init i915_init(void)

	driver.num_ioctls = i915_max_ioctl;

	i915_gem_shrinker_init();

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module pararmeter.

@@ -684,17 +740,11 @@ static int __init i915_init(void)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.suspend = i915_suspend;
		driver.resume = i915_resume;
	}

	return drm_init(&driver);
}

static void __exit i915_exit(void)
{
	i915_gem_shrinker_exit();
	drm_exit(&driver);
}

@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_gem_object *cur_obj;
	struct drm_i915_gem_object *cur_obj;
};

struct mem_block {

@@ -124,9 +124,9 @@ struct drm_i915_master_private {
#define I915_FENCE_REG_NONE -1

struct drm_i915_fence_reg {
	struct drm_gem_object *obj;
	struct list_head lru_list;
	bool gpu;
	struct drm_i915_gem_object *obj;
	uint32_t setup_seqno;
};

struct sdvo_device_mapping {

@@ -139,6 +139,8 @@ struct sdvo_device_mapping {
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	u32 eir;
	u32 pgtbl_er;

@@ -148,11 +150,23 @@ struct drm_i915_error_state {
	u32 ipehr;
	u32 instdone;
	u32 acthd;
	u32 error; /* gen6+ */
	u32 bcs_acthd; /* gen6+ blt engine */
	u32 bcs_ipehr;
	u32 bcs_ipeir;
	u32 bcs_instdone;
	u32 bcs_seqno;
	u32 vcs_acthd; /* gen6+ bsd engine */
	u32 vcs_ipehr;
	u32 vcs_ipeir;
	u32 vcs_instdone;
	u32 vcs_seqno;
	u32 instpm;
	u32 instps;
	u32 instdone1;
	u32 seqno;
	u64 bbaddr;
	u64 fence[16];
	struct timeval time;
	struct drm_i915_error_object {
		int page_count;

@@ -171,9 +185,11 @@ struct drm_i915_error_state {
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
	} *active_bo;
	u32 active_bo_count;
		u32 ring:4;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};

struct drm_i915_display_funcs {

@@ -207,7 +223,6 @@ struct intel_device_info {
	u8 is_broadwater : 1;
	u8 is_crestline : 1;
	u8 has_fbc : 1;
	u8 has_rc6 : 1;
	u8 has_pipe_cxsr : 1;
	u8 has_hotplug : 1;
	u8 cursor_needs_physical : 1;

@@ -243,6 +258,7 @@ typedef struct drm_i915_private {
	const struct intel_device_info *info;

	int has_gem;
	int relative_constants_mode;

	void __iomem *regs;

@@ -253,20 +269,15 @@ typedef struct drm_i915_private {
	} *gmbus;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer render_ring;
	struct intel_ring_buffer bsd_ring;
	struct intel_ring_buffer blt_ring;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t next_seqno;

	drm_dma_handle_t *status_page_dmah;
	void *seqno_page;
	dma_addr_t dma_status_page;
	uint32_t counter;
	unsigned int seqno_gfx_addr;
	drm_local_map_t hws_map;
	struct drm_gem_object *seqno_obj;
	struct drm_gem_object *pwrctx;
	struct drm_gem_object *renderctx;
	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;

	struct resource mch_res;

@@ -275,25 +286,17 @@ typedef struct drm_i915_private {
	int front_offset;
	int current_page;
	int page_flipping;
#define I915_DEBUG_READ (1<<0)
#define I915_DEBUG_WRITE (1<<1)
	unsigned long debug_flags;

	wait_queue_head_t irq_queue;
	atomic_t irq_received;
	/** Protects user_irq_refcount and irq_mask_reg */
	spinlock_t user_irq_lock;
	u32 trace_irq_seqno;

	/* protects the irq masks */
	spinlock_t irq_lock;
	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask_reg;
	u32 pipestat[2];
	/** splitted irq regs for graphics and display engine on Ironlake,
	    irq_mask_reg is still used for display irq. */
	u32 gt_irq_mask_reg;
	u32 gt_irq_enable_reg;
	u32 de_irq_enable_reg;
	u32 pch_irq_mask_reg;
	u32 pch_irq_enable_reg;
	u32 irq_mask;
	u32 gt_irq_mask;
	u32 pch_irq_mask;

	u32 hotplug_supported_mask;
	struct work_struct hotplug_work;

@@ -306,7 +309,7 @@ typedef struct drm_i915_private {
	int num_pipe;

	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
	struct timer_list hangcheck_timer;
	int hangcheck_count;
	uint32_t last_acthd;

@@ -530,23 +533,21 @@ typedef struct drm_i915_private {

	struct {
		/** Bridge to intel-gtt-ko */
		struct intel_gtt *gtt;
		const struct intel_gtt *gtt;
		/** Memory allocator for GTT stolen memory */
		struct drm_mm vram;
		struct drm_mm stolen;
		/** Memory allocator for GTT */
		struct drm_mm gtt_space;
		/** List of all objects in gtt_space. Used to restore gtt
		 *  mappings on resume */
		struct list_head gtt_list;
		/** End of mappable part of GTT */
		unsigned long gtt_mappable_end;

		struct io_mapping *gtt_mapping;
		int gtt_mtrr;

		/**
		 * Membership on list of all loaded devices, used to evict
		 * inactive buffers under memory pressure.
		 *
		 * Modifications should only be done whilst holding the
		 * shrink_list_lock spinlock.
		 */
		struct list_head shrink_list;
		struct shrinker inactive_shrinker;

		/**
		 * List of objects currently involved in rendering.

@@ -608,16 +609,6 @@ typedef struct drm_i915_private {
		 */
		struct delayed_work retire_work;

		/**
		 * Waiting sequence number, if any
		 */
		uint32_t waiting_gem_seqno;

		/**
		 * Last seq seen at irq time
		 */
		uint32_t irq_gem_seqno;

		/**
		 * Flag if the X Server, and thus DRM, is not currently in
		 * control of the device.

@@ -645,16 +636,11 @@ typedef struct drm_i915_private {
		/* storage for physical objects */
		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

		uint32_t flush_rings;

		/* accounting, useful for userland debugging */
		size_t object_memory;
		size_t pin_memory;
		size_t gtt_memory;
		size_t gtt_total;
		size_t mappable_gtt_total;
		size_t object_memory;
		u32 object_count;
		u32 pin_count;
		u32 gtt_count;
	} mm;
	struct sdvo_device_mapping sdvo_mappings[2];
	/* indicate whether the LVDS_BORDER should be enabled or not */

@@ -688,14 +674,14 @@ typedef struct drm_i915_private {
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	int c_m;
	int r_t;
	u8 corr;
	u64 last_count1;
	unsigned long last_time1;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	int c_m;
	int r_t;
	u8 corr;
	spinlock_t *mchdev_lock;

	enum no_fbc_reason no_fbc_reason;

@@ -709,20 +695,20 @@ typedef struct drm_i915_private {
	struct intel_fbdev *fbdev;
} drm_i915_private_t;

/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
	struct drm_gem_object base;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	struct list_head gtt_list;

	/** This object's place on the active/flushing/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place on GPU write list */
	struct list_head gpu_write_list;
	/** This object's place on eviction list */
	struct list_head evict_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active or flushing lists

@@ -737,6 +723,12 @@ struct drm_i915_gem_object {
	 */
	unsigned int dirty : 1;

	/**
	 * This is set if the object has been written to since the last
	 * GPU flush.
	 */
	unsigned int pending_gpu_write : 1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.

@@ -746,30 +738,16 @@ struct drm_i915_gem_object {
	 */
	signed int fence_reg : 5;

	/**
	 * Used for checking the object doesn't appear more than once
	 * in an execbuffer object list.
	 */
	unsigned int in_execbuffer : 1;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv : 2;

	/**
	 * Refcount for the pages array. With the current locking scheme, there
	 * are at most two concurrent users: Binding a bo to the gtt and
	 * pwrite/pread using physical addresses. So two bits for a maximum
	 * of two users are enough.
	 */
	unsigned int pages_refcount : 2;
#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode : 2;
	unsigned int tiling_changed : 1;

	/** How many users have pinned this object in GTT space. The following
	 *  users can each hold at most one reference: pwrite/pread, pin_ioctl

@@ -783,11 +761,40 @@ struct drm_i915_gem_object {
	unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/** AGP memory structure for our GTT binding. */
	DRM_AGP_MEM *agp_mem;
	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable : 1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separate for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable : 1;
	unsigned int pin_mappable : 1;

	/*
	 * Is the GPU currently using a fence to access this buffer,
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	struct page **pages;

	/**
	 * DMAR support
	 */
	struct scatterlist *sg_list;
	int num_sg;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;

	/**
	 * Current offset of the object in GTT space.
	 *

@@ -795,16 +802,13 @@ struct drm_i915_gem_object {
	 */
	uint32_t gtt_offset;

	/* Which ring is refering to is this object */
	struct intel_ring_buffer *ring;

	/**
	 * Fake offset for use by mmap(2)
	 */
	uint64_t mmap_offset;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_rendering_seqno;
	struct intel_ring_buffer *ring;

	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;
	struct intel_ring_buffer *last_fenced_ring;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

@@ -880,6 +884,68 @@ enum intel_chip_family {
	CHIP_I965 = 0x08,
};

#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)

#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)

#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))

#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)

#include "i915_trace.h"

extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;

@@ -907,8 +973,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *boxes,
			 int i, int DR1, int DR4);
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int i915_reset(struct drm_device *dev, u8 flags);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);

@@ -918,6 +984,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,

@@ -953,6 +1020,13 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void intel_enable_asle (struct drm_device *dev);
int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags);

int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos);

#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);

@@ -1017,15 +1091,28 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
					      size_t size);
void i915_gem_flush_ring(struct drm_device *dev,
			 struct intel_ring_buffer *ring,
			 uint32_t invalidate_domains,
			 uint32_t flush_domains);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     uint32_t alignment,
				     bool map_and_fenceable);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
						bool interruptible);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring,
				    u32 seqno);

/**
 * Returns true if seq1 is later than seq2.
 */

@@ -1035,73 +1122,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	return (int32_t)(seq1 - seq2) >= 0;
}
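
The cast to int32_t makes the comparison wraparound-safe: what matters is the signed distance between the two sequence numbers, not their magnitudes. A worked, standalone example of the same trick:

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* 0x00000002 was issued after 0xfffffffe, despite being smaller. */
	assert(seqno_passed(0x00000002u, 0xfffffffeu));		/* diff = +4 */
	assert(!seqno_passed(0xfffffffeu, 0x00000002u));	/* diff = -4 */
	return 0;
}

The comparison holds as long as the two seqnos are less than 2^31 apart, which holds in practice because requests are retired long before the 32-bit counter wraps.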

int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
				  bool interruptible);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
				  bool interruptible);
static inline u32
i915_gem_next_request_seqno(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return ring->outstanding_lazy_request = dev_priv->next_seqno;
}

int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
					   struct intel_ring_buffer *pipelined,
					   bool interruptible);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
			       uint32_t read_domains,
			       uint32_t write_domain);
int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
			      bool interruptible);
int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
					    uint32_t read_domains,
					    uint32_t write_domain);
int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
					   bool interruptible);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end);
int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
			  struct drm_file *file_priv,
			  struct drm_i915_gem_request *request,
			  struct intel_ring_buffer *ring);
int i915_do_wait_request(struct drm_device *dev,
			 uint32_t seqno,
			 bool interruptible,
			 struct intel_ring_buffer *ring);
void i915_gem_do_init(struct drm_device *dev,
		      unsigned long start,
		      unsigned long mappable_end,
		      unsigned long end);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_i915_gem_request *request,
				  struct intel_ring_buffer *ring);
int __must_check i915_do_wait_request(struct drm_device *dev,
				      uint32_t seqno,
				      bool interruptible,
				      struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
				      int write);
int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
					 bool pipelined);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_gem_object *obj,
				struct drm_i915_gem_object *obj,
				int id,
				int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj);
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);

/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
int i915_gem_evict_everything(struct drm_device *dev);
int i915_gem_evict_inactive(struct drm_device *dev);
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
					  unsigned alignment, bool mappable);
int __must_check i915_gem_evict_everything(struct drm_device *dev,
					   bool purgeable_only);
int __must_check i915_gem_evict_inactive(struct drm_device *dev,
					 bool purgeable_only);

/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
		    int tiling_mode);
bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
				     int tiling_mode);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
			  const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
				     int handle);
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
			  const char *where, uint32_t mark);

/* i915_debugfs.c */

@@ -1163,6 +1265,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);

@@ -1170,79 +1273,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct seq_file *m,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
#endif

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))
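
With LP_RING in place, the legacy emit macros become thin wrappers over the intel_ring_buffer API, and intel_ring_begin can now fail, so callers are expected to check it. A sketch of the resulting calling convention — emit_flush is an illustrative name; MI_FLUSH and MI_NOOP are existing command macros:

static int emit_flush(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = BEGIN_LP_RING(2);		/* can fail, e.g. if the GPU is wedged */
	if (ret)
		return ret;
	OUT_RING(MI_FLUSH);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();		/* writes the new tail pointer */
	return 0;
}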

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
	    == NULL) \
		LOCK_TEST_WITH_RETURN(dev, file_priv); \
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
	if (LP_RING(dev->dev_private)->obj == NULL) \
		LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)

static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)

#define __i915_read(x, y) \
static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	u##x val = read##y(dev_priv->regs + reg); \
	trace_i915_reg_rw('R', reg, val, sizeof(val)); \
	return val; \
}
__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	trace_i915_reg_rw('W', reg, val, sizeof(val)); \
	write##y(val, dev_priv->regs + reg); \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
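
The two generator macros stamp out one traced accessor per access width. After preprocessing, __i915_read(32, l) produces roughly:

static inline u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = readl(dev_priv->regs + reg);		/* MMIO read */
	trace_i915_reg_rw('R', reg, val, sizeof(val));	/* tracepoint */
	return val;
}

so every traced register access, of any width, funnels through the single i915_reg_rw tracepoint, while the _NOTRACE variants defined below bypass it for hot paths such as POSTING_READ.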
|
||||
|
||||
#define I915_READ8(reg) i915_read8(dev_priv, (reg))
|
||||
#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
|
||||
|
||||
#define I915_READ16(reg) i915_read16(dev_priv, (reg))
|
||||
#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
|
||||
#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
|
||||
#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
|
||||
|
||||
#define I915_READ(reg) i915_read32(dev_priv, (reg))
|
||||
#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
|
||||
#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
|
||||
#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
|
||||
|
||||
#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
|
||||
#define I915_READ64(reg) i915_read64(dev_priv, (reg))
|
||||
|
||||
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
|
||||
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
|
||||
|
||||
|
||||
/* On SNB platform, before reading ring registers forcewake bit
|
||||
* must be set to prevent GT core from power down and stale values being
|
||||
* returned.
|
||||
*/
|
||||
void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
|
||||
void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
|
||||
static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = readl(dev_priv->regs + reg);
|
||||
if (dev_priv->debug_flags & I915_DEBUG_READ)
|
||||
printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
|
||||
if (dev_priv->info->gen >= 6) {
|
||||
__gen6_force_wake_get(dev_priv);
|
||||
val = I915_READ(reg);
|
||||
__gen6_force_wake_put(dev_priv);
|
||||
} else
|
||||
val = I915_READ(reg);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
|
||||
u32 val)
|
||||
static inline void
|
||||
i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
|
||||
{
|
||||
writel(val, dev_priv->regs + reg);
|
||||
if (dev_priv->debug_flags & I915_DEBUG_WRITE)
|
||||
printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
|
||||
/* Trace down the write operation before the real write */
|
||||
trace_i915_reg_rw('W', reg, val, len);
|
||||
switch (len) {
|
||||
case 8:
|
||||
writeq(val, dev_priv->regs + reg);
|
||||
break;
|
||||
case 4:
|
||||
writel(val, dev_priv->regs + reg);
|
||||
break;
|
||||
case 2:
|
||||
writew(val, dev_priv->regs + reg);
|
||||
break;
|
||||
case 1:
|
||||
writeb(val, dev_priv->regs + reg);
|
||||
break;
|
||||
}
|
||||
}

#define I915_READ(reg)		i915_read(dev_priv, (reg))
#define I915_WRITE(reg, val)	i915_write(dev_priv, (reg), (val))
#define I915_READ16(reg)	readw(dev_priv->regs + (reg))
#define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
#define I915_READ8(reg)		readb(dev_priv->regs + (reg))
#define I915_WRITE8(reg, val)	writeb(val, dev_priv->regs + (reg))
#define I915_WRITE64(reg, val)	writeq(val, dev_priv->regs + (reg))
#define I915_READ64(reg)	readq(dev_priv->regs + (reg))
#define POSTING_READ(reg)	(void)I915_READ(reg)
#define POSTING_READ16(reg)	(void)I915_READ16(reg)

#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
				I915_DEBUG_WRITE)
#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
				 I915_DEBUG_WRITE))

#define I915_VERBOSE 0

#define BEGIN_LP_RING(n) do {						\
	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
	if (I915_VERBOSE)						\
		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
	intel_ring_begin(dev, &dev_priv__->render_ring, (n));		\
} while (0)


#define OUT_RING(x) do {						\
	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
	if (I915_VERBOSE)						\
		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
	intel_ring_emit(dev, &dev_priv__->render_ring, x);		\
} while (0)

#define ADVANCE_LP_RING() do {						\
	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
	if (I915_VERBOSE)						\
		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
			  dev_priv__->render_ring.tail);		\
	intel_ring_advance(dev, &dev_priv__->render_ring);		\
} while (0)
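
/* Editor's note: a usage sketch for the legacy LP ring macros above. They
 * pick up a "dev" pointer from the enclosing scope, as their bodies show;
 * MI_FLUSH and MI_NOOP are the usual MI commands from i915_reg.h.
 */
	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();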

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1403,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
			(dev_priv->render_ring.status_page.page_addr))[reg])
			(LP_RING(dev_priv)->status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX		0x20
#define I915_BREADCRUMB_INDEX		0x21

#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)

#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)

#define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev)	(INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev)	(INTEL_INFO(dev)->has_rc6)

#define HAS_PCH_SPLIT(dev)	(IS_GEN5(dev) || IS_GEN6(dev))
#define HAS_PIPE_CONTROL(dev)	(IS_GEN5(dev) || IS_GEN6(dev))

#define INTEL_PCH_TYPE(dev)	(((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev)	(INTEL_PCH_TYPE(dev) == PCH_IBX)

#define PRIMARY_RINGBUFFER_SIZE	(128*1024)

#endif

[Diff for the next file is not shown because of its large size.]

@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
}

void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
		     const char *where, uint32_t mark)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page;

	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
	DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
		int page_len, chunk, chunk_len;

@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
			chunk_len = page_len - chunk;
			if (chunk_len > 128)
				chunk_len = 128;
			i915_gem_dump_page(obj_priv->pages[page],
			i915_gem_dump_page(obj->pages[page],
					   chunk, chunk + chunk_len,
					   obj_priv->gtt_offset +
					   obj->gtt_offset +
					   page * PAGE_SIZE,
					   mark);
		}

@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,

#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_device *dev = obj->base.dev;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
		 __func__, obj, obj_priv->gtt_offset, handle,
		 __func__, obj, obj->gtt_offset, handle,
		 obj->size / 1024);

	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
			      obj->size);
	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;

@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
		backing_map = kmap_atomic(obj->pages[page], KM_USER0);

		if (backing_map == NULL) {
			DRM_ERROR("failed to map backing page\n");

@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj_priv->gtt_offset +
					 (int)(obj->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				if (bad_count++ >= 8) {

@@ -32,28 +32,36 @@
#include "i915_drm.h"

static bool
mark_free(struct drm_i915_gem_object *obj_priv,
	  struct list_head *unwind)
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	list_add(&obj_priv->evict_list, unwind);
	drm_gem_object_reference(&obj_priv->base);
	return drm_mm_scan_add_block(obj_priv->gtt_space);
	list_add(&obj->exec_list, unwind);
	drm_gem_object_reference(&obj->base);
	return drm_mm_scan_add_block(obj->gtt_space);
}

int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, bool mappable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	i915_gem_retire_requests(dev);

	/* Re-check for free space after retiring requests */
	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
			       min_size, alignment, 0))
		return 0;
	if (mappable) {
		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						min_size, alignment, 0,
						dev_priv->mm.gtt_mappable_end,
						0))
			return 0;
	} else {
		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
				       min_size, alignment, 0))
			return 0;
	}

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.

@@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
	 */

	INIT_LIST_HEAD(&unwind_list);
	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
					    alignment, 0,
					    dev_priv->mm.gtt_mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj_priv, &unwind_list))
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		/* Does the object require an outstanding flush? */
		if (obj_priv->base.write_domain || obj_priv->pin_count)
		if (obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Finally add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
		if (obj_priv->pin_count)
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
		if (obj->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
		if (mark_free(obj, &unwind_list))
			goto found;
	}
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		if (! obj_priv->base.write_domain || obj_priv->pin_count)
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (! obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
	list_for_each_entry(obj, &unwind_list, exec_list) {
		ret = drm_mm_scan_remove_block(obj->gtt_space);
		BUG_ON(ret);
		drm_gem_object_unreference(&obj_priv->base);
		drm_gem_object_unreference(&obj->base);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.

@@ -131,33 +144,33 @@ found:
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj_priv = list_first_entry(&unwind_list,
					    struct drm_i915_gem_object,
					    evict_list);
		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
			list_move(&obj_priv->evict_list, &eviction_list);
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (drm_mm_scan_remove_block(obj->gtt_space)) {
			list_move(&obj->exec_list, &eviction_list);
			continue;
		}
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj_priv = list_first_entry(&eviction_list,
					    struct drm_i915_gem_object,
					    evict_list);
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(&obj_priv->base);
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);
			ret = i915_gem_object_unbind(obj);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}

int
i915_gem_evict_everything(struct drm_device *dev)
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

@@ -176,36 +189,22 @@ i915_gem_evict_everything(struct drm_device *dev)

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	ret = i915_gem_evict_inactive(dev);
	if (ret)
		return ret;

	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	BUG_ON(!lists_empty);

	return 0;
	return i915_gem_evict_inactive(dev, purgeable_only);
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;

	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
					mm_list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list, mm_list) {
		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
			int ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}


[Diff for the next file is not shown because of its large size.]
@@ -0,0 +1,99 @@
/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);

		if (dev_priv->mm.gtt->needs_dmar) {
			BUG_ON(!obj->sg_list);

			intel_gtt_insert_sg_entries(obj->sg_list,
						    obj->num_sg,
						    obj->gtt_space->start
							>> PAGE_SHIFT,
						    obj->agp_type);
		} else
			intel_gtt_insert_pages(obj->gtt_space->start
						   >> PAGE_SHIFT,
					       obj->base.size >> PAGE_SHIFT,
					       obj->pages,
					       obj->agp_type);
	}

	intel_gtt_chipset_flush();
}

int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->mm.gtt->needs_dmar) {
		ret = intel_gtt_map_memory(obj->pages,
					   obj->base.size >> PAGE_SHIFT,
					   &obj->sg_list,
					   &obj->num_sg);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    obj->agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       obj->agp_type);

	return 0;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.gtt->needs_dmar) {
		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
		obj->sg_list = NULL;
		obj->num_sg = 0;
	}

	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);
}
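
/* Editor's note: a hedged sketch of how a caller in the bind path might use
 * the helpers in this new file. The drm_mm_get_block/drm_mm_put_block names
 * are assumed from the drm_mm allocator used elsewhere in this series, and
 * error handling is trimmed.
 */
	obj->gtt_space = drm_mm_get_block(free_space, size, alignment);
	if (obj->gtt_space && i915_gem_gtt_bind_object(obj)) {
		drm_mm_put_block(obj->gtt_space);	/* undo on failure */
		obj->gtt_space = NULL;
	}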

@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}

/* Check pitch constraints for all chips & tiling formats */
bool
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
	int tile_width;

@@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
	return true;
}

bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
/* Is the current GTT allocation valid for the change in tiling? */
static bool
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (obj_priv->gtt_space == NULL)
		return true;
	u32 size;

	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (INTEL_INFO(dev)->gen >= 4)
	if (INTEL_INFO(obj->base.dev)->gen >= 4)
		return true;

	if (obj_priv->gtt_offset & (obj->size - 1))
		return false;

	if (IS_GEN3(dev)) {
		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
	if (INTEL_INFO(obj->base.dev)->gen == 3) {
		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
			return false;
	} else {
		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
			return false;
	}

	/*
	 * Older chips need the object aligned to the size of the smallest
	 * fence register that can contain it.
	 */
	if (INTEL_INFO(obj->base.dev)->gen == 3)
		size = 1024*1024;
	else
		size = 512*1024;

	while (size < obj->base.size)
		size <<= 1;

	if (obj->gtt_space->size != size)
		return false;

	if (obj->gtt_offset & (size - 1))
		return false;

	return true;
}
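
/* Editor's note: a worked instance of the sizing loop above, under the
 * per-gen minimums it states. A hypothetical 720 KiB object starts from the
 * 512 KiB pre-gen3 minimum and doubles once, so its GTT node must be exactly
 * 1 MiB in size and 1 MiB aligned (gen3's 1 MiB minimum covers it directly).
 */
	u32 size = 512*1024;		/* pre-gen3 minimum fence size */
	u32 obj_size = 720*1024;	/* hypothetical object size */
	while (size < obj_size)
		size <<= 1;		/* ends at 1 MiB */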

@@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
		   struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(obj);
	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	if (obj_priv->pin_count) {
		drm_gem_object_unreference_unlocked(obj);
	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

@@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj_priv->tiling_mode ||
	    args->stride != obj_priv->stride) {
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
			ret = i915_gem_object_unbind(obj);
		else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
			ret = i915_gem_object_put_fence_reg(obj, true);
		else
			i915_gem_release_mmap(obj);
		i915_gem_release_mmap(obj);

		if (ret != 0) {
			args->tiling_mode = obj_priv->tiling_mode;
			args->stride = obj_priv->stride;
			goto err;
		}
		obj->map_and_fenceable =
			obj->gtt_space == NULL ||
			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
			 i915_gem_object_fence_ok(obj, args->tiling_mode));

		obj_priv->tiling_mode = args->tiling_mode;
		obj_priv->stride = args->stride;
		obj->tiling_changed = true;
		obj->tiling_mode = args->tiling_mode;
		obj->stride = args->stride;
	}
err:
	drm_gem_object_unreference(obj);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return ret;
	return 0;
}

/**

@@ -359,22 +364,20 @@ err:
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
		   struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	mutex_lock(&dev->struct_mutex);

	args->tiling_mode = obj_priv->tiling_mode;
	switch (obj_priv->tiling_mode) {
	args->tiling_mode = obj->tiling_mode;
	switch (obj->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;

@@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(obj);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return 0;

@@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page)
}

void
i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size >> PAGE_SHIFT;
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
		return;

	if (obj_priv->bit_17 == NULL)
	if (obj->bit_17 == NULL)
		return;

	for (i = 0; i < page_count; i++) {
		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
		char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
		if ((new_bit_17 & 0x1) !=
		    (test_bit(i, obj_priv->bit_17) != 0)) {
			i915_gem_swizzle_page(obj_priv->pages[i]);
			set_page_dirty(obj_priv->pages[i]);
		    (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(obj->pages[i]);
			set_page_dirty(obj->pages[i]);
		}
	}
}

void
i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size >> PAGE_SHIFT;
	int page_count = obj->base.size >> PAGE_SHIFT;
	int i;

	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
		return;

	if (obj_priv->bit_17 == NULL) {
		obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
	if (obj->bit_17 == NULL) {
		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
					   sizeof(long), GFP_KERNEL);
		if (obj_priv->bit_17 == NULL) {
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;

@@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
	}

	for (i = 0; i < page_count; i++) {
		if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
			__set_bit(i, obj_priv->bit_17);
		if (page_to_phys(obj->pages[i]) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj_priv->bit_17);
			__clear_bit(i, obj->bit_17);
	}
}

[Diff for the next file is not shown because of its large size.]

@@ -78,6 +78,12 @@
#define  GRDOM_RENDER	(1<<2)
#define  GRDOM_MEDIA	(3<<2)

#define GEN6_GDRST	0x941c
#define  GEN6_GRDOM_FULL	(1 << 0)
#define  GEN6_GRDOM_RENDER	(1 << 1)
#define  GEN6_GRDOM_MEDIA	(1 << 2)
#define  GEN6_GRDOM_BLT		(1 << 3)
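
/* Editor's note: a hedged sketch of how the reset domains above are driven:
 * request a full soft reset via GEN6_GDRST, then poll until the hardware
 * clears the bit. The wait_for() polling helper and the 500 ms budget are
 * assumptions, not part of this hunk.
 */
	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
	if (wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500))
		DRM_ERROR("gen6 full GPU reset timed out\n");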

/* VGA stuff */

#define VGA_ST01_MDA 0x3ba

@@ -158,12 +164,23 @@
#define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
#define   MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)
/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
 * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
 *   simply ignores the register load under certain conditions.
 * - One can actually load arbitrarily many registers: simply issue x
 *   address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
 */
#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
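
/* Editor's note: an emission sketch following the two rules in the comment
 * above -- MI_NOOP first, then one MI_LOAD_REGISTER_IMM carrying two
 * address/value pairs. The ring helpers match the intel_ring_begin/emit/
 * advance API used earlier in this diff; the register/value pairs are
 * purely illustrative.
 */
	intel_ring_begin(dev, ring, 6);
	intel_ring_emit(dev, ring, MI_NOOP);	/* required before the load */
	intel_ring_emit(dev, ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(dev, ring, MI_MODE);	/* address/value pair #1 */
	intel_ring_emit(dev, ring, VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH);
	intel_ring_emit(dev, ring, GFX_MODE);	/* address/value pair #2 */
	intel_ring_emit(dev, ring, GFX_REPLAY_MODE << 16 | GFX_REPLAY_MODE);
	intel_ring_advance(dev, ring);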
#define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
#define   MI_BATCH_NON_SECURE	(1)
#define   MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
#define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
#define  MI_SEMAPHORE_UPDATE	    (1<<21)
#define  MI_SEMAPHORE_COMPARE	    (1<<20)
#define  MI_SEMAPHORE_REGISTER	    (1<<18)
/*
 * 3D instructions used by the kernel
 */

@@ -256,10 +273,6 @@
 * Instruction and interrupt control regs
 */
#define PGTBL_ER	0x02024
#define PRB0_TAIL	0x02030
#define PRB0_HEAD	0x02034
#define PRB0_START	0x02038
#define PRB0_CTL	0x0203c
#define RENDER_RING_BASE	0x02000
#define BSD_RING_BASE		0x04000
#define GEN6_BSD_RING_BASE	0x12000

@@ -268,9 +281,13 @@
#define RING_HEAD(base)		((base)+0x34)
#define RING_START(base)	((base)+0x38)
#define RING_CTL(base)		((base)+0x3c)
#define RING_SYNC_0(base)	((base)+0x40)
#define RING_SYNC_1(base)	((base)+0x44)
#define RING_MAX_IDLE(base)	((base)+0x54)
#define RING_HWS_PGA(base)	((base)+0x80)
#define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
#define RING_ACTHD(base)	((base)+0x74)
#define RING_NOPID(base)	((base)+0x94)
#define TAIL_ADDR		0x001FFFF8
#define HEAD_WRAP_COUNT		0xFFE00000
#define HEAD_WRAP_ONE		0x00200000

@@ -285,10 +302,17 @@
#define RING_INVALID		0x00000000
#define RING_WAIT_I8XX		(1<<0) /* gen2, PRBx_HEAD */
#define RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE	(1<<10) /* gen6+ */
#if 0
#define PRB0_TAIL	0x02030
#define PRB0_HEAD	0x02034
#define PRB0_START	0x02038
#define PRB0_CTL	0x0203c
#define PRB1_TAIL	0x02040 /* 915+ only */
#define PRB1_HEAD	0x02044 /* 915+ only */
#define PRB1_START	0x02048 /* 915+ only */
#define PRB1_CTL	0x0204c /* 915+ only */
#endif
#define IPEIR_I965	0x02064
#define IPEHR_I965	0x02068
#define INSTDONE_I965	0x0206c

@@ -305,11 +329,42 @@
#define INSTDONE	0x02090
#define NOPID		0x02094
#define HWSTAM		0x02098
#define VCS_INSTDONE	0x1206C
#define VCS_IPEIR	0x12064
#define VCS_IPEHR	0x12068
#define VCS_ACTHD	0x12074
#define BCS_INSTDONE	0x2206C
#define BCS_IPEIR	0x22064
#define BCS_IPEHR	0x22068
#define BCS_ACTHD	0x22074

#define ERROR_GEN6	0x040a0

/* GM45+ chicken bits -- debug workaround bits that may be required
 * for various sorts of correct behavior.  The top 16 bits of each are
 * the enables for writing to the corresponding low bit.
 */
#define _3D_CHICKEN	0x02084
#define _3D_CHICKEN2	0x0208c
/* Disables pipelining of read flushes past the SF-WIZ interface.
 * Required on all Ironlake steppings according to the B-Spec, but the
 * particular danger of not doing so is not specified.
 */
# define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
#define _3D_CHICKEN3	0x02090
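
/* Editor's note: a hedged example of the masked-write convention the chicken
 * bit comment above describes -- setting a low bit requires also writing its
 * enable in the top 16 bits, so the idiom repeats the mask shifted up by 16.
 * The choice of bit is illustrative.
 */
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);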

#define MI_MODE		0x0209c
# define VS_TIMER_DISPATCH				(1 << 6)
# define MI_FLUSH_ENABLE				(1 << 11)

#define GFX_MODE	0x02520
#define   GFX_RUN_LIST_ENABLE		(1<<15)
#define   GFX_TLB_INVALIDATE_ALWAYS	(1<<13)
#define   GFX_SURFACE_FAULT_ENABLE	(1<<12)
#define   GFX_REPLAY_MODE		(1<<11)
#define   GFX_PSMI_GRANULARITY		(1<<10)
#define   GFX_PPGTT_ENABLE		(1<<9)

#define SCPD0		0x0209c /* 915+ only */
#define IER		0x020a0
#define IIR		0x020a4

@@ -461,7 +516,7 @@
#define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR	(1 << 3)

#define GEN6_BSD_IMR			0x120a8
#define   GEN6_BSD_IMR_USER_INTERRUPT	(1 << 12)
#define   GEN6_BSD_USER_INTERRUPT	(1 << 12)

#define GEN6_BSD_RNCID			0x12198

@@ -541,6 +596,18 @@

#define ILK_DISPLAY_CHICKEN1	0x42000
#define   ILK_FBCQ_DIS		(1<<22)
#define   ILK_PABSTRETCH_DIS	(1<<21)


/*
 * Framebuffer compression for Sandybridge
 *
 * The following two registers are of type GTTMMADR
 */
#define SNB_DPFC_CTL_SA		0x100100
#define   SNB_CPU_FENCE_ENABLE	(1<<29)
#define DPFC_CPU_FENCE_OFFSET	0x100104


/*
 * GPIO regs

@@ -900,6 +967,8 @@
 */
#define MCHBAR_MIRROR_BASE	0x10000

#define MCHBAR_MIRROR_BASE_SNB	0x140000

/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC			0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL		(0 << 0)

@@ -1119,6 +1188,10 @@
#define DDRMPLL1		0X12c20
#define PEG_BAND_GAP_DATA	0x14d68

#define GEN6_GT_PERF_STATUS	0x145948
#define GEN6_RP_STATE_LIMITS	0x145994
#define GEN6_RP_STATE_CAP	0x145998

/*
 * Logical Context regs
 */

@@ -1168,7 +1241,6 @@
#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)

/* VGA port control */

@@ -2182,8 +2254,10 @@
#define   PIPE_6BPC				(2 << 5)
#define   PIPE_12BPC				(3 << 5)

#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
#define PIPEDSL(pipe)  _PIPE(pipe, PIPEADSL, PIPEBDSL)
#define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)

#define DSPARB			0x70030
#define   DSPARB_CSTART_MASK	(0x7f << 7)

@@ -2291,6 +2365,40 @@

#define ILK_FIFO_LINE_SIZE	64

/* Sandybridge watermark (WM) parameters */
#define SNB_DISPLAY_FIFO	128
#define SNB_DISPLAY_MAXWM	0x7f	/* bit 16:22 */
#define SNB_DISPLAY_DFTWM	8
#define SNB_CURSOR_FIFO		32
#define SNB_CURSOR_MAXWM	0x1f	/* bit 4:0 */
#define SNB_CURSOR_DFTWM	8

#define SNB_DISPLAY_SR_FIFO	512
#define SNB_DISPLAY_MAX_SRWM	0x1ff	/* bit 16:8 */
#define SNB_DISPLAY_DFT_SRWM	0x3f
#define SNB_CURSOR_SR_FIFO	64
#define SNB_CURSOR_MAX_SRWM	0x3f	/* bit 5:0 */
#define SNB_CURSOR_DFT_SRWM	8

#define SNB_FBC_MAX_SRWM	0xf	/* bit 23:20 */

#define SNB_FIFO_LINE_SIZE	64


/* the register from which all of the watermark latency values are read */
#define SSKPD			0x5d10
#define SSKPD_WM_MASK		0x3f
#define SSKPD_WM0_SHIFT		0
#define SSKPD_WM1_SHIFT		8
#define SSKPD_WM2_SHIFT		16
#define SSKPD_WM3_SHIFT		24

#define SNB_LATENCY(shift)	(I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
#define SNB_READ_WM0_LATENCY()	SNB_LATENCY(SSKPD_WM0_SHIFT)
#define SNB_READ_WM1_LATENCY()	SNB_LATENCY(SSKPD_WM1_SHIFT)
#define SNB_READ_WM2_LATENCY()	SNB_LATENCY(SSKPD_WM2_SHIFT)
#define SNB_READ_WM3_LATENCY()	SNB_LATENCY(SSKPD_WM3_SHIFT)

/*
 * The two pipe frame counter registers are not synchronized, so
 * reading a stable value is somewhat tricky. The following code
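 * (comment cut off by the hunk boundary)
 */
/* Editor's note: the trick the comment alludes to is the classic double-read
 * idiom sketched below -- read the high word twice around the low word and
 * retry if it moved. The register/mask names are assumed for illustration.
 */
	u32 high1, high2, low;
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);	/* high word rolled over: retry */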

@@ -2351,6 +2459,10 @@
#define CURBBASE		0x700c4
#define CURBPOS			0x700c8

#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)

/* Display A control */
#define DSPACNTR		0x70180
#define   DISPLAY_PLANE_ENABLE			(1<<31)

@@ -2589,6 +2701,8 @@
#define GTIER   0x4401c

#define ILK_DISPLAY_CHICKEN2	0x42004
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define  ILK_ELPIN_409_SELECT	(1 << 25)
#define  ILK_DPARB_GATE	(1<<22)
#define  ILK_VSDPFD_FULL	(1<<21)
#define ILK_DISPLAY_CHICKEN_FUSES	0x42014

@@ -2600,6 +2714,8 @@
#define ILK_DESKTOP			(1<<23)
#define ILK_DSPCLK_GATE		0x42020
#define  ILK_DPARB_CLK_GATE	(1<<5)
#define  ILK_DPFD_CLK_GATE	(1<<7)

/* According to the spec, bits 7/8/9 of 0x42020 must be set to enable FBC */
#define   ILK_CLK_FBC		(1<<7)
#define   ILK_DPFC_DIS1		(1<<8)

@@ -2679,6 +2795,7 @@
#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)

#define PCH_FPA0                0xc6040
#define  FP_CB_TUNE		(0x3<<22)
#define PCH_FPA1                0xc6044
#define PCH_FPB0                0xc6048
#define PCH_FPB1                0xc604c

@@ -3063,4 +3180,66 @@
#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)

#define FORCEWAKE				0xA18C
#define FORCEWAKE_ACK				0x130090

#define GEN6_RPNSWREQ				0xA008
#define   GEN6_TURBO_DISABLE			(1<<31)
#define   GEN6_FREQUENCY(x)			((x)<<25)
#define   GEN6_OFFSET(x)			((x)<<19)
#define   GEN6_AGGRESSIVE_TURBO			(0<<15)
#define GEN6_RC_VIDEO_FREQ			0xA00C
#define GEN6_RC_CONTROL				0xA090
#define   GEN6_RC_CTL_RC6pp_ENABLE		(1<<16)
#define   GEN6_RC_CTL_RC6p_ENABLE		(1<<17)
#define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
#define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
#define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
#define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
#define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
#define GEN6_RP_DOWN_TIMEOUT			0xA010
#define GEN6_RP_INTERRUPT_LIMITS		0xA014
#define GEN6_RPSTAT1				0xA01C
#define GEN6_RP_CONTROL				0xA024
#define   GEN6_RP_MEDIA_TURBO			(1<<11)
#define   GEN6_RP_USE_NORMAL_FREQ		(1<<9)
#define   GEN6_RP_MEDIA_IS_GFX			(1<<8)
#define   GEN6_RP_ENABLE			(1<<7)
#define   GEN6_RP_UP_BUSY_MAX			(0x2<<3)
#define   GEN6_RP_DOWN_BUSY_MIN			(0x2<<0)
#define GEN6_RP_UP_THRESHOLD			0xA02C
#define GEN6_RP_DOWN_THRESHOLD			0xA030
#define GEN6_RP_UP_EI				0xA068
#define GEN6_RP_DOWN_EI				0xA06C
#define GEN6_RP_IDLE_HYSTERSIS			0xA070
#define GEN6_RC_STATE				0xA094
#define GEN6_RC1_WAKE_RATE_LIMIT		0xA098
#define GEN6_RC6_WAKE_RATE_LIMIT		0xA09C
#define GEN6_RC6pp_WAKE_RATE_LIMIT		0xA0A0
#define GEN6_RC_EVALUATION_INTERVAL		0xA0A8
#define GEN6_RC_IDLE_HYSTERSIS			0xA0AC
#define GEN6_RC_SLEEP				0xA0B0
#define GEN6_RC1e_THRESHOLD			0xA0B4
#define GEN6_RC6_THRESHOLD			0xA0B8
#define GEN6_RC6p_THRESHOLD			0xA0BC
#define GEN6_RC6pp_THRESHOLD			0xA0C0
#define GEN6_PMINTRMSK				0xA168

#define GEN6_PMISR				0x44020
#define GEN6_PMIMR				0x44024
#define GEN6_PMIIR				0x44028
#define GEN6_PMIER				0x4402C
#define  GEN6_PM_MBOX_EVENT			(1<<25)
#define  GEN6_PM_THERMAL_EVENT			(1<<24)
#define  GEN6_PM_RP_DOWN_TIMEOUT		(1<<6)
#define  GEN6_PM_RP_UP_THRESHOLD		(1<<5)
#define  GEN6_PM_RP_DOWN_THRESHOLD		(1<<4)
#define  GEN6_PM_RP_UP_EI_EXPIRED		(1<<2)
#define  GEN6_PM_RP_DOWN_EI_EXPIRED		(1<<1)

#define GEN6_PCODE_MAILBOX			0x138124
#define   GEN6_PCODE_READY			(1<<31)
#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x9
#define GEN6_PCODE_DATA				0x138128
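
/* Editor's note: the pcode mailbox defined just above follows a simple
 * handshake; a hedged sketch of a write, in the style of the gen6 power
 * management code that drives it. The wait_for() helper and the 500 ms
 * budget are assumptions.
 */
	I915_WRITE(GEN6_PCODE_DATA, data);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY | GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");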

#endif /* _I915_REG_H_ */


@@ -235,6 +235,7 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_modeset_reg(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

@@ -367,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev)
	}
	i915_save_palette(dev, PIPE_B);
	dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}

	return;
}

@@ -375,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dpll_a_reg, fpa0_reg, fpa1_reg;
	int dpll_b_reg, fpb0_reg, fpb1_reg;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
		break;
	case 3:
	case 2:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
		break;
	}


	if (HAS_PCH_SPLIT(dev)) {
		dpll_a_reg = PCH_DPLL_A;
		dpll_b_reg = PCH_DPLL_B;

@@ -771,8 +817,14 @@ int i915_save_state(struct drm_device *dev)
		dev_priv->saveIMR = I915_READ(IMR);
	}

	if (HAS_PCH_SPLIT(dev))
	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (IS_GEN6(dev))
		gen6_disable_rps(dev);

	/* XXX disabling the clock gating breaks suspend on gm45
	intel_disable_clock_gating(dev);
	 */

	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

@@ -788,28 +840,6 @@ int i915_save_state(struct drm_device *dev)
	for (i = 0; i < 3; i++)
		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	}

	return 0;
}

@@ -823,27 +853,6 @@ int i915_restore_state(struct drm_device *dev)
	/* Hardware status page */
	I915_WRITE(HWS_PGA, dev_priv->saveHWS);

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
		break;
	case 3:
	case 2:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
		break;
	}

	i915_restore_display(dev);

	/* Interrupt state */

@@ -860,13 +869,16 @@ int i915_restore_state(struct drm_device *dev)
	}

	/* Clock gating state */
	intel_init_clock_gating(dev);
	intel_enable_clock_gating(dev);

	if (HAS_PCH_SPLIT(dev)) {
	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev))
		gen6_enable_rps(dev_priv);

	/* Cache mode state */
	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

@@ -6,6 +6,7 @@
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915

@@ -16,18 +17,18 @@

TRACE_EVENT(i915_gem_object_create,

	    TP_PROTO(struct drm_gem_object *obj),
	    TP_PROTO(struct drm_i915_gem_object *obj),

	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_gem_object *, obj)
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->size = obj->size;
			   __entry->size = obj->base.size;
			   ),

	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)

@@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create,

TRACE_EVENT(i915_gem_object_bind,

	    TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),

	    TP_ARGS(obj, gtt_offset),
	    TP_ARGS(obj, gtt_offset, mappable),

	    TP_STRUCT__entry(
			     __field(struct drm_gem_object *, obj)
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, gtt_offset)
			     __field(bool, mappable)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->gtt_offset = gtt_offset;
			   __entry->mappable = mappable;
			   ),

	    TP_printk("obj=%p, gtt_offset=%08x",
		      __entry->obj, __entry->gtt_offset)
	    TP_printk("obj=%p, gtt_offset=%08x%s",
		      __entry->obj, __entry->gtt_offset,
		      __entry->mappable ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_object_change_domain,

	    TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
	    TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),

	    TP_ARGS(obj, old_read_domains, old_write_domain),

	    TP_STRUCT__entry(
			     __field(struct drm_gem_object *, obj)
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, read_domains)
			     __field(u32, write_domain)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->read_domains = obj->read_domains | (old_read_domains << 16);
			   __entry->write_domain = obj->write_domain | (old_write_domain << 16);
			   __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
			   __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
			   ),

	    TP_printk("obj=%p, read=%04x, write=%04x",

@@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain,
		      __entry->read_domains, __entry->write_domain)
);

TRACE_EVENT(i915_gem_object_get_fence,

	    TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),

	    TP_ARGS(obj, fence, tiling_mode),

	    TP_STRUCT__entry(
			     __field(struct drm_gem_object *, obj)
			     __field(int, fence)
			     __field(int, tiling_mode)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->fence = fence;
			   __entry->tiling_mode = tiling_mode;
			   ),

	    TP_printk("obj=%p, fence=%d, tiling=%d",
		      __entry->obj, __entry->fence, __entry->tiling_mode)
);

DECLARE_EVENT_CLASS(i915_gem_object,

	    TP_PROTO(struct drm_gem_object *obj),
	    TP_PROTO(struct drm_i915_gem_object *obj),

	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_gem_object *, obj)
			     __field(struct drm_i915_gem_object *, obj)
			     ),

	    TP_fast_assign(

@@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,

	     TP_PROTO(struct drm_gem_object *obj),
	     TP_PROTO(struct drm_i915_gem_object *obj),

	     TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,

	     TP_PROTO(struct drm_gem_object *obj),
	     TP_PROTO(struct drm_i915_gem_object *obj),

	     TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,

	     TP_PROTO(struct drm_gem_object *obj),
	     TP_PROTO(struct drm_i915_gem_object *obj),

	     TP_ARGS(obj)
);

@@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
);

TRACE_EVENT(i915_flip_request,
	    TP_PROTO(int plane, struct drm_gem_object *obj),
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
		    __field(int, plane)
		    __field(struct drm_gem_object *, obj)
		    __field(struct drm_i915_gem_object *, obj)
		    ),

	    TP_fast_assign(

@@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request,
);

TRACE_EVENT(i915_flip_complete,
	    TP_PROTO(int plane, struct drm_gem_object *obj),
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
		    __field(int, plane)
		    __field(struct drm_gem_object *, obj)
		    __field(struct drm_i915_gem_object *, obj)
		    ),

	    TP_fast_assign(

@@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete,
	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT(i915_reg_rw,
	    TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),

	    TP_ARGS(cmd, reg, val, len),

	    TP_STRUCT__entry(
		    __field(int, cmd)
		    __field(uint32_t, reg)
		    __field(uint64_t, val)
		    __field(int, len)
		    ),

	    TP_fast_assign(
		    __entry->cmd = cmd;
		    __entry->reg = reg;
		    __entry->val = (uint64_t)val;
		    __entry->len = len;
		    ),

	    TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
		      __entry->cmd, __entry->reg, __entry->val, __entry->len)
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */

[Diff for the next file is not shown because of its large size.]

@ -1442,8 +1442,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
|
|||
/* Changes to enable or select take place the vblank
|
||||
* after being written.
|
||||
*/
|
||||
intel_wait_for_vblank(intel_dp->base.base.dev,
|
||||
intel_crtc->pipe);
|
||||
intel_wait_for_vblank(dev, intel_crtc->pipe);
|
||||
}
|
||||
|
||||
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
|
||||
|
|
|
@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
|
|||
|
||||
struct intel_framebuffer {
|
||||
struct drm_framebuffer base;
|
||||
struct drm_gem_object *obj;
|
||||
struct drm_i915_gem_object *obj;
|
||||
};
|
||||
|
||||
struct intel_fbdev {
|
||||
|
@ -166,7 +166,7 @@ struct intel_crtc {
|
|||
struct intel_unpin_work *unpin_work;
|
||||
int fdi_lanes;
|
||||
|
||||
struct drm_gem_object *cursor_bo;
|
||||
struct drm_i915_gem_object *cursor_bo;
|
||||
uint32_t cursor_addr;
|
||||
int16_t cursor_x, cursor_y;
|
||||
int16_t cursor_width, cursor_height;
|
||||
|
@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
|
|||
struct intel_unpin_work {
|
||||
struct work_struct work;
|
||||
struct drm_device *dev;
|
||||
struct drm_gem_object *old_fb_obj;
|
||||
struct drm_gem_object *pending_flip_obj;
|
||||
struct drm_i915_gem_object *old_fb_obj;
|
||||
struct drm_i915_gem_object *pending_flip_obj;
|
||||
struct drm_pending_vblank_event *event;
|
||||
int pending;
|
||||
bool enable_stall_check;
|
||||
|
@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
|
|||
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
|
||||
extern void intel_dvo_init(struct drm_device *dev);
|
||||
extern void intel_tv_init(struct drm_device *dev);
|
||||
extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
|
||||
extern void intel_mark_busy(struct drm_device *dev,
|
||||
struct drm_i915_gem_object *obj);
|
||||
extern bool intel_lvds_init(struct drm_device *dev);
|
||||
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
|
||||
void
|
||||
|
@ -293,19 +294,22 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
|
|||
u16 blue, int regno);
|
||||
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
|
||||
u16 *blue, int regno);
|
||||
extern void intel_init_clock_gating(struct drm_device *dev);
|
||||
extern void intel_enable_clock_gating(struct drm_device *dev);
|
||||
extern void intel_disable_clock_gating(struct drm_device *dev);
|
||||
extern void ironlake_enable_drps(struct drm_device *dev);
|
||||
extern void ironlake_disable_drps(struct drm_device *dev);
|
||||
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
|
||||
extern void gen6_disable_rps(struct drm_device *dev);
|
||||
extern void intel_init_emon(struct drm_device *dev);
|
||||
|
||||
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
|
||||
struct drm_gem_object *obj,
|
||||
bool pipelined);
|
||||
struct drm_i915_gem_object *obj,
|
||||
struct intel_ring_buffer *pipelined);
|
||||
|
||||
extern int intel_framebuffer_init(struct drm_device *dev,
|
||||
struct intel_framebuffer *ifb,
|
||||
struct drm_mode_fb_cmd *mode_cmd,
|
||||
struct drm_gem_object *obj);
|
||||
struct drm_i915_gem_object *obj);
|
||||
extern int intel_fbdev_init(struct drm_device *dev);
|
||||
extern void intel_fbdev_fini(struct drm_device *dev);
|
||||
|
||||
|
|
|
@@ -65,10 +65,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd mode_cmd;
-	struct drm_gem_object *fbo = NULL;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct device *device = &dev->pdev->dev;
-	int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	int size, ret;
 
 	/* we don't do packed 24bpp */
 	if (sizes->surface_bpp == 24)

@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = ALIGN(size, PAGE_SIZE);
-	fbo = i915_gem_alloc_object(dev, size);
-	if (!fbo) {
+	obj = i915_gem_alloc_object(dev, size);
+	if (!obj) {
 		DRM_ERROR("failed to allocate framebuffer\n");
 		ret = -ENOMEM;
 		goto out;
 	}
-	obj_priv = to_intel_bo(fbo);
 
 	mutex_lock(&dev->struct_mutex);
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;

@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	info->par = ifbdev;
 
-	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
 	if (ret)
 		goto out_unpin;
 

@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	else
 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 
-	info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
 	info->fix.smem_len = size;
 
-	info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
-				       size);
+	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
 		goto out_unpin;

@@ -153,13 +150,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	// memset(info->screen_base, 0, size);
 
 	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	/* FIXME: we really shouldn't expose mmio space at all */
-	info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
-	info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
-
 	info->pixmap.size = 64*1024;
 	info->pixmap.buf_align = 8;
 	info->pixmap.access_align = 32;

@@ -168,7 +160,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
 		      fb->width, fb->height,
-		      obj_priv->gtt_offset, fbo);
+		      obj->gtt_offset, obj);
 
 
 	mutex_unlock(&dev->struct_mutex);

@@ -176,9 +168,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin(fbo);
+	i915_gem_object_unpin(obj);
 out_unref:
-	drm_gem_object_unreference(fbo);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;

@@ -225,7 +217,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
-		drm_gem_object_unreference_unlocked(ifb->obj);
+		drm_gem_object_unreference_unlocked(&ifb->obj->base);
 		ifb->obj = NULL;
 	}
 }
@@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio)
 
 	/* On most chips, these bits must be preserved in software. */
 	if (!IS_I830(dev) && !IS_845G(dev))
-		reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
-						   GPIO_CLOCK_PULLUP_DISABLE);
+		reserved = I915_READ_NOTRACE(gpio->reg) &
+					     (GPIO_DATA_PULLUP_DISABLE |
+					      GPIO_CLOCK_PULLUP_DISABLE);
 
 	return reserved;
 }

@@ -96,9 +97,9 @@ static int get_clock(void *data)
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }
 
 static int get_data(void *data)

@@ -106,9 +107,9 @@ static int get_data(void *data)
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }
 
 static void set_clock(void *data, int state_high)

@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high)
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 			     GPIO_CLOCK_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | clock_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
 	POSTING_READ(gpio->reg);
 }
 

@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high)
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 			    GPIO_DATA_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | data_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
 	POSTING_READ(gpio->reg);
 }
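
Editor's note: the only change in this file is swapping I915_READ/I915_WRITE for their _NOTRACE variants in the bit-banged i2c path; the _NOTRACE accessors skip i915's register-access tracing hook, which matters when a single i2c transfer touches the GPIO register thousands of times. A compilable model of the distinction; all names below are stand-ins, not the driver's API:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t mmio_read32(volatile uint32_t *reg) { return *reg; }

static inline void trace_reg_rw(volatile uint32_t *reg, uint32_t val)
{
	printf("reg %p -> %#x\n", (void *)reg, (unsigned int)val);
}

/* Traced flavour: fine for rare accesses, noisy for bit-banging. */
static inline uint32_t read_traced(volatile uint32_t *reg)
{
	uint32_t val = mmio_read32(reg);
	trace_reg_rw(reg, val);
	return val;
}

/* The _NOTRACE flavour: same access, no logging. */
static inline uint32_t read_notrace(volatile uint32_t *reg)
{
	return mmio_read32(reg);
}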
@@ -304,14 +304,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 		u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
 
-		pfit_control |= PFIT_ENABLE;
 		/* 965+ is easy, it does everything in hw */
 		if (scaled_width > scaled_height)
-			pfit_control |= PFIT_SCALING_PILLAR;
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
 		else if (scaled_width < scaled_height)
-			pfit_control |= PFIT_SCALING_LETTER;
-		else
-			pfit_control |= PFIT_SCALING_AUTO;
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+		else if (adjusted_mode->hdisplay != mode->hdisplay)
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
 	} else {
 		u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 		u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;

@@ -358,13 +357,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		pfit_control |= PFIT_ENABLE;
-		if (INTEL_INFO(dev)->gen >= 4)
-			pfit_control |= PFIT_SCALING_AUTO;
-		else
-			pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-					 VERT_INTERP_BILINEAR |
-					 HORIZ_INTERP_BILINEAR);
+		if (mode->vdisplay != adjusted_mode->vdisplay ||
+		    mode->hdisplay != adjusted_mode->hdisplay) {
+			pfit_control |= PFIT_ENABLE;
+			if (INTEL_INFO(dev)->gen >= 4)
+				pfit_control |= PFIT_SCALING_AUTO;
+			else
+				pfit_control |= (VERT_AUTO_SCALE |
						 VERT_INTERP_BILINEAR |
						 HORIZ_AUTO_SCALE |
						 HORIZ_INTERP_BILINEAR);
+		}
 		break;
 
 	default:

@@ -914,6 +917,8 @@ bool intel_lvds_init(struct drm_device *dev)
 
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
 	intel_encoder->crtc_mask = (1 << 1);
+	if (INTEL_INFO(dev)->gen >= 5)
+		intel_encoder->crtc_mask |= (1 << 0);
 	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;

@@ -1019,10 +1024,18 @@ bool intel_lvds_init(struct drm_device *dev)
 out:
 	if (HAS_PCH_SPLIT(dev)) {
 		u32 pwm;
-		/* make sure PWM is enabled */
+
+		pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+		/* make sure PWM is enabled and locked to the LVDS pipe */
 		pwm = I915_READ(BLC_PWM_CPU_CTL2);
-		pwm |= (PWM_ENABLE | PWM_PIPE_B);
-		I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+		if (pipe == 0 && (pwm & PWM_PIPE_B))
+			I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+		if (pipe)
+			pwm |= PWM_PIPE_B;
+		else
+			pwm &= ~PWM_PIPE_B;
+		I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
 
 		pwm = I915_READ(BLC_PWM_PCH_CTL1);
 		pwm |= PWM_PCH_ENABLE;
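
Editor's note on the scaled_width/scaled_height computations in the panel-fitter hunks above: multiplying adjusted_mode->hdisplay by mode->vdisplay and comparing against mode->hdisplay * adjusted_mode->vdisplay is cross-multiplication, which compares the two aspect ratios exactly with no integer division. A worked check with assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned int panel_w = 1280, panel_h = 1024;	/* adjusted_mode */
	unsigned int mode_w = 1024, mode_h = 768;	/* requested mode */

	/* panel_w/panel_h vs mode_w/mode_h <=> panel_w*mode_h vs mode_w*panel_h */
	unsigned int scaled_width = panel_w * mode_h;	/* 983040 */
	unsigned int scaled_height = mode_w * panel_h;	/* 1048576 */

	if (scaled_width > scaled_height)
		puts("panel is wider: pillarbox (bars left and right)");
	else if (scaled_width < scaled_height)
		puts("panel is taller: letterbox (bars top and bottom)");
	else
		puts("same aspect ratio: plain scaling");
	return 0;
}

With these numbers the 5:4 panel is narrower than the 4:3 mode, so the letterbox branch is taken — the same decision PFIT_SCALING_LETTER encodes above.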
@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 
 	if (asle) {
-		if (IS_MOBILE(dev)) {
-			unsigned long irqflags;
-
-			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		if (IS_MOBILE(dev))
 			intel_enable_asle(dev);
-			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
-					       irqflags);
-		}
 
 		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
 			     ASLE_PFMB_EN;
@@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
-	if (overlay->last_flip_req == 0)
-		return -ENOMEM;
-
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
 	ret = i915_do_wait_request(dev,
 				   overlay->last_flip_req, true,
-				   &dev_priv->render_ring);
+				   LP_RING(dev_priv));
 	if (ret)
 		return ret;
 

@@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev)
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;

@@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 		goto out;
 	}
 
-	BEGIN_LP_RING(4);
+	ret = BEGIN_LP_RING(4);
+	if (ret) {
+		kfree(request);
+		goto out;
+	}
+
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	OUT_RING(overlay->flip_addr | OFC_UPDATE);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);

@@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
+	int ret;
 
 	BUG_ON(!overlay->active);
 

@@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
+	overlay->last_flip_req = request->seqno;
 	return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 
 	overlay->old_vid_bo = NULL;
 }
 
 static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj = overlay->vid_bo;
 
 	/* never have the overlay hw on without showing a frame */
 	BUG_ON(!overlay->vid_bo);
-	obj = &overlay->vid_bo->base;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	overlay->vid_bo = NULL;
 
 	overlay->crtc->overlay = NULL;

@@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 			     bool interruptible)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 flip_addr = overlay->flip_addr;
+	struct drm_i915_gem_request *request;
 	int ret;
 
 	BUG_ON(!overlay->active);
 

@@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	BEGIN_LP_RING(6);
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	/* wait for overlay to go idle */
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);

@@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 		return 0;
 
 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
-				   interruptible, &dev_priv->render_ring);
+				   interruptible, LP_RING(dev_priv));
 	if (ret)
 		return ret;
 

@@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (request == NULL)
 		return -ENOMEM;
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();

@@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
 }
 
 static int intel_overlay_do_put_image(struct intel_overlay *overlay,
-				      struct drm_gem_object *new_bo,
+				      struct drm_i915_gem_object *new_bo,
 				      struct put_image_params *params)
 {
 	int ret, tmp_width;
 	struct overlay_registers *regs;
 	bool scale_changed = false;
-	struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
 	struct drm_device *dev = overlay->dev;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

@@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+	ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
 	if (ret != 0)
 		return ret;
 

@@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		goto out_unpin;
 
+	ret = i915_gem_object_put_fence(new_bo);
+	if (ret)
+		goto out_unpin;
+
 	if (!overlay->active) {
 		regs = intel_overlay_map_regs(overlay);
 		if (!regs) {

@@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
 				       params->offset_Y, tmp_width);
 	regs->SHEIGHT = params->src_h;
-	regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+	regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
 	regs->OSTRIDE = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {

@@ -811,8 +841,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				      params->src_w/uv_hscale);
 		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
 		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
-		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
-		regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+		regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+		regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
 		regs->OSTRIDE |= params->stride_UV << 16;
 	}
 

@@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		goto out_unpin;
 
 	overlay->old_vid_bo = overlay->vid_bo;
-	overlay->vid_bo = to_intel_bo(new_bo);
+	overlay->vid_bo = new_bo;
 
 	return 0;
 

@@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
 
 static int check_overlay_src(struct drm_device *dev,
 			     struct drm_intel_overlay_put_image *rec,
-			     struct drm_gem_object *new_bo)
+			     struct drm_i915_gem_object *new_bo)
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);

@@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev,
 			return -EINVAL;
 
 		tmp = rec->stride_Y*rec->src_height;
-		if (rec->offset_Y + tmp > new_bo->size)
+		if (rec->offset_Y + tmp > new_bo->base.size)
 			return -EINVAL;
 		break;
 

@@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev,
 			return -EINVAL;
 
 		tmp = rec->stride_Y * rec->src_height;
-		if (rec->offset_Y + tmp > new_bo->size)
+		if (rec->offset_Y + tmp > new_bo->base.size)
 			return -EINVAL;
 
 		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
-		if (rec->offset_U + tmp > new_bo->size ||
-		    rec->offset_V + tmp > new_bo->size)
+		if (rec->offset_U + tmp > new_bo->base.size ||
+		    rec->offset_V + tmp > new_bo->base.size)
 			return -EINVAL;
 		break;
 	}

@@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	struct intel_overlay *overlay;
 	struct drm_mode_object *drmmode_obj;
 	struct intel_crtc *crtc;
-	struct drm_gem_object *new_bo;
+	struct drm_i915_gem_object *new_bo;
 	struct put_image_params *params;
 	int ret;
 

@@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	}
 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 
-	new_bo = drm_gem_object_lookup(dev, file_priv,
-				       put_image_rec->bo_handle);
+	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+						   put_image_rec->bo_handle));
 	if (!new_bo) {
 		ret = -ENOENT;
 		goto out_free;

@@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	mutex_lock(&dev->mode_config.mutex);
 	mutex_lock(&dev->struct_mutex);
 
+	if (new_bo->tiling_mode) {
+		DRM_ERROR("buffer used for overlay image can not be tiled\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	ret = intel_overlay_recover_from_interrupt(overlay, true);
 	if (ret != 0)
 		goto out_unlock;

@@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->mode_config.mutex);
-	drm_gem_object_unreference_unlocked(new_bo);
+	drm_gem_object_unreference_unlocked(&new_bo->base);
 out_free:
 	kfree(params);
 

@@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
-	struct drm_gem_object *reg_bo;
+	struct drm_i915_gem_object *reg_bo;
 	struct overlay_registers *regs;
 	int ret;
 

@@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
 	if (!reg_bo)
 		goto out_free;
-	overlay->reg_bo = to_intel_bo(reg_bo);
+	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
 		ret = i915_gem_attach_phys_object(dev, reg_bo,

@@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev)
 			DRM_ERROR("failed to attach phys overlay regs\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = overlay->reg_bo->gtt_offset;
+		overlay->flip_addr = reg_bo->gtt_offset;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {

@@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev)
 out_unpin_bo:
 	i915_gem_object_unpin(reg_bo);
 out_free_bo:
-	drm_gem_object_unreference(reg_bo);
+	drm_gem_object_unreference(&reg_bo->base);
 out_free:
 	kfree(overlay);
 	return;
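
Editor's note: a pattern repeated across the overlay hunks above is that BEGIN_LP_RING() can now fail, so each call site gains an error leg that frees the not-yet-submitted request; the same applies to i915_add_request(), which now returns an error and reports the sequence number through request->seqno. A compilable sketch of the converted shape, with stand-in helpers rather than the driver's functions:

#include <stdlib.h>

struct request { int seqno; };

static int begin_ring(int dwords) { (void)dwords; return 0; }
static void out_ring(unsigned int cmd) { (void)cmd; }
static int add_request(struct request *rq) { rq->seqno = 1; return 0; }

static int emit_overlay_op(struct request *request, unsigned int cmd)
{
	int ret;

	ret = begin_ring(2);
	if (ret) {
		free(request);	/* never queued, still ours to free */
		return ret;
	}
	out_ring(cmd);

	ret = add_request(request);
	if (ret) {
		free(request);	/* queuing failed, still ours to free */
		return ret;
	}
	return 0;		/* on success the ring owns the request */
}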
@@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev)
 	return 0;
 }
 
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	/* Restore the CTL value if it lost, e.g. GPU reset */
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2);
+		if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+			dev_priv->saveBLC_PWM_CTL2 = val;
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_PCH_CTL2,
+				   dev_priv->saveBLC_PWM_CTL);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	} else {
+		val = I915_READ(BLC_PWM_CTL);
+		if (dev_priv->saveBLC_PWM_CTL == 0) {
+			dev_priv->saveBLC_PWM_CTL = val;
+			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_CTL,
+				   dev_priv->saveBLC_PWM_CTL);
+			I915_WRITE(BLC_PWM_CTL2,
+				   dev_priv->saveBLC_PWM_CTL2);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	}
+
+	return val;
+}
+
 u32 intel_panel_get_max_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 max;
 
+	max = i915_read_blc_pwm_ctl(dev_priv);
+	if (max == 0) {
+		/* XXX add code here to query mode clock or hardware clock
+		 * and program max PWM appropriately.
+		 */
+		printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+		return 1;
+	}
+
 	if (HAS_PCH_SPLIT(dev)) {
-		max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+		max >>= 16;
 	} else {
-		max = I915_READ(BLC_PWM_CTL);
 		if (IS_PINEVIEW(dev)) {
 			max >>= 17;
 		} else {

@@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 			max *= 0xff;
 	}
 
-	if (max == 0) {
-		/* XXX add code here to query mode clock or hardware clock
-		 * and program max PWM appropriately.
-		 */
-		DRM_ERROR("fixme: max PWM is zero.\n");
-		max = 1;
-	}
-
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
 	return max;
 }
(Diff for one file not shown on the original page because of its size.)
@@ -1,22 +1,37 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
-struct intel_hw_status_page {
-	void *page_addr;
-	unsigned int gfx_addr;
-	struct drm_gem_object *obj;
+enum {
+	RCS = 0x0,
+	VCS,
+	BCS,
+	I915_NUM_RINGS,
 };
 
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+struct intel_hw_status_page {
+	u32 __iomem *page_addr;
+	unsigned int gfx_addr;
+	struct drm_i915_gem_object *obj;
+};
+
+#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
 #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+
+#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
 #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+
+#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
 #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
 #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
 
-struct drm_i915_gem_execbuffer2;
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
 
 struct intel_ring_buffer {
 	const char *name;
 	enum intel_ring_id {

@@ -25,45 +40,36 @@ struct intel_ring_buffer {
 		RING_BLT = 0x4,
 	} id;
 	u32 mmio_base;
-	unsigned long size;
 	void *virtual_start;
 	struct drm_device *dev;
-	struct drm_gem_object *gem_object;
+	struct drm_i915_gem_object *obj;
 
+	u32 actual_head;
 	u32 head;
 	u32 tail;
 	int space;
+	int size;
 	struct intel_hw_status_page status_page;
 
-	u32 irq_gem_seqno;	/* last seq seem at irq time */
-	u32 waiting_gem_seqno;
-	int user_irq_refcount;
-	void (*user_irq_get)(struct drm_device *dev,
-			     struct intel_ring_buffer *ring);
-	void (*user_irq_put)(struct drm_device *dev,
-			     struct intel_ring_buffer *ring);
+	u32 irq_seqno;		/* last seq seem at irq time */
+	u32 waiting_seqno;
+	u32 sync_seqno[I915_NUM_RINGS-1];
+	atomic_t irq_refcount;
+	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+	void (*irq_put)(struct intel_ring_buffer *ring);
 
-	int (*init)(struct drm_device *dev,
-		    struct intel_ring_buffer *ring);
+	int (*init)(struct intel_ring_buffer *ring);
 
-	void (*write_tail)(struct drm_device *dev,
-			   struct intel_ring_buffer *ring,
+	void (*write_tail)(struct intel_ring_buffer *ring,
 			   u32 value);
-	void (*flush)(struct drm_device *dev,
-		      struct intel_ring_buffer *ring,
-		      u32 invalidate_domains,
-		      u32 flush_domains);
-	u32 (*add_request)(struct drm_device *dev,
-			   struct intel_ring_buffer *ring,
-			   u32 flush_domains);
-	u32 (*get_seqno)(struct drm_device *dev,
-			 struct intel_ring_buffer *ring);
-	int (*dispatch_gem_execbuffer)(struct drm_device *dev,
-				       struct intel_ring_buffer *ring,
-				       struct drm_i915_gem_execbuffer2 *exec,
-				       struct drm_clip_rect *cliprects,
-				       uint64_t exec_offset);
+	void (*flush)(struct intel_ring_buffer *ring,
+		      u32 invalidate_domains,
+		      u32 flush_domains);
+	int (*add_request)(struct intel_ring_buffer *ring,
+			   u32 *seqno);
+	u32 (*get_seqno)(struct intel_ring_buffer *ring);
+	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+				   u32 offset, u32 length);
+	void (*cleanup)(struct intel_ring_buffer *ring);
 
 	/**

@@ -96,7 +102,7 @@ struct intel_ring_buffer {
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
-	bool outstanding_lazy_request;
+	u32 outstanding_lazy_request;
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;

@@ -105,44 +111,54 @@ struct intel_ring_buffer {
 };
 
 static inline u32
-intel_read_status_page(struct intel_ring_buffer *ring,
-		       int reg)
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+		      struct intel_ring_buffer *other)
 {
-	u32 *regs = ring->status_page.page_addr;
-	return regs[reg];
+	int idx;
+
+	/*
+	 * cs -> 0 = vcs, 1 = bcs
+	 * vcs -> 0 = bcs, 1 = cs,
+	 * bcs -> 0 = cs, 1 = vcs.
+	 */
+
+	idx = (other - ring) - 1;
+	if (idx < 0)
+		idx += I915_NUM_RINGS;
+
+	return idx;
 }
 
-int intel_init_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-			       struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct drm_device *dev,
-		      struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
-				   struct intel_ring_buffer *ring,
-				   unsigned int data)
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+		       int reg)
 {
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	*virt = data;
+	return ioread32(ring->status_page.page_addr + reg);
 }
 
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+				   u32 data)
+{
+	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-			 struct intel_ring_buffer *ring);
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+		    struct intel_ring_buffer *to,
+		    u32 seqno);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
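
Editor's note: the new intel_ring_sync_index() above maps every *other* ring to a dense per-ring slot, matching the cs/vcs/bcs comment in its body. A worked check of the arithmetic, assuming the three rings live consecutively in an array as the pointer subtraction implies (stand-in types; only the layout matters):

#include <stdio.h>

#define NUM_RINGS 3

struct ring { int id; };

static int sync_index(struct ring *ring, struct ring *other)
{
	int idx = (int)(other - ring) - 1;
	if (idx < 0)
		idx += NUM_RINGS;
	return idx;
}

int main(void)
{
	struct ring r[NUM_RINGS];	/* r[0]=RCS, r[1]=VCS, r[2]=BCS */

	printf("%d\n", sync_index(&r[0], &r[1]));	/* rcs->vcs: 0 */
	printf("%d\n", sync_index(&r[0], &r[2]));	/* rcs->bcs: 1 */
	printf("%d\n", sync_index(&r[1], &r[0]));	/* vcs->rcs: 1 */
	printf("%d\n", sync_index(&r[2], &r[0]));	/* bcs->rcs: 0 */
	return 0;
}

Each ring thus gets indices 0..NUM_RINGS-2 for its peers, which is exactly what sizes the sync_seqno[I915_NUM_RINGS-1] array in the struct above.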
@@ -1045,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 
 	/* Set the SDVO control regs. */
 	if (INTEL_INFO(dev)->gen >= 4) {
-		sdvox = SDVO_BORDER_ENABLE;
+		sdvox = 0;
+		if (INTEL_INFO(dev)->gen < 5)
+			sdvox |= SDVO_BORDER_ENABLE;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 			sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)

@@ -1075,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 		sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
-	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+	    INTEL_INFO(dev)->gen < 5)
 		sdvox |= SDVO_STALL_SELECT;
 	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
 }
@@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_disable_pipestat(dev_priv, 0,
+			      PIPE_HOTPLUG_INTERRUPT_ENABLE |
 			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
 	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);

@@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 	I915_WRITE(TV_CTL, save_tv_ctl);
 
 	/* Restore interrupt config */
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, 0,
+			     PIPE_HOTPLUG_INTERRUPT_ENABLE |
 			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	return type;
 }
@@ -10,7 +10,7 @@ config DRM_NOUVEAU
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EMBEDDED
 	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
-	select ACPI_VIDEO if ACPI
+	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
 	help
 	  Choose this option for open-source nVidia support.
 
@@ -5,27 +5,32 @@
 ccflags-y := -Iinclude/drm
 nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_object.o nouveau_irq.o nouveau_notifier.o \
-             nouveau_sgdma.o nouveau_dma.o \
+             nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
              nouveau_dp.o nouveau_ramht.o \
              nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+             nouveau_mm.o nouveau_vm.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
-             nv40_grctx.o nv50_grctx.o \
+             nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+             nv84_crypt.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
-             nv50_crtc.o nv50_dac.o nv50_sor.o \
-             nv50_cursor.o nv50_display.o nv50_fbcon.o \
+             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
+             nv50_cursor.o nv50_display.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
-             nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+             nv04_crtc.o nv04_display.o nv04_cursor.o \
+             nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
              nv50_calc.o \
-             nv04_pm.o nv50_pm.o nva3_pm.o
+             nv04_pm.o nv50_pm.o nva3_pm.o \
+             nv50_vram.o nvc0_vram.o \
+             nv50_vm.o nvc0_vm.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
@@ -130,10 +130,15 @@ static int nouveau_dsm_init(void)
 
 static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
 {
-	if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+	/* easy option one - intel vendor ID means Integrated */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 		return VGA_SWITCHEROO_IGD;
-	else
-		return VGA_SWITCHEROO_DIS;
+
+	/* is this device on Bus 0? - this may need improving */
+	if (pdev->bus->number == 0)
+		return VGA_SWITCHEROO_IGD;
+
+	return VGA_SWITCHEROO_DIS;
 }
 
 static struct vga_switcheroo_handler nouveau_dsm_handler = {
@@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
 	return entry;
 }
 
-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+				 int heads, int or)
 {
 	struct dcb_entry *entry = new_dcb_entry(dcb);
 
-	entry->type = 0;
+	entry->type = type;
 	entry->i2c_index = i2c;
 	entry->heads = heads;
-	entry->location = DCB_LOC_ON_CHIP;
-	entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 2;
-	entry->i2c_index = LEGACY_I2C_PANEL;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
-	entry->or = 1;	/* means |0x10 gets set on CRE_LCD__INDEX */
-	entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
-	/*
-	 * For dvi-a either crtc probably works, but my card appears to only
-	 * support dvi-d. "nvidia" still attempts to program it for dvi-a,
-	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
-	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
-	 * the monitor picks up the mode res ok and lights up, but no pixel
-	 * data appears, so the board manufacturer probably connected up the
-	 * sync lines, but missed the video traces / components
-	 *
-	 * with this introduction, dvi-a left as an exercise for the reader.
-	 */
-	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 1;
-	entry->i2c_index = LEGACY_I2C_TV;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	if (type != OUTPUT_ANALOG)
+		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	entry->or = or;
 }
 
 static bool

@@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 	return true;
 }
 
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct dcb_table *dcb = &bios->dcb;
+	int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+	/* Apple iMac G4 NV17 */
+	if (of_machine_is_compatible("PowerMac4,5")) {
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+		return;
+	}
+#endif
+
+	/* Make up some sane defaults */
+	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+		fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+				     all_heads, 0);
+
+	else if (bios->tmds.output0_script_ptr ||
+		 bios->tmds.output1_script_ptr)
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+				     all_heads, 1);
+}
+
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct dcb_table *dcb = &bios->dcb;

@@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 
 	/* this situation likely means a really old card, pre DCB */
 	if (dcbptr == 0x0) {
 		NV_INFO(dev, "Assuming a CRT output exists\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 

@@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 		 */
 		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
 				  "adding all possible outputs\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		/*
-		 * Attempt to detect TV before DVI because the test
-		 * for the former is more accurate and it rules the
-		 * latter out.
-		 */
-		if (nv04_tv_identify(dev,
-				     bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
-		else if (bios->tmds.output0_script_ptr ||
-			 bios->tmds.output1_script_ptr)
-			fabricate_dvi_i_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 

@@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+	ret = parse_dcb_table(dev, bios);
 	if (ret)
 		return ret;
 
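
Editor's note: the refactor above folds three single-purpose helpers into one parameterized fabricate_dcb_output(). The old literal entry->type values 0/1/2 line up with OUTPUT_ANALOG, OUTPUT_TV and OUTPUT_TMDS, so the old call sites translate mechanically, as the new fabricate_dcb_encoder_table() shows. A compilable sketch of that mapping — the enum values below are stand-ins chosen to match the old literals, and the function body is a stub, since the real definitions live elsewhere in nouveau:

enum { OUTPUT_ANALOG = 0, OUTPUT_TV = 1, OUTPUT_TMDS = 2 };
enum { LEGACY_I2C_CRT = 0, LEGACY_I2C_PANEL = 1, LEGACY_I2C_TV = 2 };

struct dcb_table;

static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
				 int heads, int or)
{
	/* stub for the sketch; the real body is in the hunk above */
	(void)dcb; (void)type; (void)i2c; (void)heads; (void)or;
}

static void fabricate_defaults(struct dcb_table *dcb, int all_heads)
{
	/* was: fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); */
	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);

	/* was: fabricate_tv_output(dcb, twoHeads); */
	fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV, all_heads, 0);

	/* was: fabricate_dvi_i_output(dcb, twoHeads); */
	fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL, all_heads, 1);
}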
@@ -32,6 +32,8 @@
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 #include <linux/log2.h>
 #include <linux/slab.h>

@@ -46,82 +48,51 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
-	if (nvbo->tile)
-		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+	nouveau_vm_put(&nvbo->vma);
 	kfree(nvbo);
 }
 
 static void
-nouveau_bo_fixup_align(struct drm_device *dev,
-		       uint32_t tile_mode, uint32_t tile_flags,
-		       int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
+		       int *page_shift)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 
-	/*
-	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
-	 * align to to that as well as the page size. Align the size to the
-	 * appropriate boundaries. This does imply that sizes are rounded up
-	 * 3-7 pages, so be aware of this and do not waste memory by allocating
-	 * many small buffers.
-	 */
-	if (dev_priv->card_type == NV_50) {
-		uint32_t block_size = dev_priv->vram_size >> 15;
-		int i;
-
-		switch (tile_flags) {
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7a00:
-			if (is_power_of_2(block_size)) {
-				for (i = 1; i < 10; i++) {
-					*align = 12 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			} else {
-				for (i = 1; i < 10; i++) {
-					*align = 8 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			}
-			*size = roundup(*size, *align);
-			break;
-		default:
-			break;
-		}
-
-	} else {
-		if (tile_mode) {
+	if (dev_priv->card_type < NV_50) {
+		if (nvbo->tile_mode) {
 			if (dev_priv->chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup(*size, 32 * tile_mode);
+				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
 		}
+	} else {
+		if (likely(dev_priv->chan_vm)) {
+			if (*size > 256 * 1024)
+				*page_shift = dev_priv->chan_vm->lpg_shift;
+			else
+				*page_shift = dev_priv->chan_vm->spg_shift;
+		} else {
+			*page_shift = 12;
+		}
+
+		*size = roundup(*size, (1 << *page_shift));
+		*align = max((1 << *page_shift), *align);
+	}
 
 	/* ALIGN works only on powers of two. */
 	*size = roundup(*size, PAGE_SIZE);
-
-	if (dev_priv->card_type == NV_50) {
-		*size = roundup(*size, 65536);
-		*align = max(65536, *align);
-	}
 }
 
 int

@@ -132,7 +103,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret = 0;
+	int ret = 0, page_shift = 0;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)

@@ -145,10 +116,18 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
-			       &align, &size);
+	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
+	if (!nvbo->no_vm && dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+				     NV_MEM_ACCESS_RW, &nvbo->vma);
+		if (ret) {
+			kfree(nvbo);
+			return ret;
+		}
+	}
+
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;

@@ -161,6 +140,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	}
 	nvbo->channel = NULL;
 
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
 	*pnvbo = nvbo;
 	return 0;
 }

@@ -244,7 +228,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
 	nouveau_bo_placement_set(nvbo, memtype, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:

@@ -280,7 +264,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:

@@ -319,6 +303,25 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
 	ttm_bo_kunmap(&nvbo->kmap);
 }
 
+int
+nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
+		    bool no_wait_reserve, bool no_wait_gpu)
+{
+	int ret;
+
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
+			      no_wait_reserve, no_wait_gpu);
+	if (ret)
+		return ret;
+
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
+	return 0;
+}
+
 u16
 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
 {

@@ -410,37 +413,40 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-		man->func = &ttm_bo_manager_func;
+		if (dev_priv->card_type >= NV_50) {
+			man->func = &nouveau_vram_manager;
+			man->io_reserve_fastpath = false;
+			man->use_io_reserve_lru = true;
+		} else {
+			man->func = &ttm_bo_manager_func;
+		}
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		if (dev_priv->card_type == NV_50)
-			man->gpu_offset = 0x40000000;
-		else
-			man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-			man->available_caching = TTM_PL_FLAG_UNCACHED;
-			man->default_caching = TTM_PL_FLAG_UNCACHED;
+			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
+			man->default_caching = TTM_PL_FLAG_WC;
 			break;
 		case NOUVEAU_GART_SGDMA:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
+			man->gpu_offset = dev_priv->gart_info.aper_base;
 			break;
 		default:
 			NV_ERROR(dev, "Unknown GART type: %d\n",
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);

@@ -485,16 +491,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 	if (ret)
 		return ret;
 
-	if (nvbo->channel) {
-		ret = nouveau_fence_sync(fence, nvbo->channel);
-		if (ret)
-			goto out;
-	}
-
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
 					no_wait_reserve, no_wait_gpu, new_mem);
-out:
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);
 	return ret;
 }
 
@ -515,6 +514,58 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
|
|||
return chan->vram_handle;
|
||||
}
|
||||
|
||||
static int
|
||||
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
|
||||
struct nouveau_bo *nvbo = nouveau_bo(bo);
|
||||
u64 src_offset = old_mem->start << PAGE_SHIFT;
|
||||
u64 dst_offset = new_mem->start << PAGE_SHIFT;
|
||||
u32 page_count = new_mem->num_pages;
|
||||
int ret;
|
||||
|
||||
if (!nvbo->no_vm) {
|
||||
if (old_mem->mem_type == TTM_PL_VRAM)
|
||||
src_offset = nvbo->vma.offset;
|
||||
else
|
||||
src_offset += dev_priv->gart_info.aper_base;
|
||||
|
||||
if (new_mem->mem_type == TTM_PL_VRAM)
|
||||
dst_offset = nvbo->vma.offset;
|
||||
else
|
||||
dst_offset += dev_priv->gart_info.aper_base;
|
||||
}
|
||||
|
||||
page_count = new_mem->num_pages;
|
||||
while (page_count) {
|
||||
int line_count = (page_count > 2047) ? 2047 : page_count;
|
||||
|
||||
ret = RING_SPACE(chan, 12);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
|
||||
OUT_RING (chan, upper_32_bits(dst_offset));
|
||||
OUT_RING (chan, lower_32_bits(dst_offset));
|
||||
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
|
||||
OUT_RING (chan, upper_32_bits(src_offset));
|
||||
OUT_RING (chan, lower_32_bits(src_offset));
|
||||
OUT_RING (chan, PAGE_SIZE); /* src_pitch */
|
||||
OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
|
||||
OUT_RING (chan, PAGE_SIZE); /* line_length */
|
||||
OUT_RING (chan, line_count);
|
||||
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
|
||||
OUT_RING (chan, 0x00100110);
|
||||
|
||||
page_count -= line_count;
|
||||
src_offset += (PAGE_SIZE * line_count);
|
||||
dst_offset += (PAGE_SIZE * line_count);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
|
||||
|
@ -529,14 +580,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
|
|||
dst_offset = new_mem->start << PAGE_SHIFT;
|
||||
if (!nvbo->no_vm) {
|
||||
if (old_mem->mem_type == TTM_PL_VRAM)
|
||||
src_offset += dev_priv->vm_vram_base;
|
||||
src_offset = nvbo->vma.offset;
|
||||
else
|
||||
src_offset += dev_priv->vm_gart_base;
|
||||
src_offset += dev_priv->gart_info.aper_base;
|
||||
|
||||
if (new_mem->mem_type == TTM_PL_VRAM)
|
||||
dst_offset += dev_priv->vm_vram_base;
|
||||
dst_offset = nvbo->vma.offset;
|
||||
else
|
||||
dst_offset += dev_priv->vm_gart_base;
|
||||
dst_offset += dev_priv->gart_info.aper_base;
|
||||
}
|
||||
|
||||
ret = RING_SPACE(chan, 3);
|
||||
|
@ -683,17 +734,27 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
|
|||
int ret;
|
||||
|
||||
chan = nvbo->channel;
|
||||
if (!chan || nvbo->no_vm)
|
||||
if (!chan || nvbo->no_vm) {
|
||||
chan = dev_priv->channel;
|
||||
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
|
||||
}
|
||||
|
||||
if (dev_priv->card_type < NV_50)
|
||||
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
|
||||
else
|
||||
if (dev_priv->card_type < NV_C0)
|
||||
ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
|
||||
if (ret)
|
||||
return ret;
|
||||
else
|
||||
ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
|
||||
if (ret == 0) {
|
||||
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
no_wait_reserve,
no_wait_gpu, new_mem);
}

return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
if (chan == dev_priv->channel)
mutex_unlock(&chan->mutex);
return ret;
}

static int

@@ -771,7 +832,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
uint64_t offset;
int ret;

if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
/* Nothing to do. */

@@ -781,18 +841,12 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,

offset = new_mem->start << PAGE_SHIFT;

if (dev_priv->card_type == NV_50) {
ret = nv50_mem_vm_bind_linear(dev,
offset + dev_priv->vm_vram_base,
new_mem->size,
nouveau_bo_tile_layout(nvbo),
offset);
if (ret)
return ret;

if (dev_priv->chan_vm) {
nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
} else if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode);
nvbo->tile_mode,
nvbo->tile_flags);
}

return 0;

@@ -808,9 +862,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,

if (dev_priv->card_type >= NV_10 &&
dev_priv->card_type < NV_50) {
if (*old_tile)
nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
}
}

@@ -879,6 +931,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
int ret;

mem->bus.addr = NULL;
mem->bus.offset = 0;

@@ -901,9 +954,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
#endif
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
{
struct nouveau_vram *vram = mem->mm_node;
u8 page_shift;

if (!dev_priv->bar1_vm) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
break;
}

if (dev_priv->card_type == NV_C0)
page_shift = vram->page_shift;
else
page_shift = 12;

ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
page_shift, NV_MEM_ACCESS_RW,
&vram->bar_vma);
if (ret)
return ret;

nouveau_vm_map(&vram->bar_vma, vram);
if (ret) {
nouveau_vm_put(&vram->bar_vma);
return ret;
}

mem->bus.offset = vram->bar_vma.offset;
if (dev_priv->card_type == NV_50) /*XXX*/
mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
}
break;
default:
return -EINVAL;

@@ -914,6 +998,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct nouveau_vram *vram = mem->mm_node;

if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
return;

if (!vram->bar_vma.node)
return;

nouveau_vm_unmap(&vram->bar_vma);
nouveau_vm_put(&vram->bar_vma);
}

static int

@@ -939,7 +1034,23 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
return nouveau_bo_validate(nvbo, false, true, false);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
struct nouveau_fence *old_fence;

if (likely(fence))
nouveau_fence_ref(fence);

spin_lock(&nvbo->bo.bdev->fence_lock);
old_fence = nvbo->bo.sync_obj;
nvbo->bo.sync_obj = fence;
spin_unlock(&nvbo->bo.bdev->fence_lock);

nouveau_fence_unref(&old_fence);
}
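nouveau_bo_fence() above is a textbook ref-then-swap: take a reference on the incoming fence first, exchange the pointer under bdev->fence_lock, and drop the old reference only after the lock is released, so the final unref never runs under the spinlock. A minimal userspace sketch of the same pattern (obj_get/obj_put and slot_set are illustrative names, not nouveau API):

#include <stdatomic.h>
#include <pthread.h>
#include <stdlib.h>

struct obj { atomic_int refs; };

static struct obj *obj_get(struct obj *o) { atomic_fetch_add(&o->refs, 1); return o; }
static void obj_put(struct obj *o) { if (o && atomic_fetch_sub(&o->refs, 1) == 1) free(o); }

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *slot;

static void slot_set(struct obj *o)
{
        struct obj *old;

        if (o)
                obj_get(o);             /* take the new ref up front */

        pthread_mutex_lock(&slot_lock);
        old = slot;                     /* swap under the lock ... */
        slot = o;
        pthread_mutex_unlock(&slot_lock);

        obj_put(old);                   /* ... drop the old ref outside it */
}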

struct ttm_bo_driver nouveau_bo_driver = {

@@ -949,11 +1060,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
.evict_flags = nouveau_bo_evict_flags,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
.sync_obj_signaled = nouveau_fence_signalled,
.sync_obj_wait = nouveau_fence_wait,
.sync_obj_flush = nouveau_fence_flush,
.sync_obj_unref = nouveau_fence_unref,
.sync_obj_ref = nouveau_fence_ref,
.sync_obj_signaled = __nouveau_fence_signalled,
.sync_obj_wait = __nouveau_fence_wait,
.sync_obj_flush = __nouveau_fence_flush,
.sync_obj_unref = __nouveau_fence_unref,
.sync_obj_ref = __nouveau_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,

@@ -38,23 +38,28 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
int ret;

if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->vm_end, NV_DMA_ACCESS_RO,
NV_DMA_TARGET_AGP, &pushbuf);
if (dev_priv->card_type < NV_C0) {
ret = nouveau_gpuobj_dma_new(chan,
NV_CLASS_DMA_IN_MEMORY, 0,
(1ULL << 40),
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_VM,
&pushbuf);
}
chan->pushbuf_base = pb->bo.offset;
} else
if (pb->bo.mem.mem_type == TTM_PL_TT) {
ret = nouveau_gpuobj_gart_dma_new(chan, 0,
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RO, &pushbuf,
NULL);
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->gart_info.aper_size,
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_GART, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_VIDMEM, &pushbuf);
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_VRAM, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of its

@@ -62,17 +67,16 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
* VRAM.
*/
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
pci_resource_start(dev->pdev,
1),
pci_resource_start(dev->pdev, 1),
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_PCI, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
}

nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
nouveau_gpuobj_ref(NULL, &pushbuf);
return 0;
return ret;
}
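The rewritten ctxdma setup above picks the push buffer's DMA target from the chipset generation and buffer placement: NV50-family cards get a single object spanning the 1ULL << 40 VM (Fermi needs no ctxdma at all), a TT-placed buffer gets a GART object, other pre-NV50 cards a VRAM object, and NV04 falls back to mapping VRAM through the PCI BAR. A hedged restatement of just that decision, with illustrative names rather than driver types:

#include <stdbool.h>

enum pushbuf_target { TARGET_NONE, TARGET_VM, TARGET_GART, TARGET_VRAM, TARGET_PCI };

/* nv50_plus: NV50 family or newer; fermi: NV_C0 or newer */
static enum pushbuf_target
pick_pushbuf_target(bool nv50_plus, bool fermi, bool pushbuf_in_tt, bool nv04)
{
        if (nv50_plus)
                return fermi ? TARGET_NONE : TARGET_VM; /* one object over the whole VM */
        if (pushbuf_in_tt)
                return TARGET_GART;     /* push buffer lives in system memory */
        if (!nv04)
                return TARGET_VRAM;
        return TARGET_PCI;              /* NV04 hack: reach VRAM through BAR1 */
}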

static struct nouveau_bo *

@@ -100,6 +104,13 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
return NULL;
}

ret = nouveau_bo_map(pushbuf);
if (ret) {
nouveau_bo_unpin(pushbuf);
nouveau_bo_ref(NULL, &pushbuf);
return NULL;
}

return pushbuf;
}

@@ -107,74 +118,59 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct drm_file *file_priv,
uint32_t vram_handle, uint32_t tt_handle)
uint32_t vram_handle, uint32_t gart_handle)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
int channel, user;
unsigned long flags;
int ret;

/*
* Alright, here is the full story
* Nvidia cards have multiple hw fifo contexts (praise them for that,
* no complicated crash-prone context switches)
* We allocate a new context for each app and let it write to it
* directly (woo, full userspace command submission !)
* When there are no more contexts, you lost
*/
for (channel = 0; channel < pfifo->channels; channel++) {
if (dev_priv->fifos[channel] == NULL)
break;
}

/* no more fifos. you lost. */
if (channel == pfifo->channels)
return -EINVAL;

dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
GFP_KERNEL);
if (!dev_priv->fifos[channel])
/* allocate and lock channel structure */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
chan = dev_priv->fifos[channel];
INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
INIT_LIST_HEAD(&chan->fence.pending);
chan->dev = dev;
chan->id = channel;
chan->file_priv = file_priv;
chan->vram_handle = vram_handle;
chan->gart_handle = tt_handle;
chan->gart_handle = gart_handle;

NV_INFO(dev, "Allocating FIFO number %d\n", channel);
kref_init(&chan->ref);
atomic_set(&chan->users, 1);
mutex_init(&chan->mutex);
mutex_lock(&chan->mutex);

/* allocate hw channel id */
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
if (!dev_priv->channels.ptr[chan->id]) {
nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
break;
}
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

if (chan->id == pfifo->channels) {
mutex_unlock(&chan->mutex);
kfree(chan);
return -ENODEV;
}

NV_DEBUG(dev, "initialising channel %d\n", chan->id);
INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
INIT_LIST_HEAD(&chan->nvsw.flip);
INIT_LIST_HEAD(&chan->fence.pending);

/* Allocate DMA push buffer */
chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
if (!chan->pushbuf_bo) {
ret = -ENOMEM;
NV_ERROR(dev, "pushbuf %d\n", ret);
nouveau_channel_free(chan);
nouveau_channel_put(&chan);
return ret;
}

nouveau_dma_pre_init(chan);

/* Locate channel's user control regs */
if (dev_priv->card_type < NV_40)
user = NV03_USER(channel);
else
if (dev_priv->card_type < NV_50)
user = NV40_USER(channel);
else
user = NV50_USER(channel);

chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
PAGE_SIZE);
if (!chan->user) {
NV_ERROR(dev, "ioremap of regs failed.\n");
nouveau_channel_free(chan);
return -ENOMEM;
}
chan->user_put = 0x40;
chan->user_get = 0x44;

@@ -182,15 +178,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_notifier_init_channel(chan);
if (ret) {
NV_ERROR(dev, "ntfy %d\n", ret);
nouveau_channel_free(chan);
nouveau_channel_put(&chan);
return ret;
}

/* Setup channel's default objects */
ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
if (ret) {
NV_ERROR(dev, "gpuobj %d\n", ret);
nouveau_channel_free(chan);
nouveau_channel_put(&chan);
return ret;
}

@@ -198,24 +194,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_channel_pushbuf_ctxdma_init(chan);
if (ret) {
NV_ERROR(dev, "pbctxdma %d\n", ret);
nouveau_channel_free(chan);
nouveau_channel_put(&chan);
return ret;
}

/* disable the fifo caches */
pfifo->reassign(dev, false);

/* Create a graphics context for new channel */
ret = pgraph->create_context(chan);
if (ret) {
nouveau_channel_free(chan);
return ret;
}

/* Construct initial RAMFC for new channel */
ret = pfifo->create_context(chan);
if (ret) {
nouveau_channel_free(chan);
nouveau_channel_put(&chan);
return ret;
}

@@ -225,83 +214,111 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
if (!ret)
ret = nouveau_fence_channel_init(chan);
if (ret) {
nouveau_channel_free(chan);
nouveau_channel_put(&chan);
return ret;
}

nouveau_debugfs_channel_init(chan);

NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
NV_DEBUG(dev, "channel %d initialised\n", chan->id);
*chan_ret = chan;
return 0;
}

/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
struct nouveau_channel *chan = NULL;

if (likely(ref && atomic_inc_not_zero(&ref->users)))
nouveau_channel_ref(ref, &chan);

return chan;
}

struct nouveau_channel *
nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
unsigned long flags;
int ret;

NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
return ERR_PTR(-EINVAL);

nouveau_debugfs_channel_fini(chan);
spin_lock_irqsave(&dev_priv->channels.lock, flags);
chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

/* Give outstanding push buffers a chance to complete */
nouveau_fence_update(chan);
if (chan->fence.sequence != chan->fence.sequence_ack) {
struct nouveau_fence *fence = NULL;
if (unlikely(!chan))
return ERR_PTR(-EINVAL);

ret = nouveau_fence_new(chan, &fence, true);
if (ret == 0) {
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
}

if (ret)
NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
if (unlikely(file_priv && chan->file_priv != file_priv)) {
nouveau_channel_put_unlocked(&chan);
return ERR_PTR(-EINVAL);
}

/* Ensure all outstanding fences are signaled. They should be if the
mutex_lock(&chan->mutex);
return chan;
}
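The new get/put API splits channel lifetime across two counters: chan->ref (a kref) keeps the structure allocated, while chan->users keeps the hardware context alive and is only acquired via atomic_inc_not_zero(), so a channel that has begun teardown cannot be revived. A self-contained userspace model of those rules (names illustrative, not driver API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct channel {
        atomic_int ref;    /* structure lifetime (kref in the driver)   */
        atomic_int users;  /* hw context lifetime; 0 => kick off the hw */
};

static struct channel *channel_get(struct channel *ch)
{
        /* mirrors atomic_inc_not_zero(&ref->users) in the hunk above */
        int u = atomic_load(&ch->users);
        while (u != 0) {
                if (atomic_compare_exchange_weak(&ch->users, &u, u + 1)) {
                        atomic_fetch_add(&ch->ref, 1);
                        return ch;
                }
        }
        return NULL; /* channel is already being torn down */
}

static void channel_put(struct channel *ch)
{
        if (atomic_fetch_sub(&ch->users, 1) == 1)
                printf("last user gone: tear down hw context %p\n", (void *)ch);
        if (atomic_fetch_sub(&ch->ref, 1) == 1)
                free(ch); /* kref_put -> nouveau_channel_del */
}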

void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
unsigned long flags;

/* decrement the refcount, and we're done if there are still refs */
if (likely(!atomic_dec_and_test(&chan->users))) {
nouveau_channel_ref(NULL, pchan);
return;
}

/* no one wants the channel anymore */
NV_DEBUG(dev, "freeing channel %d\n", chan->id);
nouveau_debugfs_channel_fini(chan);

/* give it a chance to idle */
nouveau_channel_idle(chan);

/* ensure all outstanding fences are signaled. they should be if the
* above attempts at idling were OK, but if we failed this'll tell TTM
* we're done with the buffers.
*/
nouveau_fence_channel_fini(chan);

/* This will prevent pfifo from switching channels. */
/* boot it off the hardware */
pfifo->reassign(dev, false);

/* We want to give pgraph a chance to idle and get rid of all potential
* errors. We need to do this before the lock, otherwise the irq handler
* is unable to process them.
/* We want to give pgraph a chance to idle and get rid of all
* potential errors. We need to do this without the context
* switch lock held, otherwise the irq handler is unable to
* process them.
*/
if (pgraph->channel(dev) == chan)
nouveau_wait_for_idle(dev);

spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

pgraph->fifo_access(dev, false);
if (pgraph->channel(dev) == chan)
pgraph->unload_context(dev);
pgraph->destroy_context(chan);
pgraph->fifo_access(dev, true);

if (pfifo->channel_id(dev) == chan->id) {
pfifo->disable(dev);
pfifo->unload_context(dev);
pfifo->enable(dev);
}
/* destroy the engine specific contexts */
pfifo->destroy_context(chan);
pgraph->destroy_context(chan);
if (pcrypt->destroy_context)
pcrypt->destroy_context(chan);

pfifo->reassign(dev, true);

spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* aside from its resources, the channel should now be dead,
* remove it from the channel list
*/
spin_lock_irqsave(&dev_priv->channels.lock, flags);
nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

/* Release the channel's resources */
/* destroy any resources the channel owned */
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
nouveau_bo_unmap(chan->pushbuf_bo);

@@ -310,44 +327,80 @@ nouveau_channel_free(struct nouveau_channel *chan)
}
nouveau_gpuobj_channel_takedown(chan);
nouveau_notifier_takedown_channel(chan);
if (chan->user)
iounmap(chan->user);

dev_priv->fifos[chan->id] = NULL;
nouveau_channel_ref(NULL, pchan);
}

void
nouveau_channel_put(struct nouveau_channel **pchan)
{
mutex_unlock(&(*pchan)->mutex);
nouveau_channel_put_unlocked(pchan);
}

static void
nouveau_channel_del(struct kref *ref)
{
struct nouveau_channel *chan =
container_of(ref, struct nouveau_channel, ref);

kfree(chan);
}

void
nouveau_channel_ref(struct nouveau_channel *chan,
struct nouveau_channel **pchan)
{
if (chan)
kref_get(&chan->ref);

if (*pchan)
kref_put(&(*pchan)->ref, nouveau_channel_del);

*pchan = chan;
}

void
nouveau_channel_idle(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_fence *fence = NULL;
int ret;

nouveau_fence_update(chan);

if (chan->fence.sequence != chan->fence.sequence_ack) {
ret = nouveau_fence_new(chan, &fence, true);
if (!ret) {
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}

if (ret)
NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
}
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct nouveau_channel *chan;
int i;

NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
for (i = 0; i < engine->fifo.channels; i++) {
struct nouveau_channel *chan = dev_priv->fifos[i];
chan = nouveau_channel_get(dev, file_priv, i);
if (IS_ERR(chan))
continue;

if (chan && chan->file_priv == file_priv)
nouveau_channel_free(chan);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
}
}

int
nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
int channel)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;

if (channel >= engine->fifo.channels)
return 0;
if (dev_priv->fifos[channel] == NULL)
return 0;

return (dev_priv->fifos[channel]->file_priv == file_priv);
}

/***********************************
* ioctls wrapping the functions

@@ -383,36 +436,44 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

init->subchan[0].handle = NvM2MF;
if (dev_priv->card_type < NV_50)
init->subchan[0].grclass = 0x0039;
else
init->subchan[0].grclass = 0x5039;
init->subchan[1].handle = NvSw;
init->subchan[1].grclass = NV_SW;
init->nr_subchan = 2;
if (dev_priv->card_type < NV_C0) {
init->subchan[0].handle = NvM2MF;
if (dev_priv->card_type < NV_50)
init->subchan[0].grclass = 0x0039;
else
init->subchan[0].grclass = 0x5039;
init->subchan[1].handle = NvSw;
init->subchan[1].grclass = NV_SW;
init->nr_subchan = 2;
} else {
init->subchan[0].handle = 0x9039;
init->subchan[0].grclass = 0x9039;
init->nr_subchan = 1;
}

/* Named memory object area */
ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
&init->notifier_handle);
if (ret) {
nouveau_channel_free(chan);
return ret;
}

return 0;
if (ret == 0)
atomic_inc(&chan->users); /* userspace reference */
nouveau_channel_put(&chan);
return ret;
}

static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_channel_free *cfree = data;
struct drm_nouveau_channel_free *req = data;
struct nouveau_channel *chan;

NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
chan = nouveau_channel_get(dev, file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);

nouveau_channel_free(chan);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
return 0;
}

@@ -421,18 +482,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);

@@ -37,6 +37,8 @@
#include "nouveau_connector.h"
#include "nouveau_hw.h"

static void nouveau_connector_hotplug(void *, int);

static struct nouveau_encoder *
find_encoder_by_type(struct drm_connector *connector, int type)
{

@@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector)
}

static void
nouveau_connector_destroy(struct drm_connector *drm_connector)
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector =
nouveau_connector(drm_connector);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_nouveau_private *dev_priv;
struct nouveau_gpio_engine *pgpio;
struct drm_device *dev;

if (!nv_connector)
return;

dev = nv_connector->base.dev;
dev_priv = dev->dev_private;
NV_DEBUG_KMS(dev, "\n");

pgpio = &dev_priv->engine.gpio;
if (pgpio->irq_unregister) {
pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
nouveau_connector_hotplug, connector);
}

kfree(nv_connector->edid);
drm_sysfs_connector_remove(drm_connector);
drm_connector_cleanup(drm_connector);
kfree(drm_connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}

static struct nouveau_i2c_chan *

@@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_connector *nv_connector = NULL;
struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;

@@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}

if (pgpio->irq_register) {
pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
nouveau_connector_hotplug, connector);
}

drm_sysfs_connector_add(connector);
dcb->drm = connector;
return dcb->drm;

@@ -886,3 +902,29 @@ fail:
return ERR_PTR(ret);

}

static void
nouveau_connector_hotplug(void *data, int plugged)
{
struct drm_connector *connector = data;
struct drm_device *dev = connector->dev;

NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
drm_get_connector_name(connector));

if (connector->encoder && connector->encoder->crtc &&
connector->encoder->crtc->enabled) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
struct drm_encoder_helper_funcs *helper =
connector->encoder->helper_private;

if (nv_encoder->dcb->type == OUTPUT_DP) {
if (plugged)
helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
else
helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
}
}

drm_helper_hpd_irq_event(dev);
}

@@ -29,6 +29,9 @@
#include "nouveau_drv.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"

static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)

@@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};

int
nouveau_vblank_enable(struct drm_device *dev, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;

if (dev_priv->card_type >= NV_50)
nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
else
NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
NV_PCRTC_INTR_0_VBLANK);

return 0;
}

void
nouveau_vblank_disable(struct drm_device *dev, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;

if (dev_priv->card_type >= NV_50)
nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
else
NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
}

static int
nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
struct nouveau_bo *new_bo)
{
int ret;

ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
if (ret)
return ret;

ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
if (ret)
goto fail;

ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
if (ret)
goto fail_unreserve;

return 0;

fail_unreserve:
ttm_bo_unreserve(&new_bo->bo);
fail:
nouveau_bo_unpin(new_bo);
return ret;
}
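nouveau_page_flip_reserve() reserves the two buffer objects back to back with blocking reserves; if two flips ever crossed over the same pair in opposite order this would be the classic ABBA deadlock, so the ordering is worth noting. For reference, the usual way to make a two-object lock sequence safe is to impose a global order, sketched here with plain mutexes (not nouveau code; TTM solves this differently):

#include <pthread.h>
#include <stdint.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a == b) {
                pthread_mutex_lock(a);
                return;
        }
        if ((uintptr_t)a > (uintptr_t)b) {  /* impose a global order */
                pthread_mutex_t *t = a;
                a = b;
                b = t;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
}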

static void
nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
struct nouveau_bo *new_bo,
struct nouveau_fence *fence)
{
nouveau_bo_fence(new_bo, fence);
ttm_bo_unreserve(&new_bo->bo);

nouveau_bo_fence(old_bo, fence);
ttm_bo_unreserve(&old_bo->bo);

nouveau_bo_unpin(old_bo);
}

static int
nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_bo *old_bo,
struct nouveau_bo *new_bo,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
struct drm_device *dev = chan->dev;
unsigned long flags;
int ret;

/* Queue it to the pending list */
spin_lock_irqsave(&dev->event_lock, flags);
list_add_tail(&s->head, &chan->nvsw.flip);
spin_unlock_irqrestore(&dev->event_lock, flags);

/* Synchronize with the old framebuffer */
ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
if (ret)
goto fail;

/* Emit the pageflip */
ret = RING_SPACE(chan, 2);
if (ret)
goto fail;

BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING(chan, 0);
FIRE_RING(chan);

ret = nouveau_fence_new(chan, pfence, true);
if (ret)
goto fail;

return 0;
fail:
spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head);
spin_unlock_irqrestore(&dev->event_lock, flags);
return ret;
}

int
nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
{
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
struct nouveau_channel *chan;
struct nouveau_fence *fence;
int ret;

if (dev_priv->engine.graph.accel_blocked)
return -ENODEV;

s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;

/* Don't let the buffers go away while we flip */
ret = nouveau_page_flip_reserve(old_bo, new_bo);
if (ret)
goto fail_free;

/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
{ { }, event, nouveau_crtc(crtc)->index,
fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
new_bo->bo.offset };

/* Choose the channel the flip will be handled in */
chan = nouveau_fence_channel(new_bo->bo.sync_obj);
if (!chan)
chan = nouveau_channel_get_unlocked(dev_priv->channel);
mutex_lock(&chan->mutex);

/* Emit a page flip */
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
nouveau_channel_put(&chan);
if (ret)
goto fail_unreserve;

/* Update the crtc struct and cleanup */
crtc->fb = fb;

nouveau_page_flip_unreserve(old_bo, new_bo, fence);
nouveau_fence_unref(&fence);
return 0;

fail_unreserve:
nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
fail_free:
kfree(s);
return ret;
}

int
nouveau_finish_page_flip(struct nouveau_channel *chan,
struct nouveau_page_flip_state *ps)
{
struct drm_device *dev = chan->dev;
struct nouveau_page_flip_state *s;
unsigned long flags;

spin_lock_irqsave(&dev->event_lock, flags);

if (list_empty(&chan->nvsw.flip)) {
NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
spin_unlock_irqrestore(&dev->event_lock, flags);
return -EINVAL;
}

s = list_first_entry(&chan->nvsw.flip,
struct nouveau_page_flip_state, head);
if (s->event) {
struct drm_pending_vblank_event *e = s->event;
struct timeval now;

do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}

list_del(&s->head);
*ps = *s;
kfree(s);

spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}

@@ -36,7 +36,7 @@ nouveau_dma_pre_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_bo *pushbuf = chan->pushbuf_bo;

if (dev_priv->card_type == NV_50) {
if (dev_priv->card_type >= NV_50) {
const int ib_size = pushbuf->bo.mem.size / 2;

chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;

@@ -59,17 +59,26 @@ nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *obj = NULL;
int ret, i;

/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
0x0039 : 0x5039, &obj);
if (ret)
return ret;
if (dev_priv->card_type >= NV_C0) {
ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
if (ret)
return ret;

ret = nouveau_ramht_insert(chan, NvM2MF, obj);
nouveau_gpuobj_ref(NULL, &obj);
ret = RING_SPACE(chan, 2);
if (ret)
return ret;

BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
OUT_RING (chan, 0x00009039);
FIRE_RING (chan);
return 0;
}

/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
0x0039 : 0x5039);
if (ret)
return ret;

@@ -78,11 +87,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
if (ret)
return ret;

/* Map push buffer */
ret = nouveau_bo_map(chan->pushbuf_bo);
if (ret)
return ret;

/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)

@@ -77,7 +77,8 @@ enum {
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
NvEvoFB32 = 0x01000002
NvEvoFB32 = 0x01000002,
NvEvoVRAM_LP = 0x01000003
};

#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039

@@ -124,6 +125,12 @@ OUT_RING(struct nouveau_channel *chan, int data)
extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);

static inline void
BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
{
OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
}
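BEGIN_NVC0() packs the Fermi command header exactly as the expression reads: opcode in bits 28 and up, dword count from bit 16, subchannel from bit 13, and the method offset divided by four in the low bits. A small standalone program working through the header for the BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1) call in the nouveau_dma_init() hunk earlier (assuming NvSubM2MF is subchannel 0, which is not spelled out in this diff):

#include <stdio.h>
#include <stdint.h>

/* Same field layout as the BEGIN_NVC0() inline above. */
static uint32_t nvc0_header(int op, int subc, int mthd, int size)
{
        return ((uint32_t)op << 28) | ((uint32_t)size << 16) |
               ((uint32_t)subc << 13) | ((uint32_t)mthd >> 2);
}

int main(void)
{
        /* op 2, subchannel 0 (assumed NvSubM2MF), method 0x0000, 1 dword */
        printf("header = 0x%08x\n", nvc0_header(2, 0, 0x0000, 1));
        /* prints 0x20010000: 0x20000000 (op) | 0x00010000 (count) */
        return 0;
}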

static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{

@@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
struct bit_displayport_encoder_table *dpe;
int dpe_headerlen;
uint8_t config[4], status[3];
bool cr_done, cr_max_vs, eq_done;
bool cr_done, cr_max_vs, eq_done, hpd_state;
int ret = 0, i, tries, voltage;

NV_DEBUG_KMS(dev, "link training!!\n");

@@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
/* disable hotplug detect, this flips around on some panels during
* link training.
*/
pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);

if (dpe->script0) {
NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);

@@ -439,7 +439,7 @@ stop:
}

/* re-enable hotplug detect */
pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);

return eq_done;
}

@@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
int nouveau_perflvl_wr;
module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);

MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
int nouveau_msi;
module_param_named(msi, nouveau_msi, int, 0400);

int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);

@@ -167,6 +171,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;

if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;

NV_INFO(dev, "Disabling fbcon acceleration...\n");
nouveau_fbcon_save_disable_accel(dev);

@@ -193,23 +200,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)

NV_INFO(dev, "Idling channels...\n");
for (i = 0; i < pfifo->channels; i++) {
struct nouveau_fence *fence = NULL;
chan = dev_priv->channels.ptr[i];

chan = dev_priv->fifos[i];
if (!chan || (dev_priv->card_type >= NV_50 &&
chan == dev_priv->fifos[0]))
continue;

ret = nouveau_fence_new(chan, &fence, true);
if (ret == 0) {
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
}

if (ret) {
NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
chan->id);
}
if (chan && chan->pushbuf_bo)
nouveau_channel_idle(chan);
}

pgraph->fifo_access(dev, false);

@@ -219,17 +213,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pfifo->unload_context(dev);
pgraph->unload_context(dev);

NV_INFO(dev, "Suspending GPU objects...\n");
ret = nouveau_gpuobj_suspend(dev);
ret = pinstmem->suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
goto out_abort;
}

ret = pinstmem->suspend(dev);
NV_INFO(dev, "Suspending GPU objects...\n");
ret = nouveau_gpuobj_suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
nouveau_gpuobj_suspend_cleanup(dev);
pinstmem->resume(dev);
goto out_abort;
}

@@ -263,6 +257,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_crtc *crtc;
int ret, i;

if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;

nouveau_fbcon_save_disable_accel(dev);

NV_INFO(dev, "We're back, enabling device...\n");

@@ -294,17 +291,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
}

NV_INFO(dev, "Restoring GPU objects...\n");
nouveau_gpuobj_resume(dev);

NV_INFO(dev, "Reinitialising engines...\n");
engine->instmem.resume(dev);
engine->mc.init(dev);
engine->timer.init(dev);
engine->fb.init(dev);
engine->graph.init(dev);
engine->crypt.init(dev);
engine->fifo.init(dev);

NV_INFO(dev, "Restoring GPU objects...\n");
nouveau_gpuobj_resume(dev);

nouveau_irq_postinstall(dev);

/* Re-write SKIPS, they'll have been lost over the suspend */

@@ -313,7 +311,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
int j;

for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
chan = dev_priv->fifos[i];
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->pushbuf_bo)
continue;

@@ -347,13 +345,11 @@ nouveau_pci_resume(struct pci_dev *pdev)

list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;

nv_crtc->cursor.set_offset(nv_crtc,
nv_crtc->cursor.nvbo->bo.offset -
dev_priv->vm_vram_base);

nv_crtc->cursor.set_offset(nv_crtc, offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
nv_crtc->cursor_saved_y);
nv_crtc->cursor_saved_y);
}

/* Force CLUT to get re-loaded during modeset */

@@ -393,6 +389,9 @@ static struct drm_driver driver = {
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = nouveau_vblank_enable,
.disable_vblank = nouveau_vblank_disable,
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = {

@@ -403,6 +402,7 @@ static struct drm_driver driver = {
.mmap = nouveau_ttm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = nouveau_compat_ioctl,
#endif

@@ -54,22 +54,37 @@ struct nouveau_fpriv {
#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "nouveau_bios.h"
#include "nouveau_util.h"

struct nouveau_grctx;
struct nouveau_vram;
#include "nouveau_vm.h"

#define MAX_NUM_DCB_ENTRIES 16

#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15

#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
#define NV50_VM_BLOCK (512*1024*1024ULL)
#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
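For the record, the arithmetic behind these constants: 2 GiB of per-channel VRAM address space carved into 512 MiB blocks gives exactly four page tables, which is the NV50_VM_VRAM_NR used to size vm_vram_pt[] further down. A compile-time restatement (in-tree code would use BUILD_BUG_ON; this is just a sketch):

#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
#define NV50_VM_BLOCK    (512*1024*1024ULL)
#define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)

_Static_assert(NV50_VM_VRAM_NR == 4,
               "2 GiB split into 512 MiB blocks should yield 4 page tables");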
struct nouveau_vram {
struct drm_device *dev;

struct nouveau_vma bar_vma;
u8 page_shift;

struct list_head regions;
u32 memtype;
u64 offset;
u64 size;
};

struct nouveau_tile_reg {
struct nouveau_fence *fence;
uint32_t addr;
uint32_t size;
bool used;
uint32_t addr;
uint32_t limit;
uint32_t pitch;
uint32_t zcomp;
struct drm_mm_node *tag_mem;
struct nouveau_fence *fence;
};

struct nouveau_bo {

@@ -88,6 +103,7 @@ struct nouveau_bo {

struct nouveau_channel *channel;

struct nouveau_vma vma;
bool mappable;
bool no_vm;

@@ -96,7 +112,6 @@ struct nouveau_bo {
struct nouveau_tile_reg *tile;

struct drm_gem_object *gem;
struct drm_file *cpu_filp;
int pin_refcnt;
};

@@ -133,20 +148,28 @@ enum nouveau_flags {

#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
#define NVOBJ_ENGINE_DISPLAY 2
#define NVOBJ_ENGINE_PPP 2
#define NVOBJ_ENGINE_COPY 3
#define NVOBJ_ENGINE_VP 4
#define NVOBJ_ENGINE_CRYPT 5
#define NVOBJ_ENGINE_BSP 6
#define NVOBJ_ENGINE_DISPLAY 0xcafe0001
#define NVOBJ_ENGINE_INT 0xdeadbeef

#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
#define NVOBJ_FLAG_VM (1 << 3)

#define NVOBJ_CINST_GLOBAL 0xdeadbeef

struct nouveau_gpuobj {
struct drm_device *dev;
struct kref refcount;
struct list_head list;

struct drm_mm_node *im_pramin;
struct nouveau_bo *im_backing;
uint32_t *im_backing_suspend;
int im_bound;
void *node;
u32 *suspend;

uint32_t flags;

@@ -162,10 +185,29 @@ struct nouveau_gpuobj {
void *priv;
};

struct nouveau_page_flip_state {
struct list_head head;
struct drm_pending_vblank_event *event;
int crtc, bpp, pitch, x, y;
uint64_t offset;
};

enum nouveau_channel_mutex_class {
NOUVEAU_UCHANNEL_MUTEX,
NOUVEAU_KCHANNEL_MUTEX
};

struct nouveau_channel {
struct drm_device *dev;
int id;

/* references to the channel data structure */
struct kref ref;
/* users of the hardware channel resources, the hardware
* context will be kicked off when it reaches zero. */
atomic_t users;
struct mutex mutex;

/* owner of this fifo */
struct drm_file *file_priv;
/* mapping of the fifo itself */

@@ -198,16 +240,17 @@ struct nouveau_channel {
/* PFIFO context */
struct nouveau_gpuobj *ramfc;
struct nouveau_gpuobj *cache;
void *fifo_priv;

/* PGRAPH context */
/* XXX maybe merge the two pointers as private data? */
struct nouveau_gpuobj *ramin_grctx;
struct nouveau_gpuobj *crypt_ctx;
void *pgraph_ctx;

/* NV50 VM */
struct nouveau_vm *vm;
struct nouveau_gpuobj *vm_pd;
struct nouveau_gpuobj *vm_gart_pt;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];

/* Objects */
struct nouveau_gpuobj *ramin; /* Private instmem */

@@ -238,9 +281,11 @@ struct nouveau_channel {

struct {
struct nouveau_gpuobj *vblsem;
uint32_t vblsem_head;
uint32_t vblsem_offset;
uint32_t vblsem_rval;
struct list_head vbl_wait;
struct list_head flip;
} nvsw;

struct {

@@ -258,11 +303,11 @@ struct nouveau_instmem_engine {
int (*suspend)(struct drm_device *dev);
void (*resume)(struct drm_device *dev);

int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
uint32_t *size);
void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
void (*put)(struct nouveau_gpuobj *);
int (*map)(struct nouveau_gpuobj *);
void (*unmap)(struct nouveau_gpuobj *);

void (*flush)(struct drm_device *);
};

@@ -279,15 +324,21 @@ struct nouveau_timer_engine {

struct nouveau_fb_engine {
int num_tiles;
struct drm_mm tag_heap;
void *priv;

int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev);

void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
uint32_t size, uint32_t pitch);
void (*init_tile_region)(struct drm_device *dev, int i,
uint32_t addr, uint32_t size,
uint32_t pitch, uint32_t flags);
void (*set_tile_region)(struct drm_device *dev, int i);
void (*free_tile_region)(struct drm_device *dev, int i);
};

struct nouveau_fifo_engine {
void *priv;
int channels;

struct nouveau_gpuobj *playlist[2];

@@ -310,22 +361,11 @@ struct nouveau_fifo_engine {
void (*tlb_flush)(struct drm_device *dev);
};

struct nouveau_pgraph_object_method {
int id;
int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
uint32_t data);
};

struct nouveau_pgraph_object_class {
int id;
bool software;
struct nouveau_pgraph_object_method *methods;
};

struct nouveau_pgraph_engine {
struct nouveau_pgraph_object_class *grclass;
bool accel_blocked;
bool registered;
int grctx_size;
void *priv;

/* NV2x/NV3x context table (0x400780) */
struct nouveau_gpuobj *ctx_table;

@@ -342,8 +382,7 @@ struct nouveau_pgraph_engine {
int (*unload_context)(struct drm_device *);
void (*tlb_flush)(struct drm_device *dev);

void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
uint32_t size, uint32_t pitch);
void (*set_tile_region)(struct drm_device *dev, int i);
};

struct nouveau_display_engine {

@@ -355,13 +394,19 @@ struct nouveau_display_engine {
};

struct nouveau_gpio_engine {
void *priv;

int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);

int (*get)(struct drm_device *, enum dcb_gpio_tag);
int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);

void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
void (*)(void *, int), void *);
void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
void (*)(void *, int), void *);
bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
};

struct nouveau_pm_voltage_level {

@@ -437,6 +482,7 @@ struct nouveau_pm_engine {
struct nouveau_pm_level *cur;

struct device *hwmon;
struct notifier_block acpi_nb;

int (*clock_get)(struct drm_device *, u32 id);
void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,

@@ -449,6 +495,25 @@ struct nouveau_pm_engine {
int (*temp_get)(struct drm_device *);
};

struct nouveau_crypt_engine {
bool registered;

int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
int (*create_context)(struct nouveau_channel *);
void (*destroy_context)(struct nouveau_channel *);
void (*tlb_flush)(struct drm_device *dev);
};

struct nouveau_vram_engine {
int (*init)(struct drm_device *);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
u32 type, struct nouveau_vram **);
void (*put)(struct drm_device *, struct nouveau_vram **);

bool (*flags_valid)(struct drm_device *, u32 tile_flags);
};

struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;

@@ -459,6 +524,8 @@ struct nouveau_engine {
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
struct nouveau_pm_engine pm;
struct nouveau_crypt_engine crypt;
struct nouveau_vram_engine vram;
};

struct nouveau_pll_vals {

@@ -577,18 +644,15 @@ struct drm_nouveau_private {
bool ramin_available;
struct drm_mm ramin_heap;
struct list_head gpuobj_list;
struct list_head classes;

struct nouveau_bo *vga_ram;

/* interrupt handling */
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
struct workqueue_struct *wq;
struct work_struct irq_work;
struct work_struct hpd_work;

struct {
spinlock_t lock;
uint32_t hpd0_bits;
uint32_t hpd1_bits;
} hpd_state;

struct list_head vbl_waiting;

@@ -605,8 +669,10 @@ struct drm_nouveau_private {
struct nouveau_bo *bo;
} fence;

int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
struct {
spinlock_t lock;
struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
} channels;

struct nouveau_engine engine;
struct nouveau_channel *channel;

@@ -632,12 +698,14 @@ struct drm_nouveau_private {
uint64_t aper_free;

struct nouveau_gpuobj *sg_ctxdma;
struct page *sg_dummy_page;
dma_addr_t sg_dummy_bus;
struct nouveau_vma vma;
} gart_info;

/* nv10-nv40 tiling regions */
struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
struct {
struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
spinlock_t lock;
} tile;

/* VRAM/fb configuration */
uint64_t vram_size;

@@ -650,14 +718,12 @@ struct drm_nouveau_private {
uint64_t fb_aper_free;
int fb_mtrr;

/* BAR control (NV50-) */
struct nouveau_vm *bar1_vm;
struct nouveau_vm *bar3_vm;

/* G8x/G9x virtual address space */
uint64_t vm_gart_base;
uint64_t vm_gart_size;
uint64_t vm_vram_base;
uint64_t vm_vram_size;
uint64_t vm_end;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
struct nouveau_vm *chan_vm;

struct nvbios vbios;

@@ -674,6 +740,7 @@ struct drm_nouveau_private {
struct backlight_device *backlight;

struct nouveau_channel *evo;
u32 evo_alloc;
struct {
struct dcb_entry *dcb;
u16 script;

@@ -686,6 +753,8 @@ struct drm_nouveau_private {

struct nouveau_fbdev *nfbdev;
struct apertures_struct *apertures;

bool powered_down;
};

static inline struct drm_nouveau_private *

@@ -719,16 +788,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return 0;
}

#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
struct drm_nouveau_private *nv = dev->dev_private; \
if (!nouveau_channel_owner(dev, (cl), (id))) { \
NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
DRM_CURRENTPID, (id)); \
return -EPERM; \
} \
(ch) = nv->fifos[(id)]; \
} while (0)
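With the macro above gone, per-channel ioctls follow the get/IS_ERR/put convention instead, as nouveau_ioctl_fifo_free() earlier in this series does. A hedged sketch of the shape a caller now takes (example_channel_ioctl is hypothetical; the nouveau_channel_* calls are the ones introduced here):

static int example_channel_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_nouveau_channel_free *req = data; /* any per-channel request */
        struct nouveau_channel *chan;

        chan = nouveau_channel_get(dev, file_priv, req->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* chan is referenced here and chan->mutex is held */

        nouveau_channel_put(&chan);
        return 0;
}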

/* nouveau_drv.c */
extern int nouveau_agpmode;
extern int nouveau_duallink;

@@ -748,6 +807,7 @@ extern int nouveau_force_post;
extern int nouveau_override_conntype;
extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
extern int nouveau_msi;

extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);

@@ -762,8 +822,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
struct drm_file *);
extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);

@@ -775,18 +837,18 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
uint32_t addr,
uint32_t size,
uint32_t pitch);
extern void nv10_mem_expire_tiling(struct drm_device *dev,
struct nouveau_tile_reg *tile,
struct nouveau_fence *fence);
extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
uint32_t size, uint32_t flags,
uint64_t phys);
extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
uint32_t size);
extern int nouveau_mem_detect(struct drm_device *);
extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
extern struct nouveau_tile_reg *nv10_mem_set_tiling(
struct drm_device *dev, uint32_t addr, uint32_t size,
uint32_t pitch, uint32_t flags);
extern void nv10_mem_put_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tile,
struct nouveau_fence *fence);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;

/* nvc0_vram.c */
extern const struct ttm_mem_type_manager_func nvc0_vram_manager;

/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);

@@ -803,21 +865,44 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
extern struct drm_ioctl_desc nouveau_ioctls[];
extern int nouveau_max_ioctl;
extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
int channel);
extern int nouveau_channel_alloc(struct drm_device *dev,
struct nouveau_channel **chan,
struct drm_file *file_priv,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
extern void nouveau_channel_free(struct nouveau_channel *);
extern struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *);
extern struct nouveau_channel *
nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
extern void nouveau_channel_put(struct nouveau_channel **);
extern void nouveau_channel_ref(struct nouveau_channel *chan,
struct nouveau_channel **pchan);
extern void nouveau_channel_idle(struct nouveau_channel *chan);
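The pairing that matters in the new channel API is nouveau_channel_get() against nouveau_channel_put(): get looks the channel up by id, validates that the caller owns it, and returns an ERR_PTR-encoded error on failure. The nouveau_gem.c hunks later in this diff all follow the same shape (fragment, error handling abbreviated):

    chan = nouveau_channel_get(dev, file_priv, req->channel);
    if (IS_ERR(chan))
            return PTR_ERR(chan);
    /* ... submit work against the channel ... */
    nouveau_channel_put(&chan);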

/* nouveau_object.c */
#define NVOBJ_CLASS(d,c,e) do { \
int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
if (ret) \
return ret; \
} while(0)

#define NVOBJ_MTHD(d,c,m,e) do { \
int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
if (ret) \
return ret; \
} while(0)

extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
extern int nouveau_gpuobj_suspend(struct drm_device *dev);
extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
extern void nouveau_gpuobj_resume(struct drm_device *dev);
extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
int (*exec)(struct nouveau_channel *,
u32 class, u32 mthd, u32 data));
extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
uint32_t vram_h, uint32_t tt_h);
extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);

@@ -832,21 +917,25 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, int access,
int target, struct nouveau_gpuobj **);
extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
uint64_t offset, uint64_t size,
int access, struct nouveau_gpuobj **,
uint32_t *o_ret);
extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
struct nouveau_gpuobj **);
extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
struct nouveau_gpuobj **);
extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
u64 size, int target, int access, u32 type,
u32 comp, struct nouveau_gpuobj **pobj);
extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
int class, u64 base, u64 size, int target,
int access, u32 type, u32 comp);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
struct drm_file *);

/* nouveau_irq.c */
extern int nouveau_irq_init(struct drm_device *);
extern void nouveau_irq_fini(struct drm_device *);
extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
extern void nouveau_irq_register(struct drm_device *, int status_bit,
void (*)(struct drm_device *));
extern void nouveau_irq_unregister(struct drm_device *, int status_bit);
extern void nouveau_irq_preinstall(struct drm_device *);
extern int nouveau_irq_postinstall(struct drm_device *);
extern void nouveau_irq_uninstall(struct drm_device *);

@@ -854,8 +943,8 @@ extern void nouveau_irq_uninstall(struct drm_device *);
/* nouveau_sgdma.c */
extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
uint32_t *page);
extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
uint32_t offset);
extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);

/* nouveau_debugfs.c */

@@ -966,18 +1055,25 @@ extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
uint32_t addr, uint32_t size,
uint32_t pitch, uint32_t flags);
extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);

/* nv30_fb.c */
extern int nv30_fb_init(struct drm_device *);
extern void nv30_fb_takedown(struct drm_device *);
extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
uint32_t addr, uint32_t size,
uint32_t pitch, uint32_t flags);
extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);

/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);

/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);

@@ -989,6 +1085,7 @@ extern void nvc0_fb_takedown(struct drm_device *);

/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_fini(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);

@@ -998,19 +1095,18 @@ extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct nouveau_channel *);
extern int nv04_fifo_load_context(struct nouveau_channel *);
extern int nv04_fifo_unload_context(struct drm_device *);
extern void nv04_fifo_isr(struct drm_device *);

/* nv10_fifo.c */
extern int nv10_fifo_init(struct drm_device *);
extern int nv10_fifo_channel_id(struct drm_device *);
extern int nv10_fifo_create_context(struct nouveau_channel *);
extern void nv10_fifo_destroy_context(struct nouveau_channel *);
extern int nv10_fifo_load_context(struct nouveau_channel *);
extern int nv10_fifo_unload_context(struct drm_device *);

/* nv40_fifo.c */
extern int nv40_fifo_init(struct drm_device *);
extern int nv40_fifo_create_context(struct nouveau_channel *);
extern void nv40_fifo_destroy_context(struct nouveau_channel *);
extern int nv40_fifo_load_context(struct nouveau_channel *);
extern int nv40_fifo_unload_context(struct drm_device *);

@@ -1038,7 +1134,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *);
extern int nvc0_fifo_unload_context(struct drm_device *);

/* nv04_graph.c */
extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
extern int nv04_graph_init(struct drm_device *);
extern void nv04_graph_takedown(struct drm_device *);
extern void nv04_graph_fifo_access(struct drm_device *, bool);

@@ -1047,10 +1142,11 @@ extern int nv04_graph_create_context(struct nouveau_channel *);
extern void nv04_graph_destroy_context(struct nouveau_channel *);
extern int nv04_graph_load_context(struct nouveau_channel *);
extern int nv04_graph_unload_context(struct drm_device *);
extern void nv04_graph_context_switch(struct drm_device *);
extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data);
extern struct nouveau_bitfield nv04_graph_nsource[];

/* nv10_graph.c */
extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
extern int nv10_graph_init(struct drm_device *);
extern void nv10_graph_takedown(struct drm_device *);
extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);

@@ -1058,13 +1154,11 @@ extern int nv10_graph_create_context(struct nouveau_channel *);
extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_unload_context(struct drm_device *);
extern void nv10_graph_context_switch(struct drm_device *);
extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
extern struct nouveau_bitfield nv10_graph_intr[];
extern struct nouveau_bitfield nv10_graph_nstatus[];

/* nv20_graph.c */
extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
extern int nv20_graph_create_context(struct nouveau_channel *);
extern void nv20_graph_destroy_context(struct nouveau_channel *);
extern int nv20_graph_load_context(struct nouveau_channel *);

@@ -1072,11 +1166,9 @@ extern int nv20_graph_unload_context(struct drm_device *);
extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *);
extern int nv30_graph_init(struct drm_device *);
extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);

/* nv40_graph.c */
extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
extern int nv40_graph_init(struct drm_device *);
extern void nv40_graph_takedown(struct drm_device *);
extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);

@@ -1085,11 +1177,9 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
extern void nv40_grctx_init(struct nouveau_grctx *);
extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);

/* nv50_graph.c */
extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
extern int nv50_graph_init(struct drm_device *);
extern void nv50_graph_takedown(struct drm_device *);
extern void nv50_graph_fifo_access(struct drm_device *, bool);

@@ -1098,10 +1188,10 @@ extern int nv50_graph_create_context(struct nouveau_channel *);
extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
extern void nv86_graph_tlb_flush(struct drm_device *dev);
extern struct nouveau_enum nv50_data_error_names[];

/* nvc0_graph.c */
extern int nvc0_graph_init(struct drm_device *);

@@ -1113,16 +1203,22 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *);
extern int nvc0_graph_load_context(struct nouveau_channel *);
extern int nvc0_graph_unload_context(struct drm_device *);

/* nv84_crypt.c */
extern int nv84_crypt_init(struct drm_device *dev);
extern void nv84_crypt_fini(struct drm_device *dev);
extern int nv84_crypt_create_context(struct nouveau_channel *);
extern void nv84_crypt_destroy_context(struct nouveau_channel *);
extern void nv84_crypt_tlb_flush(struct drm_device *dev);

/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_suspend(struct drm_device *);
extern void nv04_instmem_resume(struct drm_device *);
extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
uint32_t *size);
extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
extern void nv04_instmem_put(struct nouveau_gpuobj *);
extern int nv04_instmem_map(struct nouveau_gpuobj *);
extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
extern void nv04_instmem_flush(struct drm_device *);

/* nv50_instmem.c */

@@ -1130,26 +1226,18 @@ extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_suspend(struct drm_device *);
extern void nv50_instmem_resume(struct drm_device *);
extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
uint32_t *size);
extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
extern void nv50_instmem_put(struct nouveau_gpuobj *);
extern int nv50_instmem_map(struct nouveau_gpuobj *);
extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
extern void nv50_instmem_flush(struct drm_device *);
extern void nv84_instmem_flush(struct drm_device *);
extern void nv50_vm_flush(struct drm_device *, int engine);

/* nvc0_instmem.c */
extern int nvc0_instmem_init(struct drm_device *);
extern void nvc0_instmem_takedown(struct drm_device *);
extern int nvc0_instmem_suspend(struct drm_device *);
extern void nvc0_instmem_resume(struct drm_device *);
extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
uint32_t *size);
extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
extern void nvc0_instmem_flush(struct drm_device *);

/* nv04_mc.c */
extern int nv04_mc_init(struct drm_device *);

@@ -1219,6 +1307,9 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_reserve, bool no_wait_gpu);

/* nouveau_fence.c */
struct nouveau_fence;

@@ -1234,12 +1325,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence,
void (*work)(void *priv, bool signalled),
void *priv);
struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
extern bool nouveau_fence_signalled(void *obj, void *arg);
extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);

extern bool __nouveau_fence_signalled(void *obj, void *arg);
extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
extern int __nouveau_fence_flush(void *obj, void *arg);
extern void __nouveau_fence_unref(void **obj);
extern void *__nouveau_fence_ref(void *obj);

static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
{
return __nouveau_fence_signalled(obj, NULL);
}
static inline int
nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
{
return __nouveau_fence_wait(obj, NULL, lazy, intr);
}
extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
extern int nouveau_fence_flush(void *obj, void *arg);
extern void nouveau_fence_unref(void **obj);
extern void *nouveau_fence_ref(void *obj);
static inline int nouveau_fence_flush(struct nouveau_fence *obj)
{
return __nouveau_fence_flush(obj, NULL);
}
static inline void nouveau_fence_unref(struct nouveau_fence **obj)
{
__nouveau_fence_unref((void **)obj);
}
static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
{
return __nouveau_fence_ref(obj);
}
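The double-underscore functions keep the void * signatures that TTM's sync-object hooks expect, while these inlines give driver code type-checked entry points over the same implementation. A self-contained sketch of that wrapper pattern (types and names here are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    struct fence { bool signalled; };

    /* void*-typed backend, as a callback table would store it */
    static bool __fence_signalled(void *obj, void *arg)
    {
            (void)arg;
            return ((struct fence *)obj)->signalled;
    }

    /* typed front end for direct callers */
    static inline bool fence_signalled(struct fence *f)
    {
            return __fence_signalled(f, NULL);
    }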

/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,

@@ -1259,15 +1373,28 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);

/* nouveau_display.c */
int nouveau_vblank_enable(struct drm_device *dev, int crtc);
void nouveau_vblank_disable(struct drm_device *dev, int crtc);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);

/* nv10_gpio.c */
int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);

/* nv50_gpio.c */
int nv50_gpio_init(struct drm_device *dev);
void nv50_gpio_fini(struct drm_device *dev);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
void (*)(void *, int), void *);
void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
void (*)(void *, int), void *);
bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);

/* nv50_calc. */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,

@@ -1334,7 +1461,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
}

#define nv_wait(dev, reg, mask, val) \
nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
#define nv_wait_ne(dev, reg, mask, val) \
nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
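nv_wait() now expands to nouveau_wait_eq(), polling a register until (value & mask) == val within a 2,000,000,000 ns (two second) budget; nv_wait_ne() is the inverse test. A user-space sketch of the poll-until-match loop behind such a macro (read_reg() is a stand-in for the driver's register read):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    extern uint32_t read_reg(uint32_t reg);  /* stand-in for nv_rd32() */

    static bool wait_eq(uint32_t reg, uint32_t mask, uint32_t val,
                        uint64_t timeout_ns)
    {
            struct timespec ts;
            uint64_t start, now;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            start = (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
            do {
                    if ((read_reg(reg) & mask) == val)
                            return true;
                    clock_gettime(CLOCK_MONOTONIC, &ts);
                    now = (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
            } while (now - start < timeout_ns);
            return false;  /* timed out */
    }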

/* PRAMIN access */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)

@@ -1447,6 +1576,23 @@ nv_match_device(struct drm_device *dev, unsigned device,
dev->pdev->subsystem_device == sub_device;
}

/* memory type/access flags, do not match hardware values */
#define NV_MEM_ACCESS_RO 1
#define NV_MEM_ACCESS_WO 2
#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
#define NV_MEM_ACCESS_SYS 4
#define NV_MEM_ACCESS_VM 8

#define NV_MEM_TARGET_VRAM 0
#define NV_MEM_TARGET_PCI 1
#define NV_MEM_TARGET_PCI_NOSNOOP 2
#define NV_MEM_TARGET_VM 3
#define NV_MEM_TARGET_GART 4

#define NV_MEM_TYPE_VM 0x7f
#define NV_MEM_COMP_VM 0x03
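The access values are single bits, so NV_MEM_ACCESS_RW is literally RO|WO, while the target values are a plain enumeration; one access mask plus one target describes a placement. The nouveau_fence.c hunk later in this diff shows them replacing the old NV_DMA_* constants (fragment from that hunk):

    ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                 mem->start << PAGE_SHIFT,
                                 mem->size, NV_MEM_ACCESS_RW,
                                 NV_MEM_TARGET_VRAM, &obj);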

/* NV_SW object class */
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064

@@ -1457,5 +1603,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
#define NV_SW_VBLSEM_OFFSET 0x00000400
#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
#define NV_SW_VBLSEM_RELEASE 0x00000408
#define NV_SW_PAGE_FLIP 0x00000500

#endif /* __NOUVEAU_DRV_H__ */

@@ -49,6 +49,102 @@
#include "nouveau_fbcon.h"
#include "nouveau_dma.h"

static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;

if (info->state != FBINFO_STATE_RUNNING)
return;

ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&dev_priv->channel->mutex)) {
if (dev_priv->card_type < NV_50)
ret = nv04_fbcon_fillrect(info, rect);
else
if (dev_priv->card_type < NV_C0)
ret = nv50_fbcon_fillrect(info, rect);
else
ret = nvc0_fbcon_fillrect(info, rect);
mutex_unlock(&dev_priv->channel->mutex);
}

if (ret == 0)
return;

if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
cfb_fillrect(info, rect);
}
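nouveau_fbcon_copyarea() and nouveau_fbcon_imageblit() below repeat this exact shape: refuse acceleration in atomic context, take the channel mutex only via trylock, dispatch by card generation, and fall back to the software cfb_*() path whenever the accelerated path is unavailable or fails. A condensed user-space sketch of that try-hardware-else-software control flow (the hw_/sw_ helpers are stand-ins):

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

    extern int hw_fillrect(const void *rect);   /* stand-in accel path   */
    extern void sw_fillrect(const void *rect);  /* stand-in cfb path     */

    static void fillrect(const void *rect)
    {
            int ret = -ENODEV;

            if (pthread_mutex_trylock(&hw_lock) == 0) {
                    ret = hw_fillrect(rect);
                    pthread_mutex_unlock(&hw_lock);
            }
            if (ret == 0)
                    return;
            /* ret != -ENODEV would mean the hardware path wedged;
             * the driver marks the GPU as locked up at this point. */
            sw_fillrect(rect);
    }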

static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;

if (info->state != FBINFO_STATE_RUNNING)
return;

ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&dev_priv->channel->mutex)) {
if (dev_priv->card_type < NV_50)
ret = nv04_fbcon_copyarea(info, image);
else
if (dev_priv->card_type < NV_C0)
ret = nv50_fbcon_copyarea(info, image);
else
ret = nvc0_fbcon_copyarea(info, image);
mutex_unlock(&dev_priv->channel->mutex);
}

if (ret == 0)
return;

if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
cfb_copyarea(info, image);
}

static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;

if (info->state != FBINFO_STATE_RUNNING)
return;

ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&dev_priv->channel->mutex)) {
if (dev_priv->card_type < NV_50)
ret = nv04_fbcon_imageblit(info, image);
else
if (dev_priv->card_type < NV_C0)
ret = nv50_fbcon_imageblit(info, image);
else
ret = nvc0_fbcon_imageblit(info, image);
mutex_unlock(&dev_priv->channel->mutex);
}

if (ret == 0)
return;

if (ret != -ENODEV)
nouveau_fbcon_gpu_lockup(info);
cfb_imageblit(info, image);
}

static int
nouveau_fbcon_sync(struct fb_info *info)
{

@@ -58,22 +154,36 @@ nouveau_fbcon_sync(struct fb_info *info)
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;

if (!chan || !chan->accel_done ||
if (!chan || !chan->accel_done || in_interrupt() ||
info->state != FBINFO_STATE_RUNNING ||
info->flags & FBINFO_HWACCEL_DISABLED)
return 0;

if (RING_SPACE(chan, 4)) {
if (!mutex_trylock(&chan->mutex))
return 0;

ret = RING_SPACE(chan, 4);
if (ret) {
mutex_unlock(&chan->mutex);
nouveau_fbcon_gpu_lockup(info);
return 0;
}

BEGIN_RING(chan, 0, 0x0104, 1);
OUT_RING(chan, 0);
BEGIN_RING(chan, 0, 0x0100, 1);
OUT_RING(chan, 0);
if (dev_priv->card_type >= NV_C0) {
BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
OUT_RING (chan, 0);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
OUT_RING (chan, 0);
} else {
BEGIN_RING(chan, 0, 0x0104, 1);
OUT_RING (chan, 0);
BEGIN_RING(chan, 0, 0x0100, 1);
OUT_RING (chan, 0);
}

nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
FIRE_RING(chan);
mutex_unlock(&chan->mutex);

ret = -EBUSY;
for (i = 0; i < 100000; i++) {

@@ -94,43 +204,27 @@ nouveau_fbcon_sync(struct fb_info *info)
}

static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nouveau_fbcon_fillrect,
.fb_copyarea = nouveau_fbcon_copyarea,
.fb_imageblit = nouveau_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};

static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_sync = nouveau_fbcon_sync,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};

static struct fb_ops nv04_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nv04_fbcon_fillrect,
.fb_copyarea = nv04_fbcon_copyarea,
.fb_imageblit = nv04_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};

static struct fb_ops nv50_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nv50_fbcon_fillrect,
.fb_copyarea = nv50_fbcon_copyarea,
.fb_imageblit = nv50_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,

@@ -257,21 +351,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_ops;
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
dev_priv->vm_vram_base;
info->fbops = &nouveau_fbcon_sw_ops;
info->fix.smem_start = dev->mode_config.fb_base +
(nvbo->bo.mem.start << PAGE_SHIFT);
info->fix.smem_len = size;

info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
info->screen_size = size;

drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);

/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(pdev, 1);
info->fix.mmio_len = pci_resource_len(pdev, 1);

/* Set aperture base/size for vesafb takeover */
info->apertures = dev_priv->apertures;
if (!info->apertures) {

@@ -285,19 +374,20 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;

mutex_unlock(&dev->struct_mutex);

if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_C0:
break;
case NV_50:
nv50_fbcon_accel_init(info);
info->fbops = &nv50_fbcon_ops;
break;
default:
nv04_fbcon_accel_init(info);
info->fbops = &nv04_fbcon_ops;
break;
};
ret = -ENODEV;
if (dev_priv->card_type < NV_50)
ret = nv04_fbcon_accel_init(info);
else
if (dev_priv->card_type < NV_C0)
ret = nv50_fbcon_accel_init(info);
else
ret = nvc0_fbcon_accel_init(info);

if (ret == 0)
info->fbops = &nouveau_fbcon_ops;
}

nouveau_fbcon_zfill(dev, nfbdev);

@@ -308,7 +398,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
nouveau_fb->base.height,
nvbo->bo.offset, nvbo);

mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;

@@ -40,15 +40,21 @@ struct nouveau_fbdev {
void nouveau_fbcon_restore(void);

void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);

int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);

int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nvc0_fbcon_accel_init(struct fb_info *info);

void nouveau_fbcon_gpu_lockup(struct fb_info *info);

int nouveau_fbcon_init(struct drm_device *dev);

@@ -32,7 +32,8 @@
#include "nouveau_dma.h"

#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
nouveau_private(dev)->card_type < NV_C0)

struct nouveau_fence {
struct nouveau_channel *channel;

@@ -64,6 +65,7 @@ nouveau_fence_del(struct kref *ref)
struct nouveau_fence *fence =
container_of(ref, struct nouveau_fence, refcount);

nouveau_channel_ref(NULL, &fence->channel);
kfree(fence);
}

@@ -76,14 +78,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
spin_lock(&chan->fence.lock);

if (USE_REFCNT(dev))
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);
/* Fetch the last sequence if the channel is still up and running */
if (likely(!list_empty(&chan->fence.pending))) {
if (USE_REFCNT(dev))
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);

if (chan->fence.sequence_ack == sequence)
goto out;
chan->fence.sequence_ack = sequence;
if (chan->fence.sequence_ack == sequence)
goto out;
chan->fence.sequence_ack = sequence;
}

list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
sequence = fence->sequence;

@@ -113,13 +118,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
if (!fence)
return -ENOMEM;
kref_init(&fence->refcount);
fence->channel = chan;
nouveau_channel_ref(chan, &fence->channel);

if (emit)
ret = nouveau_fence_emit(fence);

if (ret)
nouveau_fence_unref((void *)&fence);
nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
}

@@ -127,7 +132,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
return fence ? fence->channel : NULL;
return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}

int

@@ -135,6 +140,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;

ret = RING_SPACE(chan, 2);

@@ -155,8 +161,15 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock(&chan->fence.lock);

BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
if (USE_REFCNT(dev)) {
if (dev_priv->card_type < NV_C0)
BEGIN_RING(chan, NvSubSw, 0x0050, 1);
else
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
} else {
BEGIN_RING(chan, NvSubSw, 0x0150, 1);
}
OUT_RING (chan, fence->sequence);
FIRE_RING(chan);

return 0;

@@ -182,7 +195,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
}

void
nouveau_fence_unref(void **sync_obj)
__nouveau_fence_unref(void **sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(*sync_obj);

@@ -192,7 +205,7 @@ nouveau_fence_unref(void **sync_obj)
}

void *
nouveau_fence_ref(void *sync_obj)
__nouveau_fence_ref(void *sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);

@@ -201,7 +214,7 @@ nouveau_fence_ref(void *sync_obj)
}

bool
nouveau_fence_signalled(void *sync_obj, void *sync_arg)
__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
struct nouveau_channel *chan = fence->channel;

@@ -214,13 +227,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
}

int
nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
unsigned long sleep_time = jiffies + 1;
int ret = 0;

while (1) {
if (nouveau_fence_signalled(sync_obj, sync_arg))
if (__nouveau_fence_signalled(sync_obj, sync_arg))
break;

if (time_after_eq(jiffies, timeout)) {

@@ -230,7 +244,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
if (lazy)
if (lazy && time_after_eq(jiffies, sleep_time))
schedule_timeout(1);

if (intr && signal_pending(current)) {
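The wait loop above polls with a three-second deadline; the added sleep_time test means the first jiffy is spent busy-spinning before the loop starts yielding the CPU, which keeps very short waits cheap. A user-space sketch of that spin-then-sleep wait (fence_signalled() is a stand-in):

    #include <errno.h>
    #include <stdbool.h>
    #include <time.h>

    extern bool fence_signalled(void);  /* stand-in for the real check */

    static int fence_wait(long timeout_ms, long spin_ms)
    {
            struct timespec ts;
            long start, now;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            start = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
            now = start;
            while (!fence_signalled()) {
                    if (now - start >= timeout_ms)
                            return -EBUSY;
                    if (now - start >= spin_ms) {
                            /* past the busy-spin window: yield briefly */
                            struct timespec d = { 0, 1000000 }; /* 1 ms */
                            nanosleep(&d, NULL);
                    }
                    clock_gettime(CLOCK_MONOTONIC, &ts);
                    now = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
            }
            return 0;
    }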

@@ -368,7 +382,7 @@ emit_semaphore(struct nouveau_channel *chan, int method,

kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref((void *)&fence);
nouveau_fence_unref(&fence);

return 0;
}

@@ -380,33 +394,49 @@ nouveau_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *chan = nouveau_fence_channel(fence);
struct drm_device *dev = wchan->dev;
struct nouveau_semaphore *sema;
int ret;
int ret = 0;

if (likely(!fence || chan == wchan ||
nouveau_fence_signalled(fence, NULL)))
return 0;
if (likely(!chan || chan == wchan ||
nouveau_fence_signalled(fence)))
goto out;

sema = alloc_semaphore(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
return nouveau_fence_wait(fence, NULL, false, false);
ret = nouveau_fence_wait(fence, true, false);
goto out;
}

/* try to take chan's mutex, if we can't take it right away
* we have to fallback to software sync to prevent locking
* order issues
*/
if (!mutex_trylock(&chan->mutex)) {
ret = nouveau_fence_wait(fence, true, false);
goto out_unref;
}

/* Make wchan wait until it gets signalled */
ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
if (ret)
goto out;
goto out_unlock;

/* Signal the semaphore from chan */
ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
out:

out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
kref_put(&sema->ref, free_semaphore);
out:
if (chan)
nouveau_channel_put_unlocked(&chan);
return ret;
}
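The rewritten nouveau_fence_sync() has three outcomes: nothing to wait for, a hardware semaphore (ACQUIRE emitted on the waiting channel, RELEASE on the signalling one), or a CPU-side wait as the fallback whenever the semaphore cannot be allocated or the other channel's mutex cannot be taken without risking lock-order inversion. The trylock-or-fallback core, sketched in user-space terms (all helpers are stand-ins):

    #include <pthread.h>

    extern int cpu_wait(void);      /* stand-in software fallback      */
    extern int emit_acquire(void);  /* stand-ins for the semaphore ops */
    extern int emit_release(void);

    static int sync_channels(pthread_mutex_t *other)
    {
            int ret;

            /* Locking the other channel unconditionally could invert
             * the usual lock order and deadlock, so if its mutex is
             * not free right now, do the wait on the CPU instead. */
            if (pthread_mutex_trylock(other) != 0)
                    return cpu_wait();

            ret = emit_acquire();          /* waiter blocks on it   */
            if (ret == 0)
                    ret = emit_release();  /* signaller releases it */
            pthread_mutex_unlock(other);
            return ret;
    }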

int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
return 0;
}

@@ -420,30 +450,27 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
int ret;

/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;

ret = nouveau_ramht_insert(chan, NvSw, obj);
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;

ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, 0, 1);
OUT_RING(chan, NvSw);
/* we leave subchannel empty for nvc0 */
if (dev_priv->card_type < NV_C0) {
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, 0, 1);
OUT_RING(chan, NvSw);
}

/* Create a DMA object for the shared cross-channel sync area. */
if (USE_SEMA(dev)) {
struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
mem->start << PAGE_SHIFT,
mem->size << PAGE_SHIFT,
NV_DMA_ACCESS_RW,
NV_DMA_TARGET_VIDMEM, &obj);
mem->size, NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VRAM, &obj);
if (ret)
return ret;

@@ -473,6 +500,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
struct nouveau_fence *tmp, *fence;

spin_lock(&chan->fence.lock);

list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);

@@ -482,6 +511,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)

kref_put(&fence->refcount, nouveau_fence_del);
}

spin_unlock(&chan->fence.lock);
}

int

@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
nvbo->gem = NULL;

if (unlikely(nvbo->cpu_filp))
ttm_bo_synccpu_write_release(bo);

if (unlikely(nvbo->pin_refcnt)) {
nvbo->pin_refcnt = 1;
nouveau_bo_unpin(nvbo);

@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
return 0;
}

static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;

if (dev_priv->card_type >= NV_50) {
switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
case 0x0000:
case 0x1800:
case 0x2800:
case 0x4800:
case 0x7000:
case 0x7400:
case 0x7a00:
case 0xe000:
return true;
}
} else {
if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
return true;
}

NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
return false;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)

@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

if (req->channel_hint) {
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
file_priv, chan);
}

if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)

@@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;

if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}

if (req->channel_hint) {
chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
if (IS_ERR(chan))
return PTR_ERR(chan);
}

ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
req->info.tile_mode, req->info.tile_flags, false,
(req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
&nvbo);
if (chan)
nouveau_channel_put(&chan);
if (ret)
return ret;

@@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)

list_for_each_safe(entry, tmp, list) {
nvbo = list_entry(entry, struct nouveau_bo, entry);
if (likely(fence)) {
struct nouveau_fence *prev_fence;

spin_lock(&nvbo->bo.lock);
prev_fence = nvbo->bo.sync_obj;
nvbo->bo.sync_obj = nouveau_fence_ref(fence);
spin_unlock(&nvbo->bo.lock);
nouveau_fence_unref((void *)&prev_fence);
}
nouveau_bo_fence(nvbo, fence);

if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);

@@ -299,14 +268,15 @@ retry:
return -EINVAL;
}

ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
if (ret) {
validate_fini(op, NULL);
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
if (unlikely(ret == -EAGAIN))
ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
drm_gem_object_unreference_unlocked(gem);
if (ret) {
NV_ERROR(dev, "fail reserve\n");
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "fail reserve\n");
return ret;
}
goto retry;

@@ -331,25 +301,6 @@ retry:
validate_fini(op, NULL);
return -EINVAL;
}

if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
validate_fini(op, NULL);

if (nvbo->cpu_filp == file_priv) {
NV_ERROR(dev, "bo %p mapped by process trying "
"to validate it!\n", nvbo);
return -EINVAL;
}

mutex_unlock(&drm_global_mutex);
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
mutex_lock(&drm_global_mutex);
if (ret) {
NV_ERROR(dev, "fail wait_cpu\n");
return ret;
}
goto retry;
}
}

return 0;

@@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
}

nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false, false);
ret = nouveau_bo_validate(nvbo, true, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
NV_ERROR(dev, "fail ttm_validate\n");
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "fail ttm_validate\n");
return ret;
}

@@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,

ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
NV_ERROR(dev, "validate_init\n");
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate_init\n");
return ret;
}

ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
NV_ERROR(dev, "validate vram_list\n");
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}

@@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,

ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
NV_ERROR(dev, "validate gart_list\n");
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}

@@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,

ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
NV_ERROR(dev, "validate both_list\n");
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}

@@ -557,9 +512,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
data |= r->vor;
}

spin_lock(&nvbo->bo.lock);
spin_lock(&nvbo->bo.bdev->fence_lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.lock);
spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
break;

@@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;

NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
chan = nouveau_channel_get(dev, file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);

req->vram_available = dev_priv->fb_aper_free;
req->gart_available = dev_priv->gart_info.aper_free;

@@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
nouveau_channel_put(&chan);
return -EINVAL;
}

if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
nouveau_channel_put(&chan);
return -EINVAL;
}

if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
nouveau_channel_put(&chan);
return -EINVAL;
}

push = u_memcpya(req->push, req->nr_push, sizeof(*push));
if (IS_ERR(push))
if (IS_ERR(push)) {
nouveau_channel_put(&chan);
return PTR_ERR(push);
}

bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
kfree(push);
nouveau_channel_put(&chan);
return PTR_ERR(bo);
}
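Because the channel reference is now taken up front with nouveau_channel_get(), every early exit in this ioctl must drop it again, which the hunk does by repeating nouveau_channel_put() before each return. The equivalent single-exit shape, for comparison (a sketch only; the check_/copy_/submit helpers are hypothetical):

    extern int check_limits(void);
    extern int copy_user_arrays(void);
    extern int submit(void);

    static int pushbuf_shape(void)
    {
            int ret;

            /* channel reference taken here in the real code */
            ret = check_limits();
            if (ret)
                    goto out_put;
            ret = copy_user_arrays();
            if (ret)
                    goto out_put;
            ret = submit();
    out_put:
            /* single release point for the channel reference */
            return ret;
    }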

@@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
req->nr_buffers, &op, &do_reloc);
if (ret) {
NV_ERROR(dev, "validate: %d\n", ret);
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate: %d\n", ret);
goto out;
}

@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,

out:
validate_fini(&op, fence);
nouveau_fence_unref((void**)&fence);
nouveau_fence_unref(&fence);
kfree(bo);
kfree(push);

@@ -750,6 +714,7 @@ out_next:
req->suffix1 = 0x00000000;
}

nouveau_channel_put(&chan);
return ret;
}

@@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);

if (nvbo->cpu_filp) {
if (nvbo->cpu_filp == file_priv)
goto out;

ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
if (ret)
goto out;
}

if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
spin_unlock(&nvbo->bo.lock);
} else {
ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
if (ret == 0)
nvbo->cpu_filp = file_priv;
}

out:
spin_lock(&nvbo->bo.bdev->fence_lock);
ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
spin_unlock(&nvbo->bo.bdev->fence_lock);
drm_gem_object_unreference_unlocked(gem);
return ret;
}

@@ -809,26 +757,7 @@ int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_cpu_prep *req = data;
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
int ret = -EINVAL;

gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);

if (nvbo->cpu_filp != file_priv)
goto out;
nvbo->cpu_filp = NULL;

ttm_bo_synccpu_write_release(&nvbo->bo);
ret = 0;

out:
drm_gem_object_unreference_unlocked(gem);
return ret;
return 0;
}

int

@@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);

reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
else
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);

@@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head,
if (dev_priv->card_type == NV_10) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
}

wr_cio_state(dev, head, regp, NV_CIO_CRE_53);

@@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head,

NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);

/* Setting 1 on this value gives you interrupts for every vblank period. */
NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
/* Enable vblank interrupts. */
NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
(dev->vblank_enabled[head] ? 1 : 0));
NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}

The diff for one file is not shown here because of its large size.

@@ -36,183 +36,112 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

/*
* NV10-NV40 tiling helpers
*/

static void
nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
uint32_t size, uint32_t pitch)
nv10_mem_update_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tile, uint32_t addr,
uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_tile_reg *tile = &dev_priv->tile[i];
int i = tile - dev_priv->tile.reg;
unsigned long save;

tile->addr = addr;
tile->size = size;
tile->used = !!pitch;
nouveau_fence_unref((void **)&tile->fence);
nouveau_fence_unref(&tile->fence);

if (tile->pitch)
pfb->free_tile_region(dev, i);

if (pitch)
pfb->init_tile_region(dev, i, addr, size, pitch, flags);

spin_lock_irqsave(&dev_priv->context_switch_lock, save);
pfifo->reassign(dev, false);
pfifo->cache_pull(dev, false);

nouveau_wait_for_idle(dev);

pgraph->set_region_tiling(dev, i, addr, size, pitch);
pfb->set_region_tiling(dev, i, addr, size, pitch);
pfb->set_tile_region(dev, i);
pgraph->set_tile_region(dev, i);

pfifo->cache_pull(dev, true);
pfifo->reassign(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
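nv10_mem_update_tile_region() brackets the register writes with a full quiesce: under the context-switch lock it stops the FIFO from feeding the GPU, waits for idle, programs the PFB and PGRAPH copies of the tile setup, then resumes. The generic stop/drain/program/resume shape, sketched (the callbacks are stand-ins):

    #include <pthread.h>

    extern void fifo_pause(int on);   /* stand-ins for the reassign/ */
    extern void wait_idle(void);      /* cache_pull and idle-wait    */
    extern void program_tiles(void);  /* steps in the real function  */

    static pthread_mutex_t ctxsw_lock = PTHREAD_MUTEX_INITIALIZER;

    static void update_tiling(void)
    {
            pthread_mutex_lock(&ctxsw_lock);
            fifo_pause(1);          /* stop new work reaching the GPU  */
            wait_idle();            /* drain what is already in flight */
            program_tiles();        /* safe: nothing is executing      */
            fifo_pause(0);
            pthread_mutex_unlock(&ctxsw_lock);
    }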

static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

spin_lock(&dev_priv->tile.lock);

if (!tile->used &&
(!tile->fence || nouveau_fence_signalled(tile->fence)))
tile->used = true;
else
tile = NULL;

spin_unlock(&dev_priv->tile.lock);
return tile;
}

void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
struct nouveau_fence *fence)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;

if (tile) {
spin_lock(&dev_priv->tile.lock);
if (fence) {
/* Mark it as pending. */
tile->fence = fence;
nouveau_fence_ref(fence);
}

tile->used = false;
spin_unlock(&dev_priv->tile.lock);
}
}
|
||||
|
||||
struct nouveau_tile_reg *
|
||||
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
|
||||
uint32_t pitch)
|
||||
uint32_t pitch, uint32_t flags)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
|
||||
struct nouveau_tile_reg *found = NULL;
|
||||
unsigned long i, flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
|
||||
|
||||
for (i = 0; i < pfb->num_tiles; i++) {
|
||||
struct nouveau_tile_reg *tile = &dev_priv->tile[i];
|
||||
|
||||
if (tile->used)
|
||||
/* Tile region in use. */
|
||||
continue;
|
||||
|
||||
if (tile->fence &&
|
||||
!nouveau_fence_signalled(tile->fence, NULL))
|
||||
/* Pending tile region. */
|
||||
continue;
|
||||
|
||||
if (max(tile->addr, addr) <
|
||||
min(tile->addr + tile->size, addr + size))
|
||||
/* Kill an intersecting tile region. */
|
||||
nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
|
||||
|
||||
if (pitch && !found) {
|
||||
/* Free tile region. */
|
||||
nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
|
||||
found = tile;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
void
|
||||
nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
|
||||
struct nouveau_fence *fence)
|
||||
{
|
||||
if (fence) {
|
||||
/* Mark it as pending. */
|
||||
tile->fence = fence;
|
||||
nouveau_fence_ref(fence);
|
||||
}
|
||||
|
||||
tile->used = false;
|
||||
}
|
||||
|
||||
/*
|
||||
* NV50 VM helpers
|
||||
*/
|
||||
int
|
||||
nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
|
||||
uint32_t flags, uint64_t phys)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_gpuobj *pgt;
|
||||
unsigned block;
|
||||
struct nouveau_tile_reg *tile, *found = NULL;
|
||||
int i;
|
||||
|
||||
virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
|
||||
size = (size >> 16) << 1;
|
||||
for (i = 0; i < pfb->num_tiles; i++) {
|
||||
tile = nv10_mem_get_tile_region(dev, i);
|
||||
|
||||
phys |= ((uint64_t)flags << 32);
|
||||
phys |= 1;
|
||||
if (dev_priv->vram_sys_base) {
|
||||
phys += dev_priv->vram_sys_base;
|
||||
phys |= 0x30;
|
||||
if (pitch && !found) {
|
||||
found = tile;
|
||||
continue;
|
||||
|
||||
} else if (tile && tile->pitch) {
|
||||
/* Kill an unused tile region. */
|
||||
nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
|
||||
}
|
||||
|
||||
nv10_mem_put_tile_region(dev, tile, NULL);
|
||||
}
|
||||
|
||||
while (size) {
|
||||
unsigned offset_h = upper_32_bits(phys);
|
||||
unsigned offset_l = lower_32_bits(phys);
|
||||
unsigned pte, end;
|
||||
|
||||
for (i = 7; i >= 0; i--) {
|
||||
block = 1 << (i + 1);
|
||||
if (size >= block && !(virt & (block - 1)))
|
||||
break;
|
||||
}
|
||||
offset_l |= (i << 7);
|
||||
|
||||
phys += block << 15;
|
||||
size -= block;
|
||||
|
||||
while (block) {
|
||||
pgt = dev_priv->vm_vram_pt[virt >> 14];
|
||||
pte = virt & 0x3ffe;
|
||||
|
||||
end = pte + block;
|
||||
if (end > 16384)
|
||||
end = 16384;
|
||||
block -= (end - pte);
|
||||
virt += (end - pte);
|
||||
|
||||
while (pte < end) {
|
||||
nv_wo32(pgt, (pte * 4) + 0, offset_l);
|
||||
nv_wo32(pgt, (pte * 4) + 4, offset_h);
|
||||
pte += 2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dev_priv->engine.instmem.flush(dev);
|
||||
dev_priv->engine.fifo.tlb_flush(dev);
|
||||
dev_priv->engine.graph.tlb_flush(dev);
|
||||
nv50_vm_flush(dev, 6);
|
||||
return 0;
|
||||
}
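For illustration (not part of the patch): the inner loop above encodes each mapping as the largest power-of-two run of entries (2 to 256) that both fits the remaining size and keeps the virtual cursor naturally aligned; the chosen exponent is folded into the low PTE word as (i << 7). A toy model of just that block pick, mirroring the loop:

/* Toy model of the block-size pick in nv50_mem_vm_bind_linear(). */
static unsigned pick_block(unsigned virt, unsigned size, int *log2i)
{
    int i;
    unsigned block = 0;

    for (i = 7; i >= 0; i--) {
        block = 1u << (i + 1);
        if (size >= block && !(virt & (block - 1)))
            break;
    }
    *log2i = i;     /* encoded into the PTE as (i << 7) */
    return block;
}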
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_gpuobj *pgt;
    unsigned pages, pte, end;

    virt -= dev_priv->vm_vram_base;
    pages = (size >> 16) << 1;

    while (pages) {
        pgt = dev_priv->vm_vram_pt[virt >> 29];
        pte = (virt & 0x1ffe0000ULL) >> 15;

        end = pte + pages;
        if (end > 16384)
            end = 16384;
        pages -= (end - pte);
        virt += (end - pte) << 15;

        while (pte < end) {
            nv_wo32(pgt, (pte * 4), 0);
            pte++;
        }
    }

    dev_priv->engine.instmem.flush(dev);
    dev_priv->engine.fifo.tlb_flush(dev);
    dev_priv->engine.graph.tlb_flush(dev);
    nv50_vm_flush(dev, 6);
    if (found)
        nv10_mem_update_tile_region(dev, found, addr, size,
                                    pitch, flags);
    return found;
}
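For illustration (not part of the patch): the rework splits the old nv10_mem_set_region_tiling() into an explicit get/update/put lifecycle for tile regions. A minimal sketch of how a caller holds a region across GPU use, assuming a fence object that signals when the dependent rendering completes:

struct nouveau_tile_reg *tile;

/* Reserve a free region and program the tiling registers. */
tile = nv10_mem_set_tiling(dev, addr, size, pitch, flags);
if (tile) {
    /* ... submit work that relies on the tiled layout ... */

    /* Release; the region stays "pending" until the fence signals. */
    nv10_mem_put_tile_region(dev, tile, fence);
}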
/*

@@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
    return 0;
}

static void
nv50_vram_preinit(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    int i, parts, colbits, rowbitsa, rowbitsb, banks;
    u64 rowsize, predicted;
    u32 r0, r4, rt, ru;

    r0 = nv_rd32(dev, 0x100200);
    r4 = nv_rd32(dev, 0x100204);
    rt = nv_rd32(dev, 0x100250);
    ru = nv_rd32(dev, 0x001540);
    NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);

    for (i = 0, parts = 0; i < 8; i++) {
        if (ru & (0x00010000 << i))
            parts++;
    }

    colbits  = (r4 & 0x0000f000) >> 12;
    rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
    rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
    banks    = ((r4 & 0x01000000) ? 8 : 4);

    rowsize = parts * banks * (1 << colbits) * 8;
    predicted = rowsize << rowbitsa;
    if (r0 & 0x00000004)
        predicted += rowsize << rowbitsb;

    if (predicted != dev_priv->vram_size) {
        NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
                (u32)(dev_priv->vram_size >> 20));
        NV_WARN(dev, "we calculated %dMiB VRAM\n",
                (u32)(predicted >> 20));
    }

    dev_priv->vram_rblock_size = rowsize >> 12;
    if (rt & 1)
        dev_priv->vram_rblock_size *= 3;

    NV_DEBUG(dev, "rblock %lld bytes\n",
             (u64)dev_priv->vram_rblock_size << 12);
}
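For illustration (not part of the patch), a worked example of the prediction above: with 8 partitions, 4 banks and 9 column bits, rowsize = 8 * 4 * (1 << 9) * 8 = 128KiB; with rowbitsa = 12 that predicts 128KiB << 12 = 512MiB, and a second rank (r0 bit 2 set) would add rowsize << rowbitsb on top. The rblock size handed to the VRAM manager is then rowsize >> 12 pages, tripled when rt bit 0 indicates a 3-way interleave.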
static void
nvaa_vram_preinit(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    /* To our knowledge, there's no large scale reordering of pages
     * that occurs on IGP chipsets.
     */
    dev_priv->vram_rblock_size = 1;
}

static int
int
nouveau_mem_detect(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

@@ -381,33 +255,6 @@ nouveau_mem_detect(struct drm_device *dev)
    if (dev_priv->card_type < NV_50) {
        dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
        dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
    } else
    if (dev_priv->card_type < NV_C0) {
        dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
        dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
        dev_priv->vram_size &= 0xffffffff00ll;

        switch (dev_priv->chipset) {
        case 0xaa:
        case 0xac:
        case 0xaf:
            dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
            dev_priv->vram_sys_base <<= 12;
            nvaa_vram_preinit(dev);
            break;
        default:
            nv50_vram_preinit(dev);
            break;
        }
    } else {
        dev_priv->vram_size  = nv_rd32(dev, 0x10f20c) << 20;
        dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
    }

    NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
    if (dev_priv->vram_sys_base) {
        NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
                dev_priv->vram_sys_base);
    }

    if (dev_priv->vram_size)

@@ -415,6 +262,15 @@ nouveau_mem_detect(struct drm_device *dev)
    return -ENOMEM;
}

bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
    if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
        return true;

    return false;
}

#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)

@@ -547,10 +403,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
    if (ret)
        return ret;

    ret = nouveau_mem_detect(dev);
    if (ret)
        return ret;

    dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);

    ret = nouveau_ttm_global_init(dev_priv);

@@ -566,13 +418,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
        return ret;
    }

    dev_priv->fb_available_size = dev_priv->vram_size;
    dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
    if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
        dev_priv->fb_mappable_pages =
            pci_resource_len(dev->pdev, 1);
    dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

    /* reserve space at end of VRAM for PRAMIN */
    if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
        dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)

@@ -583,6 +428,22 @@ nouveau_mem_vram_init(struct drm_device *dev)
    else
        dev_priv->ramin_rsvd_vram = (512 * 1024);

    ret = dev_priv->engine.vram.init(dev);
    if (ret)
        return ret;

    NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
    if (dev_priv->vram_sys_base) {
        NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
                dev_priv->vram_sys_base);
    }

    dev_priv->fb_available_size = dev_priv->vram_size;
    dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
    if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
        dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
    dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

    dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
    dev_priv->fb_aper_free = dev_priv->fb_available_size;

@@ -799,3 +660,118 @@ nouveau_mem_timing_fini(struct drm_device *dev)

    kfree(mem->timing);
}

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
    struct nouveau_mm *mm;
    u32 b_size;
    int ret;

    p_size = (p_size << PAGE_SHIFT) >> 12;
    b_size = dev_priv->vram_rblock_size >> 12;

    ret = nouveau_mm_init(&mm, 0, p_size, b_size);
    if (ret)
        return ret;

    man->priv = mm;
    return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
    struct nouveau_mm *mm = man->priv;
    int ret;

    ret = nouveau_mm_fini(&mm);
    if (ret)
        return ret;

    man->priv = NULL;
    return 0;
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
    struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
    struct drm_device *dev = dev_priv->dev;

    vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
    struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
    struct drm_device *dev = dev_priv->dev;
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    struct nouveau_vram *node;
    u32 size_nc = 0;
    int ret;

    if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
        size_nc = 1 << nvbo->vma.node->type;

    ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
                    mem->page_alignment << PAGE_SHIFT, size_nc,
                    (nvbo->tile_flags >> 8) & 0xff, &node);
    if (ret)
        return ret;

    node->page_shift = 12;
    if (nvbo->vma.node)
        node->page_shift = nvbo->vma.node->type;

    mem->mm_node = node;
    mem->start = node->offset >> PAGE_SHIFT;
    return 0;
}

void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
    struct nouveau_mm *mm = man->priv;
    struct nouveau_mm_node *r;
    u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
    int i;

    mutex_lock(&mm->mutex);
    list_for_each_entry(r, &mm->nodes, nl_entry) {
        printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
               prefix, r->free ? "free" : "used", r->type,
               ((u64)r->offset << 12),
               (((u64)r->offset + r->length) << 12));
        total += r->length;
        ttotal[r->type] += r->length;
        if (r->free)
            tfree[r->type] += r->length;
        else
            tused[r->type] += r->length;
    }
    mutex_unlock(&mm->mutex);

    printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
    for (i = 0; i < 3; i++) {
        printk(KERN_DEBUG "%s type %d: 0x%010llx, "
               "used 0x%010llx, free 0x%010llx\n", prefix,
               i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
    }
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
    nouveau_vram_manager_init,
    nouveau_vram_manager_fini,
    nouveau_vram_manager_new,
    nouveau_vram_manager_del,
    nouveau_vram_manager_debug
};
@@ -0,0 +1,271 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"

static inline void
region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
{
    list_del(&a->nl_entry);
    list_del(&a->fl_entry);
    kfree(a);
}

static struct nouveau_mm_node *
region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
{
    struct nouveau_mm_node *b;

    if (a->length == size)
        return a;

    b = kmalloc(sizeof(*b), GFP_KERNEL);
    if (unlikely(b == NULL))
        return NULL;

    b->offset = a->offset;
    b->length = size;
    b->free   = a->free;
    b->type   = a->type;
    a->offset += size;
    a->length -= size;
    list_add_tail(&b->nl_entry, &a->nl_entry);
    if (b->free)
        list_add_tail(&b->fl_entry, &a->fl_entry);
    return b;
}

static struct nouveau_mm_node *
nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
    struct nouveau_mm_node *prev, *next;

    /* try to merge with free adjacent entries of same type */
    prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
    if (this->nl_entry.prev != &rmm->nodes) {
        if (prev->free && prev->type == this->type) {
            prev->length += this->length;
            region_put(rmm, this);
            this = prev;
        }
    }

    next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
    if (this->nl_entry.next != &rmm->nodes) {
        if (next->free && next->type == this->type) {
            next->offset  = this->offset;
            next->length += this->length;
            region_put(rmm, this);
            this = next;
        }
    }

    return this;
}

void
nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
    u32 block_s, block_l;

    this->free = true;
    list_add(&this->fl_entry, &rmm->free);
    this = nouveau_mm_merge(rmm, this);

    /* any entirely free blocks now?  we'll want to remove typing
     * on them now so they can be use for any memory allocation
     */
    block_s = roundup(this->offset, rmm->block_size);
    if (block_s + rmm->block_size > this->offset + this->length)
        return;

    /* split off any still-typed region at the start */
    if (block_s != this->offset) {
        if (!region_split(rmm, this, block_s - this->offset))
            return;
    }

    /* split off the soon-to-be-untyped block(s) */
    block_l = rounddown(this->length, rmm->block_size);
    if (block_l != this->length) {
        this = region_split(rmm, this, block_l);
        if (!this)
            return;
    }

    /* mark as having no type, and retry merge with any adjacent
     * untyped blocks
     */
    this->type = 0;
    nouveau_mm_merge(rmm, this);
}

int
nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
               u32 align, struct nouveau_mm_node **pnode)
{
    struct nouveau_mm_node *this, *tmp, *next;
    u32 splitoff, avail, alloc;

    list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
        next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
        if (this->nl_entry.next == &rmm->nodes)
            next = NULL;

        /* skip wrongly typed blocks */
        if (this->type && this->type != type)
            continue;

        /* account for alignment */
        splitoff = this->offset & (align - 1);
        if (splitoff)
            splitoff = align - splitoff;

        if (this->length <= splitoff)
            continue;

        /* determine total memory available from this, and
         * the next block (if appropriate)
         */
        avail = this->length;
        if (next && next->free && (!next->type || next->type == type))
            avail += next->length;

        avail -= splitoff;

        /* determine allocation size */
        if (size_nc) {
            alloc = min(avail, size);
            alloc = rounddown(alloc, size_nc);
            if (alloc == 0)
                continue;
        } else {
            alloc = size;
            if (avail < alloc)
                continue;
        }

        /* untyped block, split off a chunk that's a multiple
         * of block_size and type it
         */
        if (!this->type) {
            u32 block = roundup(alloc + splitoff, rmm->block_size);
            if (this->length < block)
                continue;

            this = region_split(rmm, this, block);
            if (!this)
                return -ENOMEM;

            this->type = type;
        }

        /* stealing memory from adjacent block */
        if (alloc > this->length) {
            u32 amount = alloc - (this->length - splitoff);

            if (!next->type) {
                amount = roundup(amount, rmm->block_size);

                next = region_split(rmm, next, amount);
                if (!next)
                    return -ENOMEM;

                next->type = type;
            }

            this->length += amount;
            next->offset += amount;
            next->length -= amount;
            if (!next->length) {
                list_del(&next->nl_entry);
                list_del(&next->fl_entry);
                kfree(next);
            }
        }

        if (splitoff) {
            if (!region_split(rmm, this, splitoff))
                return -ENOMEM;
        }

        this = region_split(rmm, this, alloc);
        if (this == NULL)
            return -ENOMEM;

        this->free = false;
        list_del(&this->fl_entry);
        *pnode = this;
        return 0;
    }

    return -ENOMEM;
}

int
nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
{
    struct nouveau_mm *rmm;
    struct nouveau_mm_node *heap;

    heap = kzalloc(sizeof(*heap), GFP_KERNEL);
    if (!heap)
        return -ENOMEM;
    heap->free   = true;
    heap->offset = roundup(offset, block);
    heap->length = rounddown(offset + length, block) - heap->offset;

    rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
    if (!rmm) {
        kfree(heap);
        return -ENOMEM;
    }
    rmm->block_size = block;
    mutex_init(&rmm->mutex);
    INIT_LIST_HEAD(&rmm->nodes);
    INIT_LIST_HEAD(&rmm->free);
    list_add(&heap->nl_entry, &rmm->nodes);
    list_add(&heap->fl_entry, &rmm->free);

    *prmm = rmm;
    return 0;
}

int
nouveau_mm_fini(struct nouveau_mm **prmm)
{
    struct nouveau_mm *rmm = *prmm;
    struct nouveau_mm_node *heap =
        list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);

    if (!list_is_singular(&rmm->nodes))
        return -EBUSY;

    kfree(heap);
    kfree(rmm);
    *prmm = NULL;
    return 0;
}
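For illustration (not part of the patch), a toy walk-through of the allocator's typed-block policy. Assume block_size = 16 units, an empty heap of 64 units, and a request of type 1, size 20, align 1: nouveau_mm_get() rounds 20 up to the 32-unit block boundary, types that 32-unit region, then splits off the 20-unit allocation, leaving a 12-unit free tail that keeps type 1. Only once every unit of a 16-unit block is free again does nouveau_mm_put() strip the typing, so the memory can serve requests of any type.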
@@ -0,0 +1,67 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifndef __NOUVEAU_REGION_H__
#define __NOUVEAU_REGION_H__

struct nouveau_mm_node {
    struct list_head nl_entry;
    struct list_head fl_entry;
    struct list_head rl_entry;

    bool free;
    int  type;

    u32 offset;
    u32 length;
};

struct nouveau_mm {
    struct list_head nodes;
    struct list_head free;

    struct mutex mutex;

    u32 block_size;
};

int  nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
int  nouveau_mm_fini(struct nouveau_mm **);
int  nouveau_mm_pre(struct nouveau_mm *);
int  nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
                    u32 align, struct nouveau_mm_node **);
void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);

int  nv50_vram_init(struct drm_device *);
int  nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
                   u32 memtype, struct nouveau_vram **);
void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);

int  nvc0_vram_init(struct drm_device *);
int  nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
                   u32 memtype, struct nouveau_vram **);
bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);

#endif
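For illustration (not part of the patch): minimal usage of the allocator API declared above, with offsets and lengths in the caller's chosen units (the VRAM manager uses 4KiB pages):

struct nouveau_mm *mm;
struct nouveau_mm_node *node;
int ret;

ret = nouveau_mm_init(&mm, 0, 1024, 16);   /* 1024-unit heap, 16-unit blocks */
if (ret == 0) {
    /* type-1 allocation, 64 units, no non-contig fallback, 1-unit alignment */
    ret = nouveau_mm_get(mm, 1, 64, 0, 1, &node);
    if (ret == 0)
        nouveau_mm_put(mm, node);
    nouveau_mm_fini(&mm);   /* fails with -EBUSY if nodes remain allocated */
}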
@@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                       int size, uint32_t *b_offset)
{
    struct drm_device *dev = chan->dev;
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_gpuobj *nobj = NULL;
    struct drm_mm_node *mem;
    uint32_t offset;

@@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
        return -ENOMEM;
    }

    offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
    if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
        target = NV_DMA_TARGET_VIDMEM;
    } else
    if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
        if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
            dev_priv->card_type < NV_50) {
            ret = nouveau_sgdma_get_page(dev, offset, &offset);
            if (ret)
                return ret;
            target = NV_DMA_TARGET_PCI;
        } else {
            target = NV_DMA_TARGET_AGP;
            if (dev_priv->card_type >= NV_50)
                offset += dev_priv->vm_gart_base;
        }
    } else {
        NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
                 chan->notifier_bo->bo.mem.mem_type);
        return -EINVAL;
    }
    if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
        target = NV_MEM_TARGET_VRAM;
    else
        target = NV_MEM_TARGET_GART;
    offset  = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
    offset += mem->start;

    ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
                                 mem->size, NV_DMA_ACCESS_RW, target,
                                 mem->size, NV_MEM_ACCESS_RW, target,
                                 &nobj);
    if (ret) {
        drm_mm_put_block(mem);

@@ -181,15 +164,20 @@ int
nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct drm_nouveau_notifierobj_alloc *na = data;
    struct nouveau_channel *chan;
    int ret;

    NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
    /* completely unnecessary for these chipsets... */
    if (unlikely(dev_priv->card_type >= NV_C0))
        return -EINVAL;

    chan = nouveau_channel_get(dev, file_priv, na->channel);
    if (IS_ERR(chan))
        return PTR_ERR(chan);

    ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
    if (ret)
        return ret;

    return 0;
    nouveau_channel_put(&chan);
    return ret;
}
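For illustration (not part of the patch): the ioctl now takes a real channel reference instead of the old lookup macro, so every exit path after the lookup must drop it. The shape callers follow, with do_work() as a hypothetical stand-in:

chan = nouveau_channel_get(dev, file_priv, na->channel);
if (IS_ERR(chan))
    return PTR_ERR(chan);        /* no reference taken on error */

ret = do_work(chan);             /* hypothetical; may fail */

nouveau_channel_put(&chan);      /* always drop the reference */
return ret;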
(The diff for this file is not shown because of its large size.)
@@ -27,6 +27,10 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

@@ -418,8 +422,7 @@ nouveau_hwmon_init(struct drm_device *dev)
        return ret;
    }
    dev_set_drvdata(hwmon_dev, dev);
    ret = sysfs_create_group(&hwmon_dev->kobj,
                             &hwmon_attrgroup);
    ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
    if (ret) {
        NV_ERROR(dev,
                 "Unable to create hwmon sysfs file: %d\n", ret);

@@ -446,6 +449,25 @@ nouveau_hwmon_fini(struct drm_device *dev)
#endif
}

#ifdef CONFIG_ACPI
static int
nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
{
    struct drm_nouveau_private *dev_priv =
        container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
    struct drm_device *dev = dev_priv->dev;
    struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

    if (strcmp(entry->device_class, "ac_adapter") == 0) {
        bool ac = power_supply_is_system_supplied();

        NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
    }

    return NOTIFY_OK;
}
#endif

int
nouveau_pm_init(struct drm_device *dev)
{

@@ -485,6 +507,10 @@ nouveau_pm_init(struct drm_device *dev)

    nouveau_sysfs_init(dev);
    nouveau_hwmon_init(dev);
#ifdef CONFIG_ACPI
    pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
    register_acpi_notifier(&pm->acpi_nb);
#endif

    return 0;
}

@@ -503,6 +529,9 @@ nouveau_pm_fini(struct drm_device *dev)
    nouveau_perf_fini(dev);
    nouveau_volt_fini(dev);

#ifdef CONFIG_ACPI
    unregister_acpi_notifier(&pm->acpi_nb);
#endif
    nouveau_hwmon_fini(dev);
    nouveau_sysfs_fini(dev);
}
@@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
    nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);

    if (dev_priv->card_type < NV_40) {
        ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
        ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
              (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
              (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
    } else
    if (dev_priv->card_type < NV_50) {
        ctx = (gpuobj->cinst >> 4) |
        ctx = (gpuobj->pinst >> 4) |
              (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
              (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
    } else {
        if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
            ctx = (gpuobj->cinst << 10) | 2;
            ctx = (gpuobj->cinst << 10) | chan->id;
        } else {
            ctx = (gpuobj->cinst >> 4) |
                  ((gpuobj->engine <<

@@ -214,18 +214,19 @@ out:
    spin_unlock_irqrestore(&chan->ramht->lock, flags);
}

void
int
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
{
    struct nouveau_ramht_entry *entry;

    entry = nouveau_ramht_remove_entry(chan, handle);
    if (!entry)
        return;
        return -ENOENT;

    nouveau_ramht_remove_hash(chan, entry->handle);
    nouveau_gpuobj_ref(NULL, &entry->gpuobj);
    kfree(entry);
    return 0;
}

struct nouveau_gpuobj *
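For illustration (not part of the patch): nouveau_ramht_remove() now reports whether the handle was actually present, so callers can propagate -ENOENT rather than silently ignore a bad handle:

ret = nouveau_ramht_remove(chan, handle);
if (ret)
    NV_ERROR(dev, "RAMHT entry 0x%08x not found: %d\n", handle, ret);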
@@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,

extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
                                struct nouveau_gpuobj *);
extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
extern struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);

@@ -45,6 +45,11 @@
# define NV04_PFB_REF_CMD_REFRESH                 (1 << 0)
#define NV04_PFB_PRE                              0x001002d4
# define NV04_PFB_PRE_CMD_PRECHARGE               (1 << 0)
#define NV20_PFB_ZCOMP(i)                         (0x00100300 + 4*(i))
# define NV20_PFB_ZCOMP_MODE_32                   (4 << 24)
# define NV20_PFB_ZCOMP_EN                        (1 << 31)
# define NV25_PFB_ZCOMP_MODE_16                   (1 << 20)
# define NV25_PFB_ZCOMP_MODE_32                   (2 << 20)
#define NV10_PFB_CLOSE_PAGE2                      0x0010033c
#define NV04_PFB_SCRAMBLE(i)                      (0x00100400 + 4 * (i))
#define NV40_PFB_TILE(i)                          (0x00100600 + (i*16))

@@ -74,17 +79,6 @@
# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT          20
# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT        0

/* DMA object defines */
#define NV_DMA_ACCESS_RW                          0
#define NV_DMA_ACCESS_RO                          1
#define NV_DMA_ACCESS_WO                          2
#define NV_DMA_TARGET_VIDMEM                      0
#define NV_DMA_TARGET_PCI                         2
#define NV_DMA_TARGET_AGP                         3
/* The following is not a real value used by the card, it's changed by
 * nouveau_object_dma_create */
#define NV_DMA_TARGET_PCI_NONLINEAR               8

/* Some object classes we care about in the drm */
#define NV_CLASS_DMA_FROM_MEMORY                  0x00000002
#define NV_CLASS_DMA_TO_MEMORY                    0x00000003

@@ -332,6 +326,7 @@
#define NV04_PGRAPH_BSWIZZLE5                     0x004006A0
#define NV03_PGRAPH_STATUS                        0x004006B0
#define NV04_PGRAPH_STATUS                        0x00400700
# define NV40_PGRAPH_STATUS_SYNC_STALL            0x00004000
#define NV04_PGRAPH_TRAPPED_ADDR                  0x00400704
#define NV04_PGRAPH_TRAPPED_DATA                  0x00400708
#define NV04_PGRAPH_SURFACE                       0x0040070C

@@ -378,6 +373,7 @@
#define NV20_PGRAPH_TLIMIT(i)                     (0x00400904 + (i*16))
#define NV20_PGRAPH_TSIZE(i)                      (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i)                    (0x0040090C + (i*16))
#define NV20_PGRAPH_ZCOMP(i)                      (0x00400980 + 4*(i))
#define NV10_PGRAPH_TILE(i)                       (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i)                     (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i)                      (0x00400B08 + (i*16))

@@ -714,31 +710,32 @@
#define NV50_PDISPLAY_INTR_1_CLK_UNK10            0x00000010
#define NV50_PDISPLAY_INTR_1_CLK_UNK20            0x00000020
#define NV50_PDISPLAY_INTR_1_CLK_UNK40            0x00000040
#define NV50_PDISPLAY_INTR_EN                     0x0061002c
#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC         0x0000000c
#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n)     (1 << ((n) + 2))
#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0       0x00000004
#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1       0x00000008
#define NV50_PDISPLAY_INTR_EN_CLK_UNK10           0x00000010
#define NV50_PDISPLAY_INTR_EN_CLK_UNK20           0x00000020
#define NV50_PDISPLAY_INTR_EN_CLK_UNK40           0x00000040
#define NV50_PDISPLAY_INTR_EN_0                   0x00610028
#define NV50_PDISPLAY_INTR_EN_1                   0x0061002c
#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC       0x0000000c
#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n)   (1 << ((n) + 2))
#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0     0x00000004
#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1     0x00000008
#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10         0x00000010
#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20         0x00000020
#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40         0x00000040
#define NV50_PDISPLAY_UNK30_CTRL                  0x00610030
#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0     0x00000200
#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1     0x00000400
#define NV50_PDISPLAY_UNK30_CTRL_PENDING          0x80000000
#define NV50_PDISPLAY_TRAPPED_ADDR                0x00610080
#define NV50_PDISPLAY_TRAPPED_DATA                0x00610084
#define NV50_PDISPLAY_CHANNEL_STAT(i)             ((i) * 0x10 + 0x00610200)
#define NV50_PDISPLAY_CHANNEL_STAT_DMA            0x00000010
#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED   0x00000000
#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED    0x00000010
#define NV50_PDISPLAY_CHANNEL_DMA_CB(i)           ((i) * 0x10 + 0x00610204)
#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION     0x00000002
#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID        0x00000001
#define NV50_PDISPLAY_CHANNEL_UNK2(i)             ((i) * 0x10 + 0x00610208)
#define NV50_PDISPLAY_CHANNEL_UNK3(i)             ((i) * 0x10 + 0x0061020c)
#define NV50_PDISPLAY_TRAPPED_ADDR(i)             ((i) * 0x08 + 0x00610080)
#define NV50_PDISPLAY_TRAPPED_DATA(i)             ((i) * 0x08 + 0x00610084)
#define NV50_PDISPLAY_EVO_CTRL(i)                 ((i) * 0x10 + 0x00610200)
#define NV50_PDISPLAY_EVO_CTRL_DMA                0x00000010
#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED       0x00000000
#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED        0x00000010
#define NV50_PDISPLAY_EVO_DMA_CB(i)               ((i) * 0x10 + 0x00610204)
#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION         0x00000002
#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM    0x00000000
#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM  0x00000002
#define NV50_PDISPLAY_EVO_DMA_CB_VALID            0x00000001
#define NV50_PDISPLAY_EVO_UNK2(i)                 ((i) * 0x10 + 0x00610208)
#define NV50_PDISPLAY_EVO_HASH_TAG(i)             ((i) * 0x10 + 0x0061020c)

#define NV50_PDISPLAY_CURSOR                      0x00610270
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)      ((i) * 0x10 + 0x00610270)

@@ -746,15 +743,11 @@
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS  0x00030000
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000

#define NV50_PDISPLAY_CTRL_STATE                  0x00610300
#define NV50_PDISPLAY_CTRL_STATE_PENDING          0x80000000
#define NV50_PDISPLAY_CTRL_STATE_METHOD           0x00001ffc
#define NV50_PDISPLAY_CTRL_STATE_ENABLE           0x00000001
#define NV50_PDISPLAY_CTRL_VAL                    0x00610304
#define NV50_PDISPLAY_UNK_380                     0x00610380
#define NV50_PDISPLAY_RAM_AMOUNT                  0x00610384
#define NV50_PDISPLAY_UNK_388                     0x00610388
#define NV50_PDISPLAY_UNK_38C                     0x0061038c
#define NV50_PDISPLAY_PIO_CTRL                    0x00610300
#define NV50_PDISPLAY_PIO_CTRL_PENDING            0x80000000
#define NV50_PDISPLAY_PIO_CTRL_MTHD               0x00001ffc
#define NV50_PDISPLAY_PIO_CTRL_ENABLED            0x00000001
#define NV50_PDISPLAY_PIO_DATA                    0x00610304

#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
    dma_addr_t *pages;
    unsigned nr_pages;

    unsigned pte_start;
    u64 offset;
    bool bound;
};

@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
    }
}

static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

    if (dev_priv->card_type < NV_50)
        return pte + 2;

    return pte << 1;
}

static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{

@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)

    NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

    pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
    nvbe->pte_start = pte;
    nvbe->offset = mem->start << PAGE_SHIFT;
    pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
    for (i = 0; i < nvbe->nr_pages; i++) {
        dma_addr_t dma_offset = nvbe->pages[i];
        uint32_t offset_l = lower_32_bits(dma_offset);
        uint32_t offset_h = upper_32_bits(dma_offset);

        for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
            if (dev_priv->card_type < NV_50) {
                nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
                pte += 1;
            } else {
                nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
                nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
                pte += 2;
            }

        for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
            nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
            dma_offset += NV_CTXDMA_PAGE_SIZE;
        }
    }
    dev_priv->engine.instmem.flush(nvbe->dev);

    if (dev_priv->card_type == NV_50) {
        dev_priv->engine.fifo.tlb_flush(dev);
        dev_priv->engine.graph.tlb_flush(dev);
    }

    nvbe->bound = true;
    return 0;

@@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
    if (!nvbe->bound)
        return 0;

    pte = nvbe->pte_start;
    pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
    for (i = 0; i < nvbe->nr_pages; i++) {
        dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

        for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
            if (dev_priv->card_type < NV_50) {
                nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
                pte += 1;
            } else {
                nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
                nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
                pte += 2;
            }

            dma_offset += NV_CTXDMA_PAGE_SIZE;
        }
    }
    dev_priv->engine.instmem.flush(nvbe->dev);

    if (dev_priv->card_type == NV_50) {
        dev_priv->engine.fifo.tlb_flush(dev);
        dev_priv->engine.graph.tlb_flush(dev);
        for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
            nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
    }

    nvbe->bound = false;

@@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
    }
}

static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
    struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
    struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

    nvbe->offset = mem->start << PAGE_SHIFT;

    nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
                      nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
    nvbe->bound = true;
    return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
    struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
    struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

    if (!nvbe->bound)
        return 0;

    nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
                        nvbe->nr_pages << PAGE_SHIFT);
    nvbe->bound = false;
    return 0;
}

static struct ttm_backend_func nouveau_sgdma_backend = {
    .populate = nouveau_sgdma_populate,
    .clear    = nouveau_sgdma_clear,

@@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
    .destroy  = nouveau_sgdma_destroy
};

static struct ttm_backend_func nv50_sgdma_backend = {
    .populate = nouveau_sgdma_populate,
    .clear    = nouveau_sgdma_clear,
    .bind     = nv50_sgdma_bind,
    .unbind   = nv50_sgdma_unbind,
    .destroy  = nouveau_sgdma_destroy
};

struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_sgdma_be *nvbe;

    if (!dev_priv->gart_info.sg_ctxdma)
        return NULL;

    nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
    if (!nvbe)
        return NULL;

    nvbe->dev = dev;

    nvbe->backend.func = &nouveau_sgdma_backend;

    if (dev_priv->card_type < NV_50)
        nvbe->backend.func = &nouveau_sgdma_backend;
    else
        nvbe->backend.func = &nv50_sgdma_backend;
    return &nvbe->backend;
}

@@ -218,7 +209,6 @@ int
nouveau_sgdma_init(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct pci_dev *pdev = dev->pdev;
    struct nouveau_gpuobj *gpuobj = NULL;
    uint32_t aper_size, obj_size;
    int i, ret;

@@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev)

        obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
        obj_size += 8; /* ctxdma header */
    } else {
        /* 1 entire VM page table */
        aper_size = (512 * 1024 * 1024);
        obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
    }

    ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
                             NVOBJ_FLAG_ZERO_ALLOC |
                             NVOBJ_FLAG_ZERO_FREE, &gpuobj);
    if (ret) {
        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
        return ret;
    }
    ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
                             NVOBJ_FLAG_ZERO_ALLOC |
                             NVOBJ_FLAG_ZERO_FREE, &gpuobj);
    if (ret) {
        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
        return ret;
    }

    dev_priv->gart_info.sg_dummy_page =
        alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
    if (!dev_priv->gart_info.sg_dummy_page) {
        nouveau_gpuobj_ref(NULL, &gpuobj);
        return -ENOMEM;
    }

    set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
    dev_priv->gart_info.sg_dummy_bus =
        pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
                     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
        nouveau_gpuobj_ref(NULL, &gpuobj);
        return -EFAULT;
    }

    if (dev_priv->card_type < NV_50) {
        /* special case, allocated from global instmem heap so
         * cinst is invalid, we use it on all channels though so
         * cinst needs to be valid, set it the same as pinst
         */
        gpuobj->cinst = gpuobj->pinst;

        /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
         * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
         * on those cards? */
        nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                           (1 << 12) /* PT present */ |
                           (0 << 13) /* PT *not* linear */ |
                           (NV_DMA_ACCESS_RW << 14) |
                           (NV_DMA_TARGET_PCI << 16));
                           (0 << 14) /* RW */ |
                           (2 << 16) /* PCI */);
        nv_wo32(gpuobj, 4, aper_size - 1);
        for (i = 2; i < 2 + (aper_size >> 12); i++) {
            nv_wo32(gpuobj, i * 4,
                    dev_priv->gart_info.sg_dummy_bus | 3);
        }
    } else {
        for (i = 0; i < obj_size; i += 8) {
            nv_wo32(gpuobj, i + 0, 0x00000000);
            nv_wo32(gpuobj, i + 4, 0x00000000);
        }
        for (i = 2; i < 2 + (aper_size >> 12); i++)
            nv_wo32(gpuobj, i * 4, 0x00000000);

        dev_priv->gart_info.sg_ctxdma = gpuobj;
        dev_priv->gart_info.aper_base = 0;
        dev_priv->gart_info.aper_size = aper_size;
    } else
    if (dev_priv->chan_vm) {
        ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
                             12, NV_MEM_ACCESS_RW,
                             &dev_priv->gart_info.vma);
        if (ret)
            return ret;

        dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
        dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
    }
    dev_priv->engine.instmem.flush(dev);

    dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
    dev_priv->gart_info.aper_base = 0;
    dev_priv->gart_info.aper_size = aper_size;
    dev_priv->gart_info.sg_ctxdma = gpuobj;
    return 0;
}

@@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    if (dev_priv->gart_info.sg_dummy_page) {
        pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
                       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        unlock_page(dev_priv->gart_info.sg_dummy_page);
        __free_page(dev_priv->gart_info.sg_dummy_page);
        dev_priv->gart_info.sg_dummy_page = NULL;
        dev_priv->gart_info.sg_dummy_bus = 0;
    }

    nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
    nouveau_vm_put(&dev_priv->gart_info.vma);
}

int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
    int pte;
    int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

    pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
    if (dev_priv->card_type < NV_50) {
        *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
        return 0;
    }
    BUG_ON(dev_priv->card_type >= NV_50);

    NV_ERROR(dev, "Unimplemented on NV50\n");
    return -EINVAL;
    return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
           (offset & NV_CTXDMA_PAGE_MASK);
}
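For illustration (not part of the patch), a worked example of the PTE arithmetic above, assuming the 4KiB NV_CTXDMA_PAGE_SIZE (NV_CTXDMA_PAGE_SHIFT = 12): GART offset 0x6000 gives pte = (0x6000 >> 12) + 2 = 8, the +2 skipping the two-word ctxdma header. The 32-bit word at byte 8 * 4 holds that page's bus address plus the valid bits (the "| 3" written by the bind path), and nouveau_sgdma_get_physical() ORs the low 12 offset bits back in to recover the exact physical byte address.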
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.suspend  = nv04_instmem_suspend;
        engine->instmem.resume   = nv04_instmem_resume;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear    = nv04_instmem_clear;
        engine->instmem.bind     = nv04_instmem_bind;
        engine->instmem.unbind   = nv04_instmem_unbind;
        engine->instmem.get      = nv04_instmem_get;
        engine->instmem.put      = nv04_instmem_put;
        engine->instmem.map      = nv04_instmem_map;
        engine->instmem.unmap    = nv04_instmem_unmap;
        engine->instmem.flush    = nv04_instmem_flush;
        engine->mc.init          = nv04_mc_init;
        engine->mc.takedown      = nv04_mc_takedown;

@@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->timer.takedown   = nv04_timer_takedown;
        engine->fb.init          = nv04_fb_init;
        engine->fb.takedown      = nv04_fb_takedown;
        engine->graph.grclass    = nv04_graph_grclass;
        engine->graph.init       = nv04_graph_init;
        engine->graph.takedown   = nv04_graph_takedown;
        engine->graph.fifo_access = nv04_graph_fifo_access;

@@ -76,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->graph.unload_context = nv04_graph_unload_context;
        engine->fifo.channels    = 16;
        engine->fifo.init        = nv04_fifo_init;
        engine->fifo.takedown    = nouveau_stub_takedown;
        engine->fifo.takedown    = nv04_fifo_fini;
        engine->fifo.disable     = nv04_fifo_disable;
        engine->fifo.enable      = nv04_fifo_enable;
        engine->fifo.reassign    = nv04_fifo_reassign;

@@ -99,16 +98,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->pm.clock_get     = nv04_pm_clock_get;
        engine->pm.clock_pre     = nv04_pm_clock_pre;
        engine->pm.clock_set     = nv04_pm_clock_set;
        engine->crypt.init       = nouveau_stub_init;
        engine->crypt.takedown   = nouveau_stub_takedown;
        engine->vram.init        = nouveau_mem_detect;
        engine->vram.flags_valid = nouveau_mem_flags_valid;
        break;
    case 0x10:
        engine->instmem.init     = nv04_instmem_init;
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.suspend  = nv04_instmem_suspend;
        engine->instmem.resume   = nv04_instmem_resume;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear    = nv04_instmem_clear;
        engine->instmem.bind     = nv04_instmem_bind;
        engine->instmem.unbind   = nv04_instmem_unbind;
        engine->instmem.get      = nv04_instmem_get;
        engine->instmem.put      = nv04_instmem_put;
        engine->instmem.map      = nv04_instmem_map;
        engine->instmem.unmap    = nv04_instmem_unmap;
        engine->instmem.flush    = nv04_instmem_flush;
        engine->mc.init          = nv04_mc_init;
        engine->mc.takedown      = nv04_mc_takedown;

@@ -117,8 +120,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->timer.takedown   = nv04_timer_takedown;
        engine->fb.init          = nv10_fb_init;
        engine->fb.takedown      = nv10_fb_takedown;
        engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
        engine->graph.grclass    = nv10_graph_grclass;
        engine->fb.init_tile_region = nv10_fb_init_tile_region;
        engine->fb.set_tile_region  = nv10_fb_set_tile_region;
        engine->fb.free_tile_region = nv10_fb_free_tile_region;
        engine->graph.init       = nv10_graph_init;
        engine->graph.takedown   = nv10_graph_takedown;
        engine->graph.channel    = nv10_graph_channel;

@@ -127,17 +131,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->graph.fifo_access = nv04_graph_fifo_access;
        engine->graph.load_context = nv10_graph_load_context;
        engine->graph.unload_context = nv10_graph_unload_context;
        engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
        engine->graph.set_tile_region = nv10_graph_set_tile_region;
        engine->fifo.channels    = 32;
        engine->fifo.init        = nv10_fifo_init;
        engine->fifo.takedown    = nouveau_stub_takedown;
        engine->fifo.takedown    = nv04_fifo_fini;
        engine->fifo.disable     = nv04_fifo_disable;
        engine->fifo.enable      = nv04_fifo_enable;
        engine->fifo.reassign    = nv04_fifo_reassign;
        engine->fifo.cache_pull  = nv04_fifo_cache_pull;
        engine->fifo.channel_id  = nv10_fifo_channel_id;
        engine->fifo.create_context = nv10_fifo_create_context;
        engine->fifo.destroy_context = nv10_fifo_destroy_context;
        engine->fifo.destroy_context = nv04_fifo_destroy_context;
        engine->fifo.load_context = nv10_fifo_load_context;
        engine->fifo.unload_context = nv10_fifo_unload_context;
        engine->display.early_init = nv04_display_early_init;

@@ -153,16 +157,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->pm.clock_get     = nv04_pm_clock_get;
        engine->pm.clock_pre     = nv04_pm_clock_pre;
        engine->pm.clock_set     = nv04_pm_clock_set;
        engine->crypt.init       = nouveau_stub_init;
        engine->crypt.takedown   = nouveau_stub_takedown;
        engine->vram.init        = nouveau_mem_detect;
        engine->vram.flags_valid = nouveau_mem_flags_valid;
        break;
    case 0x20:
        engine->instmem.init     = nv04_instmem_init;
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.suspend  = nv04_instmem_suspend;
        engine->instmem.resume   = nv04_instmem_resume;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear    = nv04_instmem_clear;
        engine->instmem.bind     = nv04_instmem_bind;
        engine->instmem.unbind   = nv04_instmem_unbind;
        engine->instmem.get      = nv04_instmem_get;
        engine->instmem.put      = nv04_instmem_put;
        engine->instmem.map      = nv04_instmem_map;
        engine->instmem.unmap    = nv04_instmem_unmap;
        engine->instmem.flush    = nv04_instmem_flush;
        engine->mc.init          = nv04_mc_init;
        engine->mc.takedown      = nv04_mc_takedown;

@@ -171,8 +179,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->timer.takedown   = nv04_timer_takedown;
        engine->fb.init          = nv10_fb_init;
        engine->fb.takedown      = nv10_fb_takedown;
        engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
        engine->graph.grclass    = nv20_graph_grclass;
        engine->fb.init_tile_region = nv10_fb_init_tile_region;
        engine->fb.set_tile_region  = nv10_fb_set_tile_region;
        engine->fb.free_tile_region = nv10_fb_free_tile_region;
        engine->graph.init       = nv20_graph_init;
        engine->graph.takedown   = nv20_graph_takedown;
        engine->graph.channel    = nv10_graph_channel;

@@ -181,17 +190,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->graph.fifo_access = nv04_graph_fifo_access;
        engine->graph.load_context = nv20_graph_load_context;
        engine->graph.unload_context = nv20_graph_unload_context;
        engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
        engine->graph.set_tile_region = nv20_graph_set_tile_region;
        engine->fifo.channels    = 32;
        engine->fifo.init        = nv10_fifo_init;
        engine->fifo.takedown    = nouveau_stub_takedown;
        engine->fifo.takedown    = nv04_fifo_fini;
        engine->fifo.disable     = nv04_fifo_disable;
        engine->fifo.enable      = nv04_fifo_enable;
        engine->fifo.reassign    = nv04_fifo_reassign;
        engine->fifo.cache_pull  = nv04_fifo_cache_pull;
        engine->fifo.channel_id  = nv10_fifo_channel_id;
        engine->fifo.create_context = nv10_fifo_create_context;
        engine->fifo.destroy_context = nv10_fifo_destroy_context;
        engine->fifo.destroy_context = nv04_fifo_destroy_context;
        engine->fifo.load_context = nv10_fifo_load_context;
        engine->fifo.unload_context = nv10_fifo_unload_context;
        engine->display.early_init = nv04_display_early_init;

@@ -207,16 +216,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->pm.clock_get     = nv04_pm_clock_get;
        engine->pm.clock_pre     = nv04_pm_clock_pre;
        engine->pm.clock_set     = nv04_pm_clock_set;
        engine->crypt.init       = nouveau_stub_init;
        engine->crypt.takedown   = nouveau_stub_takedown;
        engine->vram.init        = nouveau_mem_detect;
        engine->vram.flags_valid = nouveau_mem_flags_valid;
        break;
    case 0x30:
        engine->instmem.init     = nv04_instmem_init;
        engine->instmem.takedown = nv04_instmem_takedown;
        engine->instmem.suspend  = nv04_instmem_suspend;
        engine->instmem.resume   = nv04_instmem_resume;
        engine->instmem.populate = nv04_instmem_populate;
        engine->instmem.clear    = nv04_instmem_clear;
        engine->instmem.bind     = nv04_instmem_bind;
        engine->instmem.unbind   = nv04_instmem_unbind;
        engine->instmem.get      = nv04_instmem_get;
        engine->instmem.put      = nv04_instmem_put;
        engine->instmem.map      = nv04_instmem_map;
        engine->instmem.unmap    = nv04_instmem_unmap;
        engine->instmem.flush    = nv04_instmem_flush;
        engine->mc.init          = nv04_mc_init;
        engine->mc.takedown      = nv04_mc_takedown;

@@ -225,8 +238,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->timer.takedown   = nv04_timer_takedown;
        engine->fb.init          = nv30_fb_init;
        engine->fb.takedown      = nv30_fb_takedown;
        engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
        engine->graph.grclass    = nv30_graph_grclass;
        engine->fb.init_tile_region = nv30_fb_init_tile_region;
        engine->fb.set_tile_region  = nv10_fb_set_tile_region;
        engine->fb.free_tile_region = nv30_fb_free_tile_region;
        engine->graph.init       = nv30_graph_init;
        engine->graph.takedown   = nv20_graph_takedown;
        engine->graph.fifo_access = nv04_graph_fifo_access;

@@ -235,17 +249,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->graph.destroy_context = nv20_graph_destroy_context;
        engine->graph.load_context = nv20_graph_load_context;
        engine->graph.unload_context = nv20_graph_unload_context;
        engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
        engine->graph.set_tile_region = nv20_graph_set_tile_region;
        engine->fifo.channels    = 32;
        engine->fifo.init        = nv10_fifo_init;
        engine->fifo.takedown    = nouveau_stub_takedown;
        engine->fifo.takedown    = nv04_fifo_fini;
        engine->fifo.disable     = nv04_fifo_disable;
        engine->fifo.enable      = nv04_fifo_enable;
        engine->fifo.reassign    = nv04_fifo_reassign;
        engine->fifo.cache_pull  = nv04_fifo_cache_pull;
        engine->fifo.channel_id  = nv10_fifo_channel_id;
        engine->fifo.create_context = nv10_fifo_create_context;
        engine->fifo.destroy_context = nv10_fifo_destroy_context;
        engine->fifo.destroy_context = nv04_fifo_destroy_context;
        engine->fifo.load_context = nv10_fifo_load_context;
        engine->fifo.unload_context = nv10_fifo_unload_context;
        engine->display.early_init = nv04_display_early_init;

@@ -263,6 +277,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->pm.clock_set     = nv04_pm_clock_set;
        engine->pm.voltage_get   = nouveau_voltage_gpio_get;
        engine->pm.voltage_set   = nouveau_voltage_gpio_set;
        engine->crypt.init       = nouveau_stub_init;
        engine->crypt.takedown   = nouveau_stub_takedown;
        engine->vram.init        = nouveau_mem_detect;
        engine->vram.flags_valid = nouveau_mem_flags_valid;
        break;
    case 0x40:
    case 0x60:

@@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->instmem.takedown = nv04_instmem_takedown;
|
||||
engine->instmem.suspend = nv04_instmem_suspend;
|
||||
engine->instmem.resume = nv04_instmem_resume;
|
||||
engine->instmem.populate = nv04_instmem_populate;
|
||||
engine->instmem.clear = nv04_instmem_clear;
|
||||
engine->instmem.bind = nv04_instmem_bind;
|
||||
engine->instmem.unbind = nv04_instmem_unbind;
|
||||
engine->instmem.get = nv04_instmem_get;
|
||||
engine->instmem.put = nv04_instmem_put;
|
||||
engine->instmem.map = nv04_instmem_map;
|
||||
engine->instmem.unmap = nv04_instmem_unmap;
|
||||
engine->instmem.flush = nv04_instmem_flush;
|
||||
engine->mc.init = nv40_mc_init;
|
||||
engine->mc.takedown = nv40_mc_takedown;
|
||||
|
@ -282,8 +300,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->timer.takedown = nv04_timer_takedown;
|
||||
engine->fb.init = nv40_fb_init;
|
||||
engine->fb.takedown = nv40_fb_takedown;
|
||||
engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
|
||||
engine->graph.grclass = nv40_graph_grclass;
|
||||
engine->fb.init_tile_region = nv30_fb_init_tile_region;
|
||||
engine->fb.set_tile_region = nv40_fb_set_tile_region;
|
||||
engine->fb.free_tile_region = nv30_fb_free_tile_region;
|
||||
engine->graph.init = nv40_graph_init;
|
||||
engine->graph.takedown = nv40_graph_takedown;
|
||||
engine->graph.fifo_access = nv04_graph_fifo_access;
|
||||
|
@ -292,17 +311,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->graph.destroy_context = nv40_graph_destroy_context;
|
||||
engine->graph.load_context = nv40_graph_load_context;
|
||||
engine->graph.unload_context = nv40_graph_unload_context;
|
||||
engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
|
||||
engine->graph.set_tile_region = nv40_graph_set_tile_region;
|
||||
engine->fifo.channels = 32;
|
||||
engine->fifo.init = nv40_fifo_init;
|
||||
engine->fifo.takedown = nouveau_stub_takedown;
|
||||
engine->fifo.takedown = nv04_fifo_fini;
|
||||
engine->fifo.disable = nv04_fifo_disable;
|
||||
engine->fifo.enable = nv04_fifo_enable;
|
||||
engine->fifo.reassign = nv04_fifo_reassign;
|
||||
engine->fifo.cache_pull = nv04_fifo_cache_pull;
|
||||
engine->fifo.channel_id = nv10_fifo_channel_id;
|
||||
engine->fifo.create_context = nv40_fifo_create_context;
|
||||
engine->fifo.destroy_context = nv40_fifo_destroy_context;
|
||||
engine->fifo.destroy_context = nv04_fifo_destroy_context;
|
||||
engine->fifo.load_context = nv40_fifo_load_context;
|
||||
engine->fifo.unload_context = nv40_fifo_unload_context;
|
||||
engine->display.early_init = nv04_display_early_init;
|
||||
|
@ -321,6 +340,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->pm.voltage_get = nouveau_voltage_gpio_get;
|
||||
engine->pm.voltage_set = nouveau_voltage_gpio_set;
|
||||
engine->pm.temp_get = nv40_temp_get;
|
||||
engine->crypt.init = nouveau_stub_init;
|
||||
engine->crypt.takedown = nouveau_stub_takedown;
|
||||
engine->vram.init = nouveau_mem_detect;
|
||||
engine->vram.flags_valid = nouveau_mem_flags_valid;
|
||||
break;
|
||||
case 0x50:
|
||||
case 0x80: /* gotta love NVIDIA's consistency.. */
|
||||
|
@ -330,10 +353,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->instmem.takedown = nv50_instmem_takedown;
|
||||
engine->instmem.suspend = nv50_instmem_suspend;
|
||||
engine->instmem.resume = nv50_instmem_resume;
|
||||
engine->instmem.populate = nv50_instmem_populate;
|
||||
engine->instmem.clear = nv50_instmem_clear;
|
||||
engine->instmem.bind = nv50_instmem_bind;
|
||||
engine->instmem.unbind = nv50_instmem_unbind;
|
||||
engine->instmem.get = nv50_instmem_get;
|
||||
engine->instmem.put = nv50_instmem_put;
|
||||
engine->instmem.map = nv50_instmem_map;
|
||||
engine->instmem.unmap = nv50_instmem_unmap;
|
||||
if (dev_priv->chipset == 0x50)
|
||||
engine->instmem.flush = nv50_instmem_flush;
|
||||
else
|
||||
|
@ -345,7 +368,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->timer.takedown = nv04_timer_takedown;
|
||||
engine->fb.init = nv50_fb_init;
|
||||
engine->fb.takedown = nv50_fb_takedown;
|
||||
engine->graph.grclass = nv50_graph_grclass;
|
||||
engine->graph.init = nv50_graph_init;
|
||||
engine->graph.takedown = nv50_graph_takedown;
|
||||
engine->graph.fifo_access = nv50_graph_fifo_access;
|
||||
|
@ -381,24 +403,32 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->display.init = nv50_display_init;
|
||||
engine->display.destroy = nv50_display_destroy;
|
||||
engine->gpio.init = nv50_gpio_init;
|
||||
engine->gpio.takedown = nouveau_stub_takedown;
|
||||
engine->gpio.takedown = nv50_gpio_fini;
|
||||
engine->gpio.get = nv50_gpio_get;
|
||||
engine->gpio.set = nv50_gpio_set;
|
||||
engine->gpio.irq_register = nv50_gpio_irq_register;
|
||||
engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
|
||||
engine->gpio.irq_enable = nv50_gpio_irq_enable;
|
||||
switch (dev_priv->chipset) {
|
||||
case 0xa3:
|
||||
case 0xa5:
|
||||
case 0xa8:
|
||||
case 0xaf:
|
||||
engine->pm.clock_get = nva3_pm_clock_get;
|
||||
engine->pm.clock_pre = nva3_pm_clock_pre;
|
||||
engine->pm.clock_set = nva3_pm_clock_set;
|
||||
break;
|
||||
default:
|
||||
case 0x84:
|
||||
case 0x86:
|
||||
case 0x92:
|
||||
case 0x94:
|
||||
case 0x96:
|
||||
case 0x98:
|
||||
case 0xa0:
|
||||
case 0xaa:
|
||||
case 0xac:
|
||||
case 0x50:
|
||||
engine->pm.clock_get = nv50_pm_clock_get;
|
||||
engine->pm.clock_pre = nv50_pm_clock_pre;
|
||||
engine->pm.clock_set = nv50_pm_clock_set;
|
||||
break;
|
||||
default:
|
||||
engine->pm.clock_get = nva3_pm_clock_get;
|
||||
engine->pm.clock_pre = nva3_pm_clock_pre;
|
||||
engine->pm.clock_set = nva3_pm_clock_set;
|
||||
break;
|
||||
}
|
||||
engine->pm.voltage_get = nouveau_voltage_gpio_get;
|
||||
engine->pm.voltage_set = nouveau_voltage_gpio_set;
|
||||
|
@ -406,17 +436,39 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->pm.temp_get = nv84_temp_get;
|
||||
else
|
||||
engine->pm.temp_get = nv40_temp_get;
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x84:
|
||||
case 0x86:
|
||||
case 0x92:
|
||||
case 0x94:
|
||||
case 0x96:
|
||||
case 0xa0:
|
||||
engine->crypt.init = nv84_crypt_init;
|
||||
engine->crypt.takedown = nv84_crypt_fini;
|
||||
engine->crypt.create_context = nv84_crypt_create_context;
|
||||
engine->crypt.destroy_context = nv84_crypt_destroy_context;
|
||||
engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
|
||||
break;
|
||||
default:
|
||||
engine->crypt.init = nouveau_stub_init;
|
||||
engine->crypt.takedown = nouveau_stub_takedown;
|
||||
break;
|
||||
}
|
||||
engine->vram.init = nv50_vram_init;
|
||||
engine->vram.get = nv50_vram_new;
|
||||
engine->vram.put = nv50_vram_del;
|
||||
engine->vram.flags_valid = nv50_vram_flags_valid;
|
||||
break;
|
||||
case 0xC0:
|
||||
engine->instmem.init = nvc0_instmem_init;
|
||||
engine->instmem.takedown = nvc0_instmem_takedown;
|
||||
engine->instmem.suspend = nvc0_instmem_suspend;
|
||||
engine->instmem.resume = nvc0_instmem_resume;
|
||||
engine->instmem.populate = nvc0_instmem_populate;
|
||||
engine->instmem.clear = nvc0_instmem_clear;
|
||||
engine->instmem.bind = nvc0_instmem_bind;
|
||||
engine->instmem.unbind = nvc0_instmem_unbind;
|
||||
engine->instmem.flush = nvc0_instmem_flush;
|
||||
engine->instmem.get = nv50_instmem_get;
|
||||
engine->instmem.put = nv50_instmem_put;
|
||||
engine->instmem.map = nv50_instmem_map;
|
||||
engine->instmem.unmap = nv50_instmem_unmap;
|
||||
engine->instmem.flush = nv84_instmem_flush;
|
||||
engine->mc.init = nv50_mc_init;
|
||||
engine->mc.takedown = nv50_mc_takedown;
|
||||
engine->timer.init = nv04_timer_init;
|
||||
|
@ -424,7 +476,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->timer.takedown = nv04_timer_takedown;
|
||||
engine->fb.init = nvc0_fb_init;
|
||||
engine->fb.takedown = nvc0_fb_takedown;
|
||||
engine->graph.grclass = NULL; //nvc0_graph_grclass;
|
||||
engine->graph.init = nvc0_graph_init;
|
||||
engine->graph.takedown = nvc0_graph_takedown;
|
||||
engine->graph.fifo_access = nvc0_graph_fifo_access;
|
||||
|
@ -453,7 +504,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->gpio.takedown = nouveau_stub_takedown;
|
||||
engine->gpio.get = nv50_gpio_get;
|
||||
engine->gpio.set = nv50_gpio_set;
|
||||
engine->gpio.irq_register = nv50_gpio_irq_register;
|
||||
engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
|
||||
engine->gpio.irq_enable = nv50_gpio_irq_enable;
|
||||
engine->crypt.init = nouveau_stub_init;
|
||||
engine->crypt.takedown = nouveau_stub_takedown;
|
||||
engine->vram.init = nvc0_vram_init;
|
||||
engine->vram.get = nvc0_vram_new;
|
||||
engine->vram.put = nv50_vram_del;
|
||||
engine->vram.flags_valid = nvc0_vram_flags_valid;
|
||||
break;
|
||||
default:
|
||||
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
|
||||
|
@ -493,9 +552,13 @@ nouveau_card_init_channel(struct drm_device *dev)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* no dma objects on fermi... */
|
||||
if (dev_priv->card_type >= NV_C0)
|
||||
goto out_done;
|
||||
|
||||
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
|
||||
0, dev_priv->vram_size,
|
||||
NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
|
||||
NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
|
||||
&gpuobj);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
@ -505,9 +568,10 @@ nouveau_card_init_channel(struct drm_device *dev)
|
|||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
|
||||
dev_priv->gart_info.aper_size,
|
||||
NV_DMA_ACCESS_RW, &gpuobj, NULL);
|
||||
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
|
||||
0, dev_priv->gart_info.aper_size,
|
||||
NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
|
||||
&gpuobj);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
|
@ -516,11 +580,12 @@ nouveau_card_init_channel(struct drm_device *dev)
|
|||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
out_done:
|
||||
mutex_unlock(&dev_priv->channel->mutex);
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
nouveau_channel_free(dev_priv->channel);
|
||||
dev_priv->channel = NULL;
|
||||
nouveau_channel_put(&dev_priv->channel);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -531,15 +596,25 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
|
|||
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
|
||||
if (state == VGA_SWITCHEROO_ON) {
|
||||
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
|
||||
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
|
||||
nouveau_pci_resume(pdev);
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
dev->switch_power_state = DRM_SWITCH_POWER_ON;
|
||||
} else {
|
||||
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
|
||||
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
|
||||
drm_kms_helper_poll_disable(dev);
|
||||
nouveau_pci_suspend(pdev, pmm);
|
||||
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
|
||||
}
|
||||
}
|
||||
|
||||
static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
|
||||
{
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
nouveau_fbcon_output_poll_changed(dev);
|
||||
}
|
||||
|
||||
static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
|
||||
{
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
|
@ -560,6 +635,7 @@ nouveau_card_init(struct drm_device *dev)
|
|||
|
||||
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
|
||||
vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
|
||||
nouveau_switcheroo_reprobe,
|
||||
nouveau_switcheroo_can_switch);
|
||||
|
||||
/* Initialise internal driver API hooks */
|
||||
|
@ -567,6 +643,8 @@ nouveau_card_init(struct drm_device *dev)
|
|||
if (ret)
|
||||
goto out;
|
||||
engine = &dev_priv->engine;
|
||||
spin_lock_init(&dev_priv->channels.lock);
|
||||
spin_lock_init(&dev_priv->tile.lock);
|
||||
spin_lock_init(&dev_priv->context_switch_lock);
|
||||
|
||||
/* Make the CRTCs and I2C buses accessible */
|
||||
|
@ -625,26 +703,28 @@ nouveau_card_init(struct drm_device *dev)
|
|||
if (ret)
|
||||
goto out_fb;
|
||||
|
||||
/* PCRYPT */
|
||||
ret = engine->crypt.init(dev);
|
||||
if (ret)
|
||||
goto out_graph;
|
||||
|
||||
/* PFIFO */
|
||||
ret = engine->fifo.init(dev);
|
||||
if (ret)
|
||||
goto out_graph;
|
||||
goto out_crypt;
|
||||
}
|
||||
|
||||
ret = engine->display.create(dev);
|
||||
if (ret)
|
||||
goto out_fifo;
|
||||
|
||||
/* this call irq_preinstall, register irq handler and
|
||||
* call irq_postinstall
|
||||
*/
|
||||
ret = drm_irq_install(dev);
|
||||
ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
|
||||
if (ret)
|
||||
goto out_display;
|
||||
goto out_vblank;
|
||||
|
||||
ret = drm_vblank_init(dev, 0);
|
||||
ret = nouveau_irq_init(dev);
|
||||
if (ret)
|
||||
goto out_irq;
|
||||
goto out_vblank;
|
||||
|
||||
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
|
||||
|
||||
|
@ -669,12 +749,16 @@ nouveau_card_init(struct drm_device *dev)
|
|||
out_fence:
|
||||
nouveau_fence_fini(dev);
|
||||
out_irq:
|
||||
drm_irq_uninstall(dev);
|
||||
out_display:
|
||||
nouveau_irq_fini(dev);
|
||||
out_vblank:
|
||||
drm_vblank_cleanup(dev);
|
||||
engine->display.destroy(dev);
|
||||
out_fifo:
|
||||
if (!nouveau_noaccel)
|
||||
engine->fifo.takedown(dev);
|
||||
out_crypt:
|
||||
if (!nouveau_noaccel)
|
||||
engine->crypt.takedown(dev);
|
||||
out_graph:
|
||||
if (!nouveau_noaccel)
|
||||
engine->graph.takedown(dev);
|
||||
|
@ -713,12 +797,12 @@ static void nouveau_card_takedown(struct drm_device *dev)
|
|||
|
||||
if (!engine->graph.accel_blocked) {
|
||||
nouveau_fence_fini(dev);
|
||||
nouveau_channel_free(dev_priv->channel);
|
||||
dev_priv->channel = NULL;
|
||||
nouveau_channel_put_unlocked(&dev_priv->channel);
|
||||
}
|
||||
|
||||
if (!nouveau_noaccel) {
|
||||
engine->fifo.takedown(dev);
|
||||
engine->crypt.takedown(dev);
|
||||
engine->graph.takedown(dev);
|
||||
}
|
||||
engine->fb.takedown(dev);
|
||||
|
@ -737,7 +821,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
|
|||
nouveau_gpuobj_takedown(dev);
|
||||
nouveau_mem_vram_fini(dev);
|
||||
|
||||
drm_irq_uninstall(dev);
|
||||
nouveau_irq_fini(dev);
|
||||
drm_vblank_cleanup(dev);
|
||||
|
||||
nouveau_pm_fini(dev);
|
||||
nouveau_bios_takedown(dev);
|
||||
|
@ -980,6 +1065,7 @@ err_out:
|
|||
|
||||
void nouveau_lastclose(struct drm_device *dev)
|
||||
{
|
||||
vga_switcheroo_process_delayed_switch();
|
||||
}
|
||||
|
||||
int nouveau_unload(struct drm_device *dev)
|
||||
|
@ -1024,21 +1110,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
|
|||
else
|
||||
getparam->value = NV_PCI;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_FB_PHYSICAL:
|
||||
getparam->value = dev_priv->fb_phys;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_AGP_PHYSICAL:
|
||||
getparam->value = dev_priv->gart_info.aper_base;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_PCI_PHYSICAL:
|
||||
if (dev->sg) {
|
||||
getparam->value = (unsigned long)dev->sg->virtual;
|
||||
} else {
|
||||
NV_ERROR(dev, "Requested PCIGART address, "
|
||||
"while no PCIGART was created\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_FB_SIZE:
|
||||
getparam->value = dev_priv->fb_available_size;
|
||||
break;
|
||||
|
@ -1046,7 +1117,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
|
|||
getparam->value = dev_priv->gart_info.aper_size;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
|
||||
getparam->value = dev_priv->vm_vram_base;
|
||||
getparam->value = 0; /* deprecated */
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_PTIMER_TIME:
|
||||
getparam->value = dev_priv->engine.timer.read(dev);
|
||||
|
@ -1054,6 +1125,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
|
|||
case NOUVEAU_GETPARAM_HAS_BO_USAGE:
|
||||
getparam->value = 1;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
|
||||
getparam->value = (dev_priv->card_type < NV_50);
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_GRAPH_UNITS:
|
||||
/* NV40 and NV50 versions are quite different, but register
|
||||
* address is the same. User is supposed to know the card
|
||||
|
@ -1087,8 +1161,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
/* Wait until (value(reg) & mask) == val, up until timeout has hit */
|
||||
bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
|
||||
uint32_t reg, uint32_t mask, uint32_t val)
|
||||
bool
|
||||
nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
|
||||
uint32_t reg, uint32_t mask, uint32_t val)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
|
||||
|
@ -1102,10 +1177,33 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
|
|||
return false;
|
||||
}
|
||||
|
||||
/* Wait until (value(reg) & mask) != val, up until timeout has hit */
|
||||
bool
|
||||
nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
|
||||
uint32_t reg, uint32_t mask, uint32_t val)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
|
||||
uint64_t start = ptimer->read(dev);
|
||||
|
||||
do {
|
||||
if ((nv_rd32(dev, reg) & mask) != val)
|
||||
return true;
|
||||
} while (ptimer->read(dev) - start < timeout);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Waits for PGRAPH to go completely idle */
|
||||
bool nouveau_wait_for_idle(struct drm_device *dev)
|
||||
{
|
||||
if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
uint32_t mask = ~0;
|
||||
|
||||
if (dev_priv->card_type == NV_40)
|
||||
mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
|
||||
|
||||
if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
|
||||
NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
|
||||
nv_rd32(dev, NV04_PGRAPH_STATUS));
|
||||
return false;
|
||||
|
|
|
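Aside on the nouveau_wait_eq()/nouveau_wait_ne() pair above: both are the usual poll-a-register-until-timeout idiom, with the PTIMER engine supplying the clock. A minimal standalone sketch of the same pattern follows; reg_read() and clock_ns() are made-up stand-ins for nv_rd32() and ptimer->read(), not the driver's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for nv_rd32(): pretend a status bit rises after a few reads. */
static uint32_t reg_read(void)
{
	static int calls;
	return (++calls > 3) ? 0x00000001 : 0x00000000;
}

/* Stand-in for ptimer->read(): host monotonic clock, in nanoseconds. */
static uint64_t clock_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Same shape as nouveau_wait_eq(): poll until (reg & mask) == val. */
static bool wait_eq(uint64_t timeout_ns, uint32_t mask, uint32_t val)
{
	uint64_t start = clock_ns();

	do {
		if ((reg_read() & mask) == val)
			return true;
	} while (clock_ns() - start < timeout_ns);

	return false;
}

int main(void)
{
	/* 10ms budget, the same figure the load-detect code passes in. */
	printf("bit rose: %s\n", wait_eq(10000000ULL, 0x1, 0x1) ? "yes" : "no");
	return 0;
}

The driver's two versions differ only in comparing == versus != and in reading the GPU's own timer rather than the host clock.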
@@ -0,0 +1,69 @@
/*
 * Copyright (C) 2010 Nouveau Project
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>

#include "nouveau_util.h"

static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

void
nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
{
	while (bf->name) {
		if (value & bf->mask) {
			printk(" %s", bf->name);
			value &= ~bf->mask;
		}

		bf++;
	}

	if (value)
		printk(" (unknown bits 0x%08x)", value);
}

void
nouveau_enum_print(const struct nouveau_enum *en, u32 value)
{
	while (en->name) {
		if (value == en->value) {
			printk("%s", en->name);
			return;
		}

		en++;
	}

	printk("(unknown enum 0x%08x)", value);
}

int
nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}
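The helpers in this new file walk sentinel-terminated decode tables. A self-contained sketch of the pattern, with an invented interrupt-bit table (the names are illustrative, not the driver's actual tables):

#include <stdint.h>
#include <stdio.h>

struct bitfield {
	uint32_t mask;
	const char *name;
};

/* Hypothetical status bits; a zeroed entry (NULL name) ends the table. */
static const struct bitfield intr_bits[] = {
	{ 0x00000001, "CACHE_ERROR" },
	{ 0x00000010, "DMA_PUSHER" },
	{ 0x00000100, "SEMAPHORE" },
	{}
};

static void bitfield_print(const struct bitfield *bf, uint32_t value)
{
	for (; bf->name; bf++) {
		if (value & bf->mask) {
			printf(" %s", bf->name);
			value &= ~bf->mask;	/* consume recognised bits */
		}
	}
	if (value)
		printf(" (unknown bits 0x%08x)", value);
}

int main(void)
{
	printf("INTR:");
	bitfield_print(intr_bits, 0x00000811);
	/* prints: INTR: CACHE_ERROR DMA_PUSHER (unknown bits 0x00000800) */
	printf("\n");
	return 0;
}

The sentinel convention is what lets nouveau_bitfield_print() and nouveau_enum_print() take just a table pointer, with no separate length argument.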
@@ -0,0 +1,45 @@
/*
 * Copyright (C) 2010 Nouveau Project
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __NOUVEAU_UTIL_H__
#define __NOUVEAU_UTIL_H__

struct nouveau_bitfield {
	u32 mask;
	const char *name;
};

struct nouveau_enum {
	u32 value;
	const char *name;
};

void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
void nouveau_enum_print(const struct nouveau_enum *, u32 value);
int nouveau_ratelimit(void);

#endif
@@ -0,0 +1,439 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	list_for_each_entry(r, &vram->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, vram, pte, len, phys);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				pde++;
				pte = 0;
			}
		}
	}

	vm->flush(vm);
}

void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
{
	nouveau_vm_map_at(vma, 0, vram);
}

void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  dma_addr_t *list)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, pte, list, len);

		num -= len;
		pte += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
	}
}

static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm->mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm->mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm->mutex);
	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(vm->mm, vma->node);
			mutex_unlock(&vm->mm->mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm->mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm->mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm->mutex);
}

int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;

	} else
	if (dev_priv->card_type == NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;

		/* Should be 4096 everywhere, this is a hack that's
		 * currently necessary to avoid an elusive bug that
		 * causes corruption when mixing small/large pages
		 */
		if (length < (1ULL << 40))
			block = 4096;
		else {
			block = (1 << pgt_bits);
			if (length < block)
				block = length;
		}
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm->mutex);
	return 0;
}

static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	if (!pgd)
		return;

	mutex_lock(&vm->mm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj != pgd)
			continue;

		list_del(&vpgd->head);
		nouveau_gpuobj_ref(NULL, &vpgd->obj);
		kfree(vpgd);
	}
	mutex_unlock(&vm->mm->mutex);
}

static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}
	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

	kfree(vm->pgt);
	kfree(vm);
}

int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}
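All of the map/unmap walkers in this file share one piece of index arithmetic: offsets are kept in 4KiB units, the top pgt_bits select a page-directory entry, and the remainder, shifted by the page size, selects the starting PTE. A standalone sketch of that split, using the NV50 numbers from nouveau_vm_new() (the driver stores pgt_bits = 29 - 12 after removing the 4KiB shift; large pages are 16-bit). This is an illustration of the arithmetic only, not driver code:

#include <stdint.h>
#include <stdio.h>

/* NV50: one PDE covers 1 << 29 bytes; offsets are in 4KiB units. */
#define PGT_BITS	(29 - 12)

struct walk {
	uint32_t pde;	/* page-directory index (before subtracting fpde) */
	uint32_t pte;	/* first PTE within that table */
	uint32_t max;	/* PTEs per table at this page size */
};

static struct walk split(uint64_t addr, unsigned page_shift)
{
	uint32_t offset = addr >> 12;		/* 4KiB units */
	uint32_t bits = page_shift - 12;	/* 0 small, 4 large on NV50 */
	struct walk w = {
		.pde = offset >> PGT_BITS,
		.pte = (offset & ((1 << PGT_BITS) - 1)) >> bits,
		.max = 1 << (PGT_BITS - bits),
	};
	return w;
}

int main(void)
{
	struct walk s = split(0x20001000ull, 12);	/* small page */
	struct walk l = split(0x20010000ull, 16);	/* large page */

	printf("small: pde=%u pte=%u max=%u\n", s.pde, s.pte, s.max);
	printf("large: pde=%u pte=%u max=%u\n", l.pde, l.pte, l.max);
	return 0;
}

When pte reaches max, the walkers advance pde and reset pte to 0, which is exactly the wrap handled by the "if (unlikely(end >= max))" branches above.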
@@ -0,0 +1,113 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifndef __NOUVEAU_VM_H__
#define __NOUVEAU_VM_H__

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_mm.h"

struct nouveau_vm_pgt {
	struct nouveau_gpuobj *obj[2];
	u32 refcount[2];
};

struct nouveau_vm_pgd {
	struct list_head head;
	struct nouveau_gpuobj *obj;
};

struct nouveau_vma {
	struct nouveau_vm *vm;
	struct nouveau_mm_node *node;
	u64 offset;
	u32 access;
};

struct nouveau_vm {
	struct drm_device *dev;
	struct nouveau_mm *mm;
	int refcount;

	struct list_head pgd_list;
	atomic_t pgraph_refs;
	atomic_t pcrypt_refs;

	struct nouveau_vm_pgt *pgt;
	u32 fpde;
	u32 lpde;

	u32 pgt_bits;
	u8 spg_shift;
	u8 lpg_shift;

	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
			struct nouveau_gpuobj *pgt[2]);
	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
		    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
		       u32 pte, dma_addr_t *, u32 cnt);
	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
	void (*flush)(struct nouveau_vm *);
};

/* nouveau_vm.c */
int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
		   struct nouveau_vm **);
int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
		   struct nouveau_gpuobj *pgd);
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
		   u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
		       dma_addr_t *);

/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		     struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
		    u32 pte, dma_addr_t *, u32 cnt);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);

/* nvc0_vm.c */
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		     struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
		    u32 pte, dma_addr_t *, u32 cnt);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);

#endif
@@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
	if (dev_priv->card_type >= NV_30)
		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);

	regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
	if (dev_priv->card_type >= NV_10)
		regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
	else
		regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;

	/* Some misc regs */
	if (dev_priv->card_type == NV_40) {

@@ -669,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
	if (nv_two_heads(dev))
		NVSetOwner(dev, nv_crtc->index);

	drm_vblank_pre_modeset(dev, nv_crtc->index);
	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	NVBlankScreen(dev, nv_crtc->index, true);

@@ -701,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
#endif

	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
	drm_vblank_post_modeset(dev, nv_crtc->index);
}

static void nv_crtc_destroy(struct drm_crtc *crtc)

@@ -986,6 +991,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
	.cursor_move = nv04_crtc_cursor_move,
	.gamma_set = nv_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.page_flip = nouveau_crtc_page_flip,
	.destroy = nv_crtc_destroy,
};
@@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
	 * use a 10ms timeout (guards against crtc being inactive, in
	 * which case blank state would never change)
	 */
	if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
				0x00000001, 0x00000000))
	if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
			     0x00000001, 0x00000000))
		return -EBUSY;
	if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
				0x00000001, 0x00000001))
	if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
			     0x00000001, 0x00000001))
		return -EBUSY;
	if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
				0x00000001, 0x00000000))
	if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
			     0x00000001, 0x00000000))
		return -EBUSY;

	udelay(100);
@@ -32,6 +32,9 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"

static void nv04_vblank_crtc0_isr(struct drm_device *);
static void nv04_vblank_crtc1_isr(struct drm_device *);

static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{

@@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev)
		func->save(encoder);
	}

	nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
	nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
	return 0;
}

@@ -208,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev)

	NV_DEBUG_KMS(dev, "\n");

	nouveau_irq_unregister(dev, 24);
	nouveau_irq_unregister(dev, 25);

	/* Turn every CRTC off. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct drm_mode_set modeset = {

@@ -258,3 +266,16 @@ nv04_display_init(struct drm_device *dev)
	return 0;
}

static void
nv04_vblank_crtc0_isr(struct drm_device *dev)
{
	nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
	drm_handle_vblank(dev, 0);
}

static void
nv04_vblank_crtc1_isr(struct drm_device *dev)
{
	nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
	drm_handle_vblank(dev, 1);
}
@@ -28,52 +28,39 @@
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"

void
int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, region);
		return;
	}
	ret = RING_SPACE(chan, 4);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
	OUT_RING(chan, (region->sy << 16) | region->sx);
	OUT_RING(chan, (region->dy << 16) | region->dx);
	OUT_RING(chan, (region->height << 16) | region->width);
	FIRE_RING(chan);
	return 0;
}

void
int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);
		return;
	}
	ret = RING_SPACE(chan, 7);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);

@@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
	OUT_RING(chan, (rect->dx << 16) | rect->dy);
	OUT_RING(chan, (rect->width << 16) | rect->height);
	FIRE_RING(chan);
	return 0;
}

void
int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbdev *nfbdev = info->par;

@@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
	uint32_t dsize;
	uint32_t width;
	uint32_t *data = (uint32_t *)image->data;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (image->depth != 1)
		return -ENODEV;

	if (image->depth != 1) {
		cfb_imageblit(info, image);
		return;
	}

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_imageblit(info, image);
		return;
	}
	ret = RING_SPACE(chan, 8);
	if (ret)
		return ret;

	width = ALIGN(image->width, 8);
	dsize = ALIGN(width * image->height, 32) >> 5;

@@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
	while (dsize) {
		int iter_len = dsize > 128 ? 128 : dsize;

		if (RING_SPACE(chan, iter_len + 1)) {
			nouveau_fbcon_gpu_lockup(info);
			cfb_imageblit(info, image);
			return;
		}
		ret = RING_SPACE(chan, iter_len + 1);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
		OUT_RINGp(chan, data, iter_len);

@@ -157,22 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
	}

	FIRE_RING(chan);
}

static int
nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
	if (ret)
		return ret;

	ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
	return 0;
}

int

@@ -214,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
		return -EINVAL;
	}

	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
				   0x0062 : 0x0042, NvCtxSurf2D);
	ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
				    dev_priv->card_type >= NV_10 ?
				    0x0062 : 0x0042);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
	ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
	ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
	ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
	ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
	if (ret)
		return ret;

	ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
				   0x009f : 0x005f, NvImageBlit);
	ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
				    dev_priv->chipset >= 0x11 ?
				    0x009f : 0x005f);
	if (ret)
		return ret;
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_util.h"

#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32

@@ -128,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
	if (ret)
		return ret;

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV03_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* Setup initial state */

@@ -151,10 +157,31 @@ void
nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;

	nv_wr32(dev, NV04_PFIFO_MODE,
		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);

	/* Unload the context if it's the currently active one */
	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}

	/* Keep it from being rescheduled */
	nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);

	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the channel resources */
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}
	nouveau_gpuobj_ref(NULL, &chan->ramfc);
}

@@ -208,7 +235,7 @@ nv04_fifo_unload_context(struct drm_device *dev)
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;

	chan = dev_priv->fifos[chid];
	chan = dev_priv->channels.ptr[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;

@@ -267,6 +294,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);
}

@@ -289,7 +317,7 @@ nv04_fifo_init(struct drm_device *dev)
	pfifo->reassign(dev, true);

	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (dev_priv->fifos[i]) {
		if (dev_priv->channels.ptr[i]) {
			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
		}

@@ -298,3 +326,207 @@ nv04_fifo_init(struct drm_device *dev)
	return 0;
}

void
nv04_fifo_fini(struct drm_device *dev)
{
	nv_wr32(dev, 0x2140, 0x00000000);
	nouveau_irq_unregister(dev, 8);
}

static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	struct nouveau_gpuobj *obj;
	unsigned long flags;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	u32 engine;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
		chan = dev_priv->channels.ptr[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000: /* bind object to subchannel */
		obj = nouveau_ramht_find(chan, data);
		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
			break;

		chan->sw_subchannel[subc] = obj->class;
		engine = 0x0000000f << (subc * 4);

		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
		handled = true;
		break;
	default:
		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
					      mthd, data))
			handled = true;
		break;
	}

out:
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return handled;
}
||||
|
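+/* First-level PFIFO interrupt handler.  CACHE_ERROR traps are offered to
+ * nouveau_fifo_swmthd() first, in case the faulting method belongs to a
+ * software object; anything left unhandled is logged and skipped so the
+ * puller can make progress again.
+ */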
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	uint32_t status, reassign;
+	int cnt = 0;
+
+	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+		uint32_t chid, get;
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+		chid = engine->fifo.channel_id(dev);
+		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+			uint32_t mthd, data;
+			int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.. Tests show that it
+			 * wraps around to the start at GET=0x800.. No clue
+			 * as to why..
+			 */
+			ptr = (get & 0x7ff) >> 2;
+
+			if (dev_priv->card_type < NV_40) {
+				mthd = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_DATA(ptr));
+			} else {
+				mthd = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_DATA(ptr));
+			}
+
+			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+					     "Mthd 0x%04x Data 0x%08x\n",
+					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+					data);
+			}
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_CACHE_ERROR);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+		}
+
+		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+			u32 dma_get = nv_rd32(dev, 0x003244);
+			u32 dma_put = nv_rd32(dev, 0x003240);
+			u32 push = nv_rd32(dev, 0x003220);
+			u32 state = nv_rd32(dev, 0x003228);
+
+			if (dev_priv->card_type == NV_50) {
+				u32 ho_get = nv_rd32(dev, 0x003328);
+				u32 ho_put = nv_rd32(dev, 0x003320);
+				u32 ib_get = nv_rd32(dev, 0x003334);
+				u32 ib_put = nv_rd32(dev, 0x003330);
+
+				if (nouveau_ratelimit())
+					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+					     "State 0x%08x Push 0x%08x\n",
+						chid, ho_get, dma_get, ho_put,
+						dma_put, ib_get, ib_put, state,
+						push);
+
+				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+				nv_wr32(dev, 0x003364, 0x00000000);
+				if (dma_get != dma_put || ho_get != ho_put) {
+					nv_wr32(dev, 0x003244, dma_put);
+					nv_wr32(dev, 0x003328, ho_put);
+				} else
+				if (ib_get != ib_put) {
+					nv_wr32(dev, 0x003334, ib_put);
+				}
+			} else {
+				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
+					chid, dma_get, dma_put, state, push);
+
+				if (dma_get != dma_put)
+					nv_wr32(dev, 0x003244, dma_put);
+			}
+
+			nv_wr32(dev, 0x003228, 0x00000000);
+			nv_wr32(dev, 0x003220, 0x00000001);
+			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+		}
+
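+		/* Semaphore error: ack the interrupt, set the low bit of
+		 * CACHE1_SEMAPHORE (presumably flagging the failed acquire),
+		 * then step GET past the method and restart the puller. */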
+		if (status & NV_PFIFO_INTR_SEMAPHORE) {
+			uint32_t sem;
+
+			status &= ~NV_PFIFO_INTR_SEMAPHORE;
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_SEMAPHORE);
+
+			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+		}
+
+		if (dev_priv->card_type == NV_50) {
+			if (status & 0x00000010) {
+				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+				status &= ~0x00000010;
+				nv_wr32(dev, 0x002100, 0x00000010);
+			}
+		}
+
+		if (status) {
+			if (nouveau_ratelimit())
+				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+					status, chid);
+			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+			status = 0;
+		}
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+	}
+
+	if (status) {
+		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+		nv_wr32(dev, 0x2140, 0);
+		nv_wr32(dev, 0x140, 0);
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_util.h"
+
+static int  nv04_graph_register(struct drm_device *dev);
+static void nv04_graph_isr(struct drm_device *dev);
 
 static uint32_t nv04_graph_ctx_regs[] = {
 	0x0040053c,
@@ -357,10 +362,10 @@ nv04_graph_channel(struct drm_device *dev)
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
-void
+static void
 nv04_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev)
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@ nv04_graph_context_switch(struct drm_device *dev)
 
 	/* Load context for next channel */
 	chid = dev_priv->engine.fifo.channel_id(dev);
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		nv04_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
 
 void nv04_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
 
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@ int nv04_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
+	int ret;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv04_graph_register(dev);
+	if (ret)
+		return ret;
+
 	/* Enable PGRAPH interrupts */
+	nouveau_irq_register(dev, 12, nv04_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -510,6 +533,8 @@ int nv04_graph_init(struct drm_device *dev)
 
 void nv04_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 void
@@ -524,13 +549,27 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
 }
 
 static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
-			int mthd, uint32_t data)
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+			u32 class, u32 mthd, u32 data)
 {
 	atomic_set(&chan->fence.last_sequence_irq, data);
 	return 0;
 }
 
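+/* Software method backing kernel-driven page flips: once
+ * nouveau_finish_page_flip() reports the flip has completed, the CRTC is
+ * pointed at the new framebuffer's scanout offset. */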
+int
+nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state s;
+
+	if (!nouveau_finish_page_flip(chan, &s))
+		nv_set_crtc_base(dev, s.crtc,
+				 s.offset + s.y * s.pitch + s.x * s.bpp / 8);
+
+	return 0;
+}
+
 /*
  * Software methods, why they are needed, and how they all work:
 *
@@ -606,12 +645,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
  */
 
 static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
 	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
-	uint32_t tmp;
+	u32 tmp;
 
 	tmp = nv_ri32(dev, instance);
 	tmp &= ~mask;
@@ -623,11 +662,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
 }
 
 static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
-	uint32_t tmp, ctx1;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 tmp, ctx1;
 	int class, op, valid = 1;
 
 	ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val
 }
 
 static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	if (data > 5)
 		return 1;
 	/* Old versions of the objects only accept first three operations. */
-	if (data > 2 && grclass < 0x40)
+	if (data > 2 && class < 0x40)
 		return 1;
 	nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -706,8 +745,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -725,8 +764,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
-			    int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -742,8 +781,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
-				    int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+				    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -763,8 +802,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -778,8 +817,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -793,8 +832,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
-			 int mthd, uint32_t data)
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+			 u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -808,8 +847,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -823,8 +862,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -838,8 +877,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -853,8 +892,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -868,8 +907,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
-				int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+				u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -883,8 +922,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -898,8 +937,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
-			  int mthd, uint32_t data)
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -913,8 +952,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
-			    int mthd, uint32_t data)
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -930,194 +969,346 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
 	return 1;
 }
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
-	{ 0x0150, nv04_graph_mthd_set_ref },
+static int
+nv04_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	/* dvd subpicture */
+	NVOBJ_CLASS(dev, 0x0038, GR);
+
+	/* m2mf */
+	NVOBJ_CLASS(dev, 0x0039, GR);
+
+	/* nv03 gdirect */
+	NVOBJ_CLASS(dev, 0x004b, GR);
+	NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 gdirect */
+	NVOBJ_CLASS(dev, 0x004a, GR);
+	NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 imageblit */
+	NVOBJ_CLASS(dev, 0x001f, GR);
+	NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+	NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 imageblit */
+	NVOBJ_CLASS(dev, 0x005f, GR);
+	NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 iifc */
+	NVOBJ_CLASS(dev, 0x0060, GR);
+	NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+	/* nv05 iifc */
+	NVOBJ_CLASS(dev, 0x0064, GR);
+
+	/* nv01 ifc */
+	NVOBJ_CLASS(dev, 0x0021, GR);
+	NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 ifc */
+	NVOBJ_CLASS(dev, 0x0061, GR);
+	NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 ifc */
+	NVOBJ_CLASS(dev, 0x0065, GR);
+
+	/* nv03 sifc */
+	NVOBJ_CLASS(dev, 0x0036, GR);
+	NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifc */
+	NVOBJ_CLASS(dev, 0x0076, GR);
+	NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 sifc */
+	NVOBJ_CLASS(dev, 0x0066, GR);
+
+	/* nv03 sifm */
+	NVOBJ_CLASS(dev, 0x0037, GR);
+	NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifm */
+	NVOBJ_CLASS(dev, 0x0077, GR);
+	NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* null */
+	NVOBJ_CLASS(dev, 0x0030, GR);
+
+	/* surf2d */
+	NVOBJ_CLASS(dev, 0x0042, GR);
+
+	/* rop */
+	NVOBJ_CLASS(dev, 0x0043, GR);
+
+	/* beta1 */
+	NVOBJ_CLASS(dev, 0x0012, GR);
+
+	/* beta4 */
+	NVOBJ_CLASS(dev, 0x0072, GR);
+
+	/* cliprect */
+	NVOBJ_CLASS(dev, 0x0019, GR);
+
+	/* nv01 pattern */
+	NVOBJ_CLASS(dev, 0x0018, GR);
+
+	/* nv04 pattern */
+	NVOBJ_CLASS(dev, 0x0044, GR);
+
+	/* swzsurf */
+	NVOBJ_CLASS(dev, 0x0052, GR);
+
+	/* surf3d */
+	NVOBJ_CLASS(dev, 0x0053, GR);
+	NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+	NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+	/* nv03 tex_tri */
+	NVOBJ_CLASS(dev, 0x0048, GR);
+	NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+	NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+	/* tex_tri */
+	NVOBJ_CLASS(dev, 0x0054, GR);
+
+	/* multitex_tri */
+	NVOBJ_CLASS(dev, 0x0055, GR);
+
+	/* nv01 chroma */
+	NVOBJ_CLASS(dev, 0x0017, GR);
+
+	/* nv04 chroma */
+	NVOBJ_CLASS(dev, 0x0057, GR);
+
+	/* surf_dst */
+	NVOBJ_CLASS(dev, 0x0058, GR);
+
+	/* surf_src */
+	NVOBJ_CLASS(dev, 0x0059, GR);
+
+	/* surf_color */
+	NVOBJ_CLASS(dev, 0x005a, GR);
+
+	/* surf_zeta */
+	NVOBJ_CLASS(dev, 0x005b, GR);
+
+	/* nv01 line */
+	NVOBJ_CLASS(dev, 0x001c, GR);
+	NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 line */
+	NVOBJ_CLASS(dev, 0x005c, GR);
+	NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 tri */
+	NVOBJ_CLASS(dev, 0x001d, GR);
+	NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 tri */
+	NVOBJ_CLASS(dev, 0x005d, GR);
+	NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 rect */
+	NVOBJ_CLASS(dev, 0x001e, GR);
+	NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 rect */
+	NVOBJ_CLASS(dev, 0x005e, GR);
+	NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+};
+
+static struct nouveau_bitfield nv04_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{}
+};
+
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
-	{ 0x0184, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0188, nv04_graph_mthd_bind_rop },
-	{ 0x018c, nv04_graph_mthd_bind_beta1 },
-	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x019c, nv04_graph_mthd_bind_surf_src },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_beta4 },
-	{ 0x019c, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
-	{ 0x0188, nv04_graph_mthd_bind_chroma },
-	{ 0x018c, nv04_graph_mthd_bind_clip },
-	{ 0x0190, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0194, nv04_graph_mthd_bind_rop },
-	{ 0x0198, nv04_graph_mthd_bind_beta1 },
-	{ 0x019c, nv04_graph_mthd_bind_beta4 },
-	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_surf_color },
-	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
-	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
-	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
-	{},
-};
-
-struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
-	{ 0x0038, false, NULL }, /* dvd subpicture */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
-	{ 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
-	{ 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
-	{ 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
-	{ 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
-	{ 0x0064, false, NULL }, /* nv05 iifc */
-	{ 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
-	{ 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
-	{ 0x0065, false, NULL }, /* nv05 ifc */
-	{ 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
-	{ 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
-	{ 0x0066, false, NULL }, /* nv05 sifc */
-	{ 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
-	{ 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0042, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0018, false, NULL }, /* nv01 pattern */
-	{ 0x0044, false, NULL }, /* nv04 pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
-	{ 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
-	{ 0x0054, false, NULL }, /* tex_tri */
-	{ 0x0055, false, NULL }, /* multitex_tri */
-	{ 0x0017, false, NULL }, /* nv01 chroma */
-	{ 0x0057, false, NULL }, /* nv04 chroma */
-	{ 0x0058, false, NULL }, /* surf_dst */
-	{ 0x0059, false, NULL }, /* surf_src */
-	{ 0x005a, false, NULL }, /* surf_color */
-	{ 0x005b, false, NULL }, /* surf_zeta */
-	{ 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
-	{ 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
-	{ 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
-	{ 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
-	{ 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
-	{ 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
-	{ 0x506e, true, nv04_graph_mthds_sw },
+static struct nouveau_bitfield nv04_graph_nstatus[] =
+{
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
 	{}
 };
 
+struct nouveau_bitfield nv04_graph_nsource[] =
+{
+	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
+	{}
+};
+
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x0f000000) >> 24;
+		u32 subc = (addr & 0x0000e000) >> 13;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_NOTIFY) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_NOTIFY;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv04_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv04_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -97,35 +97,6 @@ nv04_instmem_takedown(struct drm_device *dev)
 	nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
 }
 
-int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *sz)
-{
-	return 0;
-}
-
-void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	return 0;
-}
-
-int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	return 0;
-}
-
-void
-nv04_instmem_flush(struct drm_device *dev)
-{
-}
-
 int
 nv04_instmem_suspend(struct drm_device *dev)
 {
@@ -137,3 +108,56 @@ nv04_instmem_resume(struct drm_device *dev)
 {
 }
 
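+/* drm_mm can't allocate node memory with a spinlock held, so nodes are
+ * pre-allocated with drm_mm_pre_get() and the search/get sequence retried
+ * until drm_mm_get_block_atomic() succeeds under ramin_lock. */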
+int
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+{
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct drm_mm_node *ramin = NULL;
+
+	do {
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
+
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
+
+		ramin = drm_mm_get_block_atomic(ramin, size, align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
+
+	gpuobj->node  = ramin;
+	gpuobj->vinst = ramin->start;
+	return 0;
+}
+
+void
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(gpuobj->node);
+	gpuobj->node = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
+}
+
+int
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
+{
+	gpuobj->pinst = gpuobj->vinst;
+	return 0;
+}
+
+void
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
+{
+}
+
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
-void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct drm_mm_node *mem;
+	int ret;
 
-	if (pitch) {
-		if (dev_priv->card_type >= NV_20)
-			addr |= 1;
-		else
-			addr |= 1 << 31;
-	}
+	ret = drm_mm_pre_get(&pfb->tag_heap);
+	if (ret)
+		return NULL;
+
+	spin_lock(&dev_priv->tile.lock);
+	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+	if (mem)
+		mem = drm_mm_get_block_atomic(mem, size, 0);
+	spin_unlock(&dev_priv->tile.lock);
+
+	return mem;
 }
 
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->tile.lock);
+	drm_mm_put_block(mem);
+	spin_unlock(&dev_priv->tile.lock);
+}
+
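+/* The tag heap managed above hands out on-die compression tag memory; the
+ * caller below asks for one unit per 256 bytes of the tiled region. */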
+void
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+	tile->addr = addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+
+	if (dev_priv->card_type == NV_20) {
+		if (flags & NOUVEAU_GEM_TILE_ZETA) {
+			/*
+			 * Allocate some of the on-die tag memory,
+			 * used to store Z compression meta-data (most
+			 * likely just a bitmap determining if a given
+			 * tile is compressed or not).
+			 */
+			tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+
+			if (tile->tag_mem) {
+				/* Enable Z compression */
+				if (dev_priv->chipset >= 0x25)
+					tile->zcomp = tile->tag_mem->start |
+						      (bpp == 16 ?
+						       NV25_PFB_ZCOMP_MODE_16 :
+						       NV25_PFB_ZCOMP_MODE_32);
+				else
+					tile->zcomp = tile->tag_mem->start |
+						      NV20_PFB_ZCOMP_EN |
+						      (bpp == 16 ? 0 :
+						       NV20_PFB_ZCOMP_MODE_32);
+			}
+
+			tile->addr |= 3;
+		} else {
+			tile->addr |= 1;
+		}
+
+	} else {
+		tile->addr |= 1 << 31;
+	}
+}
+
+void
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	if (tile->tag_mem) {
+		nv20_fb_free_tag(dev, tile->tag_mem);
+		tile->tag_mem = NULL;
+	}
+
-	nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PFB_TILE(i), addr);
+	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
 }
 
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+
+	if (dev_priv->card_type == NV_20)
+		nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
+}
+
 int
@@ -31,9 +117,14 @@ nv10_fb_init(struct drm_device *dev)
 
 	pfb->num_tiles = NV10_PFB_TILE__SIZE;
 
+	if (dev_priv->card_type == NV_20)
+		drm_mm_init(&pfb->tag_heap, 0,
+			    (dev_priv->chipset >= 0x25 ?
+			     64 * 1024 : 32 * 1024));
+
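+	/* The heap initialised above covers the Z-compression tag space:
+	 * 64KiB worth of tags on nv25 and up, 32KiB on original nv20. */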
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
@@ -41,4 +132,13 @@ nv10_fb_init(struct drm_device *dev)
 void
 nv10_fb_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	int i;
+
+	for (i = 0; i < pfb->num_tiles; i++)
+		pfb->free_tile_region(dev, i);
+
+	if (dev_priv->card_type == NV_20)
+		drm_mm_takedown(&pfb->tag_heap);
 }
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	/* Fill entries that are seen filled in dumps of nvidia driver just
 	 * after channel's is put into DMA mode
 	 */
@@ -73,17 +78,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
 	return 0;
 }
 
-void
-nv10_fifo_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
-	nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
 static void
 nv10_fifo_do_load_context(struct drm_device *dev, int chid)
 {
@@ -219,6 +213,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
 static void
 nv10_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -241,7 +236,7 @@ nv10_fifo_init(struct drm_device *dev)
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+static int  nv10_graph_register(struct drm_device *);
+static void nv10_graph_isr(struct drm_device *);
 
 #define NV10_FIFO_NUMBER 32
 
@@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev)
 	return 0;
 }
 
-void
+static void
 nv10_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev)
 
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan && chan->pgraph_ctx)
 		nv10_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 #define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
 
 void nv10_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
 
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch)
-		addr |= 1 << 31;
-
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }
 
 int nv10_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
-	int i;
+	int ret, i;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv10_graph_register(dev);
+	if (ret)
+		return ret;
+
+	nouveau_irq_register(dev, 12, nv10_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv10_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
 
@@ -948,17 +965,17 @@ int nv10_graph_init(struct drm_device *dev)
 
 void nv10_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct graph_state *ctx = chan->pgraph_ctx;
 	struct pipe_state *pipe = &ctx->pipe_state;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	uint32_t xfmode0, xfmode1;
 	int i;
@@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
 static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
 	nouveau_wait_for_idle(dev);
 
@@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
 	nv_wr32(dev, 0x004006b0,
 		nv_rd32(dev, 0x004006b0) | 0x8 << 24);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
-	{ 0x1638, nv17_graph_mthd_lma_window },
-	{ 0x163c, nv17_graph_mthd_lma_window },
-	{ 0x1640, nv17_graph_mthd_lma_window },
-	{ 0x1644, nv17_graph_mthd_lma_window },
-	{ 0x1658, nv17_graph_mthd_lma_enable },
+static int
+nv10_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+	/* celcius */
+	if (dev_priv->chipset <= 0x10) {
+		NVOBJ_CLASS(dev, 0x0056, GR);
+	} else
+	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+		NVOBJ_CLASS(dev, 0x0096, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x0099, GR);
+		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+	}
+
||||
/* nvsw */
|
||||
NVOBJ_CLASS(dev, 0x506e, SW);
|
||||
NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
|
||||
|
||||
dev_priv->engine.graph.registered = true;
|
||||
return 0;
|
||||
}
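
Every nvXX_graph_register() helper added in this series has the same idempotent shape: return early if the engine is already registered, declare the supported object classes, hook up any software methods, then latch the flag so repeated init calls are harmless. A minimal standalone sketch of that pattern (the table and register_class() helper are illustrative stand-ins, not the driver's NVOBJ_CLASS machinery):

#include <stdbool.h>
#include <stddef.h>

struct engine { bool registered; };

/* stand-in for the side effects of NVOBJ_CLASS() */
static void register_class(struct engine *eng, unsigned int class_id)
{
        (void)eng; (void)class_id;
}

static int graph_register(struct engine *eng)
{
        static const unsigned int classes[] = { 0x0030, 0x0039, 0x004a };
        size_t i;

        if (eng->registered)
                return 0;       /* safe to call from every init path */

        for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
                register_class(eng, classes[i]);

        eng->registered = true;
        return 0;
}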

struct nouveau_bitfield nv10_graph_intr[] = {
        { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
        { NV_PGRAPH_INTR_ERROR,  "ERROR"  },
        {}
};

struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
        { 0x0030, false, NULL }, /* null */
        { 0x0039, false, NULL }, /* m2mf */
        { 0x004a, false, NULL }, /* gdirect */
        { 0x005f, false, NULL }, /* imageblit */
        { 0x009f, false, NULL }, /* imageblit (nv12) */
        { 0x008a, false, NULL }, /* ifc */
        { 0x0089, false, NULL }, /* sifm */
        { 0x0062, false, NULL }, /* surf2d */
        { 0x0043, false, NULL }, /* rop */
        { 0x0012, false, NULL }, /* beta1 */
        { 0x0072, false, NULL }, /* beta4 */
        { 0x0019, false, NULL }, /* cliprect */
        { 0x0044, false, NULL }, /* pattern */
        { 0x0052, false, NULL }, /* swzsurf */
        { 0x0093, false, NULL }, /* surf3d */
        { 0x0094, false, NULL }, /* tex_tri */
        { 0x0095, false, NULL }, /* multitex_tri */
        { 0x0056, false, NULL }, /* celcius (nv10) */
        { 0x0096, false, NULL }, /* celcius (nv11) */
        { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
struct nouveau_bitfield nv10_graph_nstatus[] =
{
        { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
        { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
        { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
        { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
        {}
};

static void
nv10_graph_isr(struct drm_device *dev)
{
        u32 stat;

        while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
                u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
                u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
                u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
                u32 chid = (addr & 0x01f00000) >> 20;
                u32 subc = (addr & 0x00070000) >> 16;
                u32 mthd = (addr & 0x00001ffc);
                u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
                u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
                u32 show = stat;

                if (stat & NV_PGRAPH_INTR_ERROR) {
                        if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
                                if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
                                        show &= ~NV_PGRAPH_INTR_ERROR;
                        }
                }

                if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
                        nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
                        stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
                        show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
                        nv10_graph_context_switch(dev);
                }

                nv_wr32(dev, NV03_PGRAPH_INTR, stat);
                nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

                if (show && nouveau_ratelimit()) {
                        NV_INFO(dev, "PGRAPH -");
                        nouveau_bitfield_print(nv10_graph_intr, show);
                        printk(" nsource:");
                        nouveau_bitfield_print(nv04_graph_nsource, nsource);
                        printk(" nstatus:");
                        nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
                        printk("\n");
                        NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
                                     "mthd 0x%04x data 0x%08x\n",
                                chid, subc, class, mthd, data);
                }
        }
}
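
In nv10_graph_isr() the channel, subchannel and method are all recovered from the single NV04_PGRAPH_TRAPPED_ADDR word by mask-and-shift. A hedged standalone illustration of that decode, reusing the exact masks above (the sample trap value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t addr = 0x01234ffc;                /* example trap word */
        uint32_t chid = (addr & 0x01f00000) >> 20; /* bits 24:20 - channel id */
        uint32_t subc = (addr & 0x00070000) >> 16; /* bits 18:16 - subchannel */
        uint32_t mthd = (addr & 0x00001ffc);       /* bits 12:2  - method offset */

        printf("ch %u subc %u mthd 0x%04x\n",
               (unsigned)chid, (unsigned)subc, (unsigned)mthd);
        return 0;
}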
@@ -32,6 +32,10 @@
#define NV34_GRCTX_SIZE (18140)
#define NV35_36_GRCTX_SIZE (22396)

static int nv20_graph_register(struct drm_device *);
static int nv30_graph_register(struct drm_device *);
static void nv20_graph_isr(struct drm_device *);

static void
nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{

@@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        unsigned long flags;

        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        pgraph->fifo_access(dev, false);

        /* Unload the context if it's the currently active one */
        if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);

        pgraph->fifo_access(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* Free the context resources */
        nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}

int

@@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev)
}

void
nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
                             uint32_t size, uint32_t pitch)
nv20_graph_set_tile_region(struct drm_device *dev, int i)
{
        uint32_t limit = max(1u, addr + size) - 1;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        if (pitch)
                addr |= 1;

        nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
        nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
        nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
        nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
        nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
        nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);

        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);

        if (dev_priv->card_type == NV_20) {
                nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
        }
}
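
The reworked nv20_graph_set_tile_region() no longer takes addr/size/pitch arguments; it programs PGRAPH purely from the nouveau_tile_reg state that the PFB code has already filled in, so every engine sees one consistent description of region i. A hedged sketch of that calling convention (the struct, array and wr32() helper are stand-ins, and the register offsets are illustrative only):

#include <stdint.h>

struct tile_reg { uint32_t addr, limit, pitch; };

static struct tile_reg tile_regs[8];    /* cached per-region state */

static void wr32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

/* program one region purely from the cached state, no ad-hoc parameters */
static void set_tile_region(int i)
{
        const struct tile_reg *tile = &tile_regs[i];

        wr32(0x100240 + i * 16, tile->limit);
        wr32(0x100244 + i * 16, tile->pitch);
        wr32(0x100248 + i * 16, tile->addr);
}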

int

@@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev)

        nv20_graph_rdi(dev);

        ret = nv20_graph_register(dev);
        if (ret) {
                nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
                return ret;
        }

        nouveau_irq_register(dev, 12, nv20_graph_isr);
        nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
        nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

@@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev)
        nv_wr32(dev, 0x40009C , 0x00000040);

        if (dev_priv->chipset >= 0x25) {
                nv_wr32(dev, 0x400890, 0x00080000);
                nv_wr32(dev, 0x400890, 0x00a8cfff);
                nv_wr32(dev, 0x400610, 0x304B1FB6);
                nv_wr32(dev, 0x400B80, 0x18B82880);
                nv_wr32(dev, 0x400B80, 0x1cbd3883);
                nv_wr32(dev, 0x400B84, 0x44000000);
                nv_wr32(dev, 0x400098, 0x40000080);
                nv_wr32(dev, 0x400B88, 0x000000ff);

        } else {
                nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
                nv_wr32(dev, 0x400880, 0x0008c7df);
                nv_wr32(dev, 0x400094, 0x00000005);
                nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
                nv_wr32(dev, 0x400B80, 0x45eae20e);
                nv_wr32(dev, 0x400B84, 0x24000000);
                nv_wr32(dev, 0x400098, 0x00000040);
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);

@@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev)

        /* Turn all the tiling regions off. */
        for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
                nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
                nv20_graph_set_tile_region(dev, i);

        for (i = 0; i < 8; i++) {
                nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
                nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
                nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
                        nv_rd32(dev, 0x100300 + i * 4));
        }
        nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
        nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
        nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));

@@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

        nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
        nouveau_irq_unregister(dev, 12);

        nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}

@@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev)
                return ret;
        }

        ret = nv30_graph_register(dev);
        if (ret) {
                nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
                return ret;
        }

        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
                pgraph->ctx_table->pinst >> 4);

        nouveau_irq_register(dev, 12, nv20_graph_isr);
        nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
        nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

@@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev)

        /* Turn all the tiling regions off. */
        for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
                nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
                nv20_graph_set_tile_region(dev, i);

        nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
        nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);

@@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev)
        return 0;
}

struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
        { 0x0030, false, NULL }, /* null */
        { 0x0039, false, NULL }, /* m2mf */
        { 0x004a, false, NULL }, /* gdirect */
        { 0x009f, false, NULL }, /* imageblit (nv12) */
        { 0x008a, false, NULL }, /* ifc */
        { 0x0089, false, NULL }, /* sifm */
        { 0x0062, false, NULL }, /* surf2d */
        { 0x0043, false, NULL }, /* rop */
        { 0x0012, false, NULL }, /* beta1 */
        { 0x0072, false, NULL }, /* beta4 */
        { 0x0019, false, NULL }, /* cliprect */
        { 0x0044, false, NULL }, /* pattern */
        { 0x009e, false, NULL }, /* swzsurf */
        { 0x0096, false, NULL }, /* celcius */
        { 0x0097, false, NULL }, /* kelvin (nv20) */
        { 0x0597, false, NULL }, /* kelvin (nv25) */
        {}
};
static int
nv20_graph_register(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
        { 0x0030, false, NULL }, /* null */
        { 0x0039, false, NULL }, /* m2mf */
        { 0x004a, false, NULL }, /* gdirect */
        { 0x009f, false, NULL }, /* imageblit (nv12) */
        { 0x008a, false, NULL }, /* ifc */
        { 0x038a, false, NULL }, /* ifc (nv30) */
        { 0x0089, false, NULL }, /* sifm */
        { 0x0389, false, NULL }, /* sifm (nv30) */
        { 0x0062, false, NULL }, /* surf2d */
        { 0x0362, false, NULL }, /* surf2d (nv30) */
        { 0x0043, false, NULL }, /* rop */
        { 0x0012, false, NULL }, /* beta1 */
        { 0x0072, false, NULL }, /* beta4 */
        { 0x0019, false, NULL }, /* cliprect */
        { 0x0044, false, NULL }, /* pattern */
        { 0x039e, false, NULL }, /* swzsurf */
        { 0x0397, false, NULL }, /* rankine (nv30) */
        { 0x0497, false, NULL }, /* rankine (nv35) */
        { 0x0697, false, NULL }, /* rankine (nv34) */
        {}
};
        if (dev_priv->engine.graph.registered)
                return 0;

        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
        NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
        NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
        NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
        NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
        NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
        NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
        NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
        NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
        NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
        NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
        NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
        NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
        NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */

        /* kelvin */
        if (dev_priv->chipset < 0x25)
                NVOBJ_CLASS(dev, 0x0097, GR);
        else
                NVOBJ_CLASS(dev, 0x0597, GR);

        /* nvsw */
        NVOBJ_CLASS(dev, 0x506e, SW);
        NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);

        dev_priv->engine.graph.registered = true;
        return 0;
}

static int
nv30_graph_register(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->engine.graph.registered)
                return 0;

        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
        NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
        NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
        NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
        NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
        NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
        NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
        NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
        NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
        NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
        NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
        NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
        NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
        NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
        NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
        NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */

        /* rankine */
        if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
                NVOBJ_CLASS(dev, 0x0397, GR);
        else
        if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
                NVOBJ_CLASS(dev, 0x0697, GR);
        else
        if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
                NVOBJ_CLASS(dev, 0x0497, GR);

        /* nvsw */
        NVOBJ_CLASS(dev, 0x506e, SW);
        NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);

        dev_priv->engine.graph.registered = true;
        return 0;
}

static void
nv20_graph_isr(struct drm_device *dev)
{
        u32 stat;

        while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
                u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
                u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
                u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
                u32 chid = (addr & 0x01f00000) >> 20;
                u32 subc = (addr & 0x00070000) >> 16;
                u32 mthd = (addr & 0x00001ffc);
                u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
                u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
                u32 show = stat;

                if (stat & NV_PGRAPH_INTR_ERROR) {
                        if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
                                if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
                                        show &= ~NV_PGRAPH_INTR_ERROR;
                        }
                }

                nv_wr32(dev, NV03_PGRAPH_INTR, stat);
                nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

                if (show && nouveau_ratelimit()) {
                        NV_INFO(dev, "PGRAPH -");
                        nouveau_bitfield_print(nv10_graph_intr, show);
                        printk(" nsource:");
                        nouveau_bitfield_print(nv04_graph_nsource, nsource);
                        printk(" nstatus:");
                        nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
                        printk("\n");
                        NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
                                     "mthd 0x%04x data 0x%08x\n",
                                chid, subc, class, mthd, data);
                }
        }
}
@@ -29,6 +29,27 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"

void
nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
                         uint32_t size, uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        tile->addr = addr | 1;
        tile->limit = max(1u, addr + size) - 1;
        tile->pitch = pitch;
}

void
nv30_fb_free_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        tile->addr = tile->limit = tile->pitch = 0;
}
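
nv30_fb_init_tile_region() precomputes the register values once: the low bit of addr doubles as the enable flag, and the limit is the last byte of the region, with max(1u, ...) guarding the degenerate addr == size == 0 case against unsigned underflow. The same arithmetic as a standalone check (values are assumed examples):

#include <assert.h>
#include <stdint.h>

static uint32_t tile_limit(uint32_t addr, uint32_t size)
{
        /* last byte covered; max() keeps addr == size == 0 from wrapping to ~0 */
        uint32_t end = addr + size;
        return (end > 1u ? end : 1u) - 1;
}

int main(void)
{
        assert(tile_limit(0, 0) == 0);                  /* disabled region */
        assert(tile_limit(0x10000, 0x4000) == 0x13fff); /* last byte of the range */
        return 0;
}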

static int
calc_bias(struct drm_device *dev, int k, int i, int j)
{

@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev)

        /* Turn all the tiling regions off. */
        for (i = 0; i < pfb->num_tiles; i++)
                pfb->set_region_tiling(dev, i, 0, 0, 0);
                pfb->set_tile_region(dev, i);

        /* Init the memory timing regs at 0x10037c/0x1003ac */
        if (dev_priv->chipset == 0x30 ||
@@ -4,26 +4,22 @@
#include "nouveau_drm.h"

void
nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
                          uint32_t size, uint32_t pitch)
nv40_fb_set_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t limit = max(1u, addr + size) - 1;

        if (pitch)
                addr |= 1;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        switch (dev_priv->chipset) {
        case 0x40:
                nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
                nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
                nv_wr32(dev, NV10_PFB_TILE(i), addr);
                nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
                break;

        default:
                nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
                nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
                nv_wr32(dev, NV40_PFB_TILE(i), addr);
                nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
                break;
        }
}

@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev)

        /* Turn all the tiling regions off. */
        for (i = 0; i < pfb->num_tiles; i++)
                pfb->set_region_tiling(dev, i, 0, 0, 0);
                pfb->set_tile_region(dev, i);

        return 0;
}
@@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
        if (ret)
                return ret;

        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV40_USER(chan->id), PAGE_SIZE);
        if (!chan->user)
                return -ENOMEM;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

        nv_wi32(dev, fc + 0, chan->pushbuf_base);

@@ -59,7 +64,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
                NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                0x30000000 /* no idea.. */);
        nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
        nv_wi32(dev, fc + 60, 0x0001FFFF);

        /* enable the fifo dma operation */

@@ -70,17 +74,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
        return 0;
}
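
The hunk above maps the per-channel USER control window with ioremap() and fails channel creation outright when the mapping cannot be established. A hedged sketch of that pattern outside the driver (NV40_USER() is the driver's offset macro; the 0xc00000 base below is a made-up placeholder for it, and the snippet assumes <linux/pci.h> and <linux/io.h>):

/* sketch: map a per-channel MMIO window, fail cleanly if it cannot be mapped */
static int map_channel_user(struct pci_dev *pdev, int chid,
                            void __iomem **user)
{
        resource_size_t base = pci_resource_start(pdev, 0);

        *user = ioremap(base + 0xc00000 + chid * 0x1000, PAGE_SIZE);
        if (!*user)
                return -ENOMEM; /* caller unwinds the rest of the channel */
        return 0;
}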

void
nv40_fifo_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;

        nv_wr32(dev, NV04_PFIFO_MODE,
                nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));

        nouveau_gpuobj_ref(NULL, &chan->ramfc);
}

static void
nv40_fifo_do_load_context(struct drm_device *dev, int chid)
{

@@ -279,6 +272,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
static void
nv40_fifo_init_intr(struct drm_device *dev)
{
        nouveau_irq_register(dev, 8, nv04_fifo_isr);
        nv_wr32(dev, 0x002100, 0xffffffff);
        nv_wr32(dev, 0x002140, 0xffffffff);
}

@@ -301,7 +295,7 @@ nv40_fifo_init(struct drm_device *dev)
        pfifo->reassign(dev, true);

        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                if (dev_priv->fifos[i]) {
                if (dev_priv->channels.ptr[i]) {
                        uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
                        nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
                }
@@ -29,6 +29,9 @@
#include "nouveau_drv.h"
#include "nouveau_grctx.h"

static int nv40_graph_register(struct drm_device *);
static void nv40_graph_isr(struct drm_device *);

struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
{

@@ -42,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
        inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;

        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                struct nouveau_channel *chan = dev_priv->fifos[i];
                struct nouveau_channel *chan = dev_priv->channels.ptr[i];

                if (chan && chan->ramin_grctx &&
                    chan->ramin_grctx->pinst == inst)

@@ -59,6 +62,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_grctx ctx = {};
        unsigned long flags;
        int ret;

        ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,

@@ -73,12 +77,39 @@ nv40_graph_create_context(struct nouveau_channel *chan)
        nv40_grctx_init(&ctx);

        nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);

        /* init grctx pointer in ramfc, and on PFIFO if channel is
         * already active there
         */
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
        nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
        if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
                nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
        nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        return 0;
}

void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        pgraph->fifo_access(dev, false);

        /* Unload the context if it's the currently active one */
        if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);

        pgraph->fifo_access(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* Free the context resources */
        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}

@@ -174,43 +205,39 @@ nv40_graph_unload_context(struct drm_device *dev)
}

void
nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
                             uint32_t size, uint32_t pitch)
nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t limit = max(1u, addr + size) - 1;

        if (pitch)
                addr |= 1;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        switch (dev_priv->chipset) {
        case 0x44:
        case 0x4a:
        case 0x4e:
                nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
                nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
                nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
                nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
                break;

        case 0x46:
        case 0x47:
        case 0x49:
        case 0x4b:
                nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
                nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
                nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
                nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
                nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
                nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
                nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
                nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
                nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
                break;

        default:
                nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
                nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
                nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
                nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
                nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
                nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
                nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
                nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
                nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
                break;
        }
}

@@ -232,7 +259,7 @@ nv40_graph_init(struct drm_device *dev)
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct nouveau_grctx ctx = {};
        uint32_t vramsz, *cp;
        int i, j;
        int ret, i, j;

        nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
                        ~NV_PMC_ENABLE_PGRAPH);

@@ -256,9 +283,14 @@ nv40_graph_init(struct drm_device *dev)

        kfree(cp);

        ret = nv40_graph_register(dev);
        if (ret)
                return ret;

        /* No context present currently */
        nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

        nouveau_irq_register(dev, 12, nv40_graph_isr);
        nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
        nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

@@ -347,7 +379,7 @@ nv40_graph_init(struct drm_device *dev)

        /* Turn all the tiling regions off. */
        for (i = 0; i < pfb->num_tiles; i++)
                nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
                nv40_graph_set_tile_region(dev, i);

        /* begin RAM config */
        vramsz = pci_resource_len(dev->pdev, 0) - 1;

@@ -390,26 +422,111 @@ nv40_graph_init(struct drm_device *dev)

void nv40_graph_takedown(struct drm_device *dev)
{
        nouveau_irq_unregister(dev, 12);
}

struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
        { 0x0030, false, NULL }, /* null */
        { 0x0039, false, NULL }, /* m2mf */
        { 0x004a, false, NULL }, /* gdirect */
        { 0x009f, false, NULL }, /* imageblit (nv12) */
        { 0x008a, false, NULL }, /* ifc */
        { 0x0089, false, NULL }, /* sifm */
        { 0x3089, false, NULL }, /* sifm (nv40) */
        { 0x0062, false, NULL }, /* surf2d */
        { 0x3062, false, NULL }, /* surf2d (nv40) */
        { 0x0043, false, NULL }, /* rop */
        { 0x0012, false, NULL }, /* beta1 */
        { 0x0072, false, NULL }, /* beta4 */
        { 0x0019, false, NULL }, /* cliprect */
        { 0x0044, false, NULL }, /* pattern */
        { 0x309e, false, NULL }, /* swzsurf */
        { 0x4097, false, NULL }, /* curie (nv40) */
        { 0x4497, false, NULL }, /* curie (nv44) */
        {}
};
static int
nv40_graph_register(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->engine.graph.registered)
                return 0;

        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
        NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
        NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
        NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
        NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
        NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
        NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
        NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
        NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
        NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
        NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
        NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
        NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
        NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
        NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */

        /* curie */
        if (dev_priv->chipset >= 0x60 ||
            0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
                NVOBJ_CLASS(dev, 0x4497, GR);
        else
                NVOBJ_CLASS(dev, 0x4097, GR);

        /* nvsw */
        NVOBJ_CLASS(dev, 0x506e, SW);
        NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);

        dev_priv->engine.graph.registered = true;
        return 0;
}

static int
nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                chan = dev_priv->channels.ptr[i];
                if (!chan || !chan->ramin_grctx)
                        continue;

                if (inst == chan->ramin_grctx->pinst)
                        break;
        }
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return i;
}
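
nv40_graph_isr_chid() resolves a PGRAPH instance address back to a channel id by scanning the channel table under channels.lock, and deliberately returns the table size when nothing matches. The same walk in miniature (plain C stand-ins for the lock and table, which are assumptions here):

#include <stdint.h>

#define NCHAN 32

struct chan { uint32_t grctx_inst; int valid; };
static struct chan channels[NCHAN];

/* linear search: index of the channel owning `inst`, or NCHAN if none */
static int isr_chid(uint32_t inst)
{
        int i;

        /* the driver holds dev_priv->channels.lock across this walk */
        for (i = 0; i < NCHAN; i++) {
                if (!channels[i].valid)
                        continue;
                if (channels[i].grctx_inst == inst)
                        break;
        }
        return i;
}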

static void
nv40_graph_isr(struct drm_device *dev)
{
        u32 stat;

        while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
                u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
                u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
                u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
                u32 chid = nv40_graph_isr_chid(dev, inst);
                u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
                u32 subc = (addr & 0x00070000) >> 16;
                u32 mthd = (addr & 0x00001ffc);
                u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
                u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
                u32 show = stat;

                if (stat & NV_PGRAPH_INTR_ERROR) {
                        if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
                                if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
                                        show &= ~NV_PGRAPH_INTR_ERROR;
                        } else
                        if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
                                nv_mask(dev, 0x402000, 0, 0);
                        }
                }

                nv_wr32(dev, NV03_PGRAPH_INTR, stat);
                nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

                if (show && nouveau_ratelimit()) {
                        NV_INFO(dev, "PGRAPH -");
                        nouveau_bitfield_print(nv10_graph_intr, show);
                        printk(" nsource:");
                        nouveau_bitfield_print(nv04_graph_nsource, nsource);
                        printk(" nstatus:");
                        nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
                        printk("\n");
                        NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
                                     "class 0x%04x mthd 0x%04x data 0x%08x\n",
                                chid, inst, subc, class, mthd, data);
                }
        }
}
@@ -115,15 +115,16 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
                OUT_RING(evo, 0);
                BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
                if (dev_priv->chipset != 0x50)
                        if (nv_crtc->fb.tile_flags == 0x7a00)
                        if (nv_crtc->fb.tile_flags == 0x7a00 ||
                            nv_crtc->fb.tile_flags == 0xfe00)
                                OUT_RING(evo, NvEvoFB32);
                        else
                        if (nv_crtc->fb.tile_flags == 0x7000)
                                OUT_RING(evo, NvEvoFB16);
                        else
                                OUT_RING(evo, NvEvoVRAM);
                                OUT_RING(evo, NvEvoVRAM_LP);
                else
                        OUT_RING(evo, NvEvoVRAM);
                        OUT_RING(evo, NvEvoVRAM_LP);
        }
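
Both nv50_crtc_blank() and nv50_crtc_do_mode_set_base() now pick the EVO framebuffer DMA object from the buffer's tile_flags, with 0xfe00 newly treated like 0x7a00 and the untiled fallback switched from NvEvoVRAM to NvEvoVRAM_LP. The selection reduces to a small mapping; a hedged standalone version (the handle values are placeholders, not the driver's object names):

#include <stdint.h>

enum { EVO_FB32 = 1, EVO_FB16 = 2, EVO_VRAM_LP = 3 }; /* placeholder handles */

static int fb_dma_for_tile_flags(uint32_t tile_flags)
{
        if (tile_flags == 0x7a00 || tile_flags == 0xfe00)
                return EVO_FB32;        /* 32-bit tiled layouts */
        if (tile_flags == 0x7000)
                return EVO_FB16;        /* 16-bit tiled layout */
        return EVO_VRAM_LP;             /* everything else, incl. untiled */
}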

        nv_crtc->fb.blanked = blanked;

@@ -345,7 +346,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                     uint32_t buffer_handle, uint32_t width, uint32_t height)
{
        struct drm_device *dev = crtc->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct nouveau_bo *cursor = NULL;
        struct drm_gem_object *gem;

@@ -374,8 +374,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,

        nouveau_bo_unmap(cursor);

        nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
                                            dev_priv->vm_vram_base);
        nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
        nv_crtc->cursor.show(nv_crtc, true);

out:

@@ -437,6 +436,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = {
        .cursor_move = nv50_crtc_cursor_move,
        .gamma_set = nv50_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .page_flip = nouveau_crtc_page_flip,
        .destroy = nv50_crtc_destroy,
};

@@ -453,6 +453,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)

        NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

        drm_vblank_pre_modeset(dev, nv_crtc->index);
        nv50_crtc_blank(nv_crtc, true);
}

@@ -468,6 +469,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
        NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);

        nv50_crtc_blank(nv_crtc, false);
        drm_vblank_post_modeset(dev, nv_crtc->index);

        ret = RING_SPACE(evo, 2);
        if (ret) {

@@ -545,7 +547,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
                return -EINVAL;
        }

        nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
        nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
        nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
        nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
        if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {

@@ -554,13 +556,14 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
                        return ret;

                BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
                if (nv_crtc->fb.tile_flags == 0x7a00)
                if (nv_crtc->fb.tile_flags == 0x7a00 ||
                    nv_crtc->fb.tile_flags == 0xfe00)
                        OUT_RING(evo, NvEvoFB32);
                else
                if (nv_crtc->fb.tile_flags == 0x7000)
                        OUT_RING(evo, NvEvoFB16);
                else
                        OUT_RING(evo, NvEvoVRAM);
                        OUT_RING(evo, NvEvoVRAM_LP);
        }

        ret = RING_SPACE(evo, 12);

@@ -574,8 +577,10 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
        if (!nv_crtc->fb.tile_flags) {
                OUT_RING(evo, drm_fb->pitch | (1 << 20));
        } else {
                OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
                                  fb->nvbo->tile_mode);
                u32 tile_mode = fb->nvbo->tile_mode;
                if (dev_priv->card_type >= NV_C0)
                        tile_mode >>= 4;
                OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
        }
        if (dev_priv->chipset == 0x50)
                OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
@@ -33,6 +33,8 @@
#include "nouveau_ramht.h"
#include "drm_crtc_helper.h"

static void nv50_display_isr(struct drm_device *);

static inline int
nv50_sor_nr(struct drm_device *dev)
{

@@ -46,159 +48,6 @@ nv50_sor_nr(struct drm_device *dev)
        return 4;
}

static void
nv50_evo_channel_del(struct nouveau_channel **pchan)
{
        struct nouveau_channel *chan = *pchan;

        if (!chan)
                return;
        *pchan = NULL;

        nouveau_gpuobj_channel_takedown(chan);
        nouveau_bo_unmap(chan->pushbuf_bo);
        nouveau_bo_ref(NULL, &chan->pushbuf_bo);

        if (chan->user)
                iounmap(chan->user);

        kfree(chan);
}

static int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
                    uint32_t tile_flags, uint32_t magic_flags,
                    uint32_t offset, uint32_t limit)
{
        struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
        struct drm_device *dev = evo->dev;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
        if (ret)
                return ret;
        obj->engine = NVOBJ_ENGINE_DISPLAY;

        nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
        nv_wo32(obj, 4, limit);
        nv_wo32(obj, 8, offset);
        nv_wo32(obj, 12, 0x00000000);
        nv_wo32(obj, 16, 0x00000000);
        if (dev_priv->card_type < NV_C0)
                nv_wo32(obj, 20, 0x00010000);
        else
                nv_wo32(obj, 20, 0x00020000);
        dev_priv->engine.instmem.flush(dev);

        ret = nouveau_ramht_insert(evo, name, obj);
        nouveau_gpuobj_ref(NULL, &obj);
        if (ret) {
                return ret;
        }

        return 0;
}

static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramht = NULL;
        struct nouveau_channel *chan;
        int ret;

        chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;
        *pchan = chan;

        chan->id = -1;
        chan->dev = dev;
        chan->user_get = 4;
        chan->user_put = 0;

        ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
        if (ret) {
                NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
        if (ret) {
                NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
        nouveau_gpuobj_ref(NULL, &ramht);
        if (ret) {
                nv50_evo_channel_del(pchan);
                return ret;
        }

        if (dev_priv->chipset != 0x50) {
                ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
                                          0, 0xffffffff);
                if (ret) {
                        nv50_evo_channel_del(pchan);
                        return ret;
                }

                ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
                                          0, 0xffffffff);
                if (ret) {
                        nv50_evo_channel_del(pchan);
                        return ret;
                }
        }

        ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
                                  0, dev_priv->vram_size);
        if (ret) {
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
                             false, true, &chan->pushbuf_bo);
        if (ret == 0)
                ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
        if (ret) {
                NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = nouveau_bo_map(chan->pushbuf_bo);
        if (ret) {
                NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV50_PDISPLAY_USER(0), PAGE_SIZE);
        if (!chan->user) {
                NV_ERROR(dev, "Error mapping EVO control regs.\n");
                nv50_evo_channel_del(pchan);
                return -ENOMEM;
        }

        return 0;
}
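
nv50_evo_dmaobj_new() builds a six-word EVO DMA object: word 0 packs the tile flags, the "magic" flags and the class, word 1 is the byte limit, word 2 the offset, and word 5 selects the flavour (0x00010000 before NV_C0, 0x00020000 on Fermi, or the caller-supplied flags5 in the new nv50_evo.c version). A hedged sketch of that packing with a plain array standing in for the gpuobj:

#include <stdint.h>

/* pack the six words of an EVO DMA object as the function above does */
static void pack_evo_dmaobj(uint32_t obj[6], uint32_t class_id,
                            uint32_t tile_flags, uint32_t magic_flags,
                            uint32_t offset, uint32_t limit, int is_nvc0)
{
        obj[0] = (tile_flags << 22) | (magic_flags << 16) | class_id;
        obj[1] = limit;
        obj[2] = offset;
        obj[3] = 0x00000000;
        obj[4] = 0x00000000;
        obj[5] = is_nvc0 ? 0x00020000 : 0x00010000;
}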

int
nv50_display_early_init(struct drm_device *dev)
{

@@ -214,17 +63,16 @@ int
nv50_display_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
        struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
        struct nouveau_channel *evo = dev_priv->evo;
        struct drm_connector *connector;
        uint32_t val, ram_amount;
        uint64_t start;
        struct nouveau_channel *evo;
        int ret, i;
        u32 val;

        NV_DEBUG_KMS(dev, "\n");

        nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));

        /*
         * I think the 0x006101XX range is some kind of main control area
         * that enables things.

@@ -240,16 +88,19 @@ nv50_display_init(struct drm_device *dev)
                val = nv_rd32(dev, 0x0061610c + (i * 0x800));
                nv_wr32(dev, 0x0061019c + (i * 0x10), val);
        }

        /* DAC */
        for (i = 0; i < 3; i++) {
                val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
                nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
        }

        /* SOR */
        for (i = 0; i < nv50_sor_nr(dev); i++) {
                val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
                nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
        }

        /* EXT */
        for (i = 0; i < 3; i++) {
                val = nv_rd32(dev, 0x0061e000 + (i * 0x800));

@@ -262,17 +113,6 @@ nv50_display_init(struct drm_device *dev)
                nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
        }

        /* This used to be in crtc unblank, but seems out of place there. */
        nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
        /* RAM is clamped to 256 MiB. */
        ram_amount = dev_priv->vram_size;
        NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
        if (ram_amount > 256*1024*1024)
                ram_amount = 256*1024*1024;
        nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
        nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
        nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);

        /* The precise purpose is unknown, i suspect it has something to do
         * with text mode.
         */

@@ -287,37 +127,6 @@ nv50_display_init(struct drm_device *dev)
                }
        }

        /* taken from nv bug #12637, attempts to un-wedge the hw if it's
         * stuck in some unspecified state
         */
        start = ptimer->read(dev);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
        while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
                if ((val & 0x9f0000) == 0x20000)
                        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
                                val | 0x800000);

                if ((val & 0x3f0000) == 0x30000)
                        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
                                val | 0x200000);

                if (ptimer->read(dev) - start > 1000000000ULL) {
                        NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
                        NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
                        return -EBUSY;
                }
        }

        nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
        if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
                     0x40000000, 0x40000000)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n",
                         nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
                return -EBUSY;
        }

        for (i = 0; i < 2; i++) {
                nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
                if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),

@@ -341,39 +150,31 @@ nv50_display_init(struct drm_device *dev)
                }
        }

        nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
        nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
        nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
        nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
                NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
                NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
                NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);

        /* initialise fifo */
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
                ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
                NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
                NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
        if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
                return -EBUSY;
        /* enable hotplug interrupts */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct nouveau_connector *conn = nouveau_connector(connector);

                if (conn->dcb->gpio_tag == 0xff)
                        continue;

                pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
        }
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
                (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
                NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
        nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
                NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
        nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);

        evo->dma.max = (4096/4) - 2;
        evo->dma.put = 0;
        evo->dma.cur = evo->dma.put;
        evo->dma.free = evo->dma.max - evo->dma.cur;

        ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
        ret = nv50_evo_init(dev);
        if (ret)
                return ret;
        evo = dev_priv->evo;

        for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
                OUT_RING(evo, 0);
        nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);

        ret = RING_SPACE(evo, 11);
        if (ret)

@@ -393,21 +194,6 @@ nv50_display_init(struct drm_device *dev)
        if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
                NV_ERROR(dev, "evo pushbuf stalled\n");

        /* enable clock change interrupts. */
        nv_wr32(dev, 0x610028, 0x00010001);
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
                                             NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
                                             NV50_PDISPLAY_INTR_EN_CLK_UNK40));

        /* enable hotplug interrupts */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct nouveau_connector *conn = nouveau_connector(connector);

                if (conn->dcb->gpio_tag == 0xff)
                        continue;

                pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
        }

        return 0;
}

@@ -452,13 +238,7 @@ static int nv50_display_disable(struct drm_device *dev)
                }
        }

        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
        nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
        if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n",
                         nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
        }
        nv50_evo_fini(dev);

        for (i = 0; i < 3; i++) {
                if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),

@@ -470,7 +250,7 @@ static int nv50_display_disable(struct drm_device *dev)
        }

        /* disable interrupts. */
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);

        /* disable hotplug interrupts */
        nv_wr32(dev, 0xe054, 0xffffffff);

@@ -508,13 +288,6 @@ int nv50_display_create(struct drm_device *dev)

        dev->mode_config.fb_base = dev_priv->fb_phys;

        /* Create EVO channel */
        ret = nv50_evo_channel_new(dev, &dev_priv->evo);
        if (ret) {
                NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
                return ret;
        }

        /* Create CRTC objects */
        for (i = 0; i < 2; i++)
                nv50_crtc_create(dev, i);

@@ -557,6 +330,9 @@ int nv50_display_create(struct drm_device *dev)
                }
        }

        INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
        nouveau_irq_register(dev, 26, nv50_display_isr);

        ret = nv50_display_init(dev);
        if (ret) {
                nv50_display_destroy(dev);

@@ -569,14 +345,12 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG_KMS(dev, "\n");

        drm_mode_config_cleanup(dev);

        nv50_display_disable(dev);
        nv50_evo_channel_del(&dev_priv->evo);
        nouveau_irq_unregister(dev, 26);
}

static u16

@@ -660,32 +434,32 @@ static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan;
        struct list_head *entry, *tmp;
        struct nouveau_channel *chan, *tmp;

        list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
                chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
        list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
                                 nvsw.vbl_wait) {
                if (chan->nvsw.vblsem_head != crtc)
                        continue;

                nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
                                chan->nvsw.vblsem_rval);
                list_del(&chan->nvsw.vbl_wait);
                drm_vblank_put(dev, crtc);
        }

        drm_handle_vblank(dev, crtc);
}
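
The handler above was converted from list_for_each_safe() plus a manual list_entry() to list_for_each_entry_safe(), which yields the containing structure directly and still tolerates list_del() on the current node. The idiom against a toy list (a generic kernel pattern assuming <linux/list.h>, not this driver's types):

struct waiter {
        struct list_head head;
        int crtc;
};

static void wake_waiters(struct list_head *waiting, int crtc)
{
        struct waiter *w, *tmp;

        /* `tmp` caches the next node, so list_del(&w->head) is safe here */
        list_for_each_entry_safe(w, tmp, waiting, head) {
                if (w->crtc != crtc)
                        continue;
                list_del(&w->head);
        }
}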

static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
        intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;

        if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
                nv50_display_vblank_crtc_handler(dev, 0);

        if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
                nv50_display_vblank_crtc_handler(dev, 1);

        nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
                NV50_PDISPLAY_INTR_EN) & ~intr);
        nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
        nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
}

static void

@@ -1011,108 +785,31 @@ nv50_display_irq_handler_bh(struct work_struct *work)
static void
nv50_display_error_handler(struct drm_device *dev)
{
        uint32_t addr, data;
        u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
        u32 addr, data;
        int chid;

        nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
        addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
        data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);

        NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
                 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);

        nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
}

void
nv50_display_irq_hotplug_bh(struct work_struct *work)
{
        struct drm_nouveau_private *dev_priv =
                container_of(work, struct drm_nouveau_private, hpd_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_connector *connector;
        const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
        uint32_t unplug_mask, plug_mask, change_mask;
        uint32_t hpd0, hpd1;

        spin_lock_irq(&dev_priv->hpd_state.lock);
        hpd0 = dev_priv->hpd_state.hpd0_bits;
        dev_priv->hpd_state.hpd0_bits = 0;
        hpd1 = dev_priv->hpd_state.hpd1_bits;
        dev_priv->hpd_state.hpd1_bits = 0;
        spin_unlock_irq(&dev_priv->hpd_state.lock);

        hpd0 &= nv_rd32(dev, 0xe050);
        if (dev_priv->chipset >= 0x90)
                hpd1 &= nv_rd32(dev, 0xe070);

        plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
        unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
        change_mask = plug_mask | unplug_mask;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct drm_encoder_helper_funcs *helper;
                struct nouveau_connector *nv_connector =
                        nouveau_connector(connector);
                struct nouveau_encoder *nv_encoder;
                struct dcb_gpio_entry *gpio;
                uint32_t reg;
                bool plugged;

                if (!nv_connector->dcb)
        for (chid = 0; chid < 5; chid++) {
                if (!(channels & (1 << chid)))
                        continue;

                gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
                if (!gpio || !(change_mask & (1 << gpio->line)))
                        continue;
                nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
                addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
                data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
                NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
                              "(0x%04x 0x%02x)\n", chid,
                         addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);

                reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
                plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
                NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
                        drm_get_connector_name(connector)) ;

                if (!connector->encoder || !connector->encoder->crtc ||
                    !connector->encoder->crtc->enabled)
                        continue;
                nv_encoder = nouveau_encoder(connector->encoder);
                helper = connector->encoder->helper_private;

                if (nv_encoder->dcb->type != OUTPUT_DP)
                        continue;

                if (plugged)
                        helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
                else
                        helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
                nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
        }

        drm_helper_hpd_irq_event(dev);
}

void
nv50_display_irq_handler(struct drm_device *dev)
static void
nv50_display_isr(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t delayed = 0;

        if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
                uint32_t hpd0_bits, hpd1_bits = 0;

                hpd0_bits = nv_rd32(dev, 0xe054);
                nv_wr32(dev, 0xe054, hpd0_bits);

                if (dev_priv->chipset >= 0x90) {
                        hpd1_bits = nv_rd32(dev, 0xe074);
                        nv_wr32(dev, 0xe074, hpd1_bits);
                }

                spin_lock(&dev_priv->hpd_state.lock);
                dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
                dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
                spin_unlock(&dev_priv->hpd_state.lock);

                queue_work(dev_priv->wq, &dev_priv->hpd_work);
        }

        while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
                uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
                uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);

@@ -1123,9 +820,9 @@ nv50_display_irq_handler(struct drm_device *dev)
                if (!intr0 && !(intr1 & ~delayed))
                        break;

                if (intr0 & 0x00010000) {
                if (intr0 & 0x001f0000) {
                        nv50_display_error_handler(dev);
                        intr0 &= ~0x00010000;
                        intr0 &= ~0x001f0000;
                }

                if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {

@@ -1156,4 +853,3 @@ nv50_display_irq_handler(struct drm_device *dev)
        }
}
}
|
||||
|
||||
|
|
|
@@ -35,9 +35,7 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"

void nv50_display_irq_handler(struct drm_device *dev);
void nv50_display_irq_handler_bh(struct work_struct *work);
void nv50_display_irq_hotplug_bh(struct work_struct *work);
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);

@@ -0,0 +1,345 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
        struct drm_nouveau_private *dev_priv;
        struct nouveau_channel *evo = *pevo;

        if (!evo)
                return;
        *pevo = NULL;

        dev_priv = evo->dev->dev_private;
        dev_priv->evo_alloc &= ~(1 << evo->id);

        nouveau_gpuobj_channel_takedown(evo);
        nouveau_bo_unmap(evo->pushbuf_bo);
        nouveau_bo_ref(NULL, &evo->pushbuf_bo);

        if (evo->user)
                iounmap(evo->user);

        kfree(evo);
}

int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
                    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
                    u32 flags5)
{
        struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
        struct drm_device *dev = evo->dev;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
        if (ret)
                return ret;
        obj->engine = NVOBJ_ENGINE_DISPLAY;

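        /* six-dword DMA object: dword 0 packs the class with the tile
         * and "magic" flags, dwords 1-2 hold the limit and base offset,
         * and dword 5 (flags5) carries chipset-specific bits */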
        nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
        nv_wo32(obj, 4, limit);
        nv_wo32(obj, 8, offset);
        nv_wo32(obj, 12, 0x00000000);
        nv_wo32(obj, 16, 0x00000000);
        nv_wo32(obj, 20, flags5);
        dev_priv->engine.instmem.flush(dev);

        ret = nouveau_ramht_insert(evo, name, obj);
        nouveau_gpuobj_ref(NULL, &obj);
        if (ret) {
                return ret;
        }

        return 0;
}

static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *evo;
        int ret;

        evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
        if (!evo)
                return -ENOMEM;
        *pevo = evo;

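        /* the hardware exposes five EVO channels; claim the first id
         * not already taken in the evo_alloc bitmask */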
        for (evo->id = 0; evo->id < 5; evo->id++) {
                if (dev_priv->evo_alloc & (1 << evo->id))
                        continue;

                dev_priv->evo_alloc |= (1 << evo->id);
                break;
        }

        if (evo->id == 5) {
                kfree(evo);
                return -ENODEV;
        }

        evo->dev = dev;
        evo->user_get = 4;
        evo->user_put = 0;

        ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
                             false, true, &evo->pushbuf_bo);
        if (ret == 0)
                ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
        if (ret) {
                NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
                nv50_evo_channel_del(pevo);
                return ret;
        }

        ret = nouveau_bo_map(evo->pushbuf_bo);
        if (ret) {
                NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
                nv50_evo_channel_del(pevo);
                return ret;
        }

        evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
                            NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
        if (!evo->user) {
                NV_ERROR(dev, "Error mapping EVO control regs.\n");
                nv50_evo_channel_del(pevo);
                return -ENOMEM;
        }

        /* bind primary evo channel's ramht to the channel */
        if (dev_priv->evo && evo != dev_priv->evo)
                nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);

        return 0;
}

static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
        struct drm_device *dev = evo->dev;
        int id = evo->id, ret, i;
        u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
        u32 tmp;

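        /* undocumented CTRL handshake: if the channel sits in one of two
         * intermediate states, poke it forward before (re)initialising */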
        tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
        if ((tmp & 0x009f0000) == 0x00020000)
                nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);

        tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
        if ((tmp & 0x003f0000) == 0x00030000)
                nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);

        /* initialise fifo */
        nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
                NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
                NV50_PDISPLAY_EVO_DMA_CB_VALID);
        nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
        nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
        nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
                NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

        nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
        nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
                NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
        if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
                NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
                         nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
                return -EBUSY;
        }

        /* enable error reporting on the channel */
        nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);

        evo->dma.max = (4096/4) - 2;
        evo->dma.put = 0;
        evo->dma.cur = evo->dma.put;
        evo->dma.free = evo->dma.max - evo->dma.cur;

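        /* as with ordinary PFIFO channels, burn the first
         * NOUVEAU_DMA_SKIPS words of the push buffer as padding no-ops */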
        ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
        if (ret)
                return ret;

        for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
                OUT_RING(evo, 0);

        return 0;
}

static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
        struct drm_device *dev = evo->dev;
        int id = evo->id;

        nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
        nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
        nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
        nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
        if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
                NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
                         nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
        }
}

static int
nv50_evo_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramht = NULL;
        struct nouveau_channel *evo;
        int ret;

        /* create primary evo channel, the one we use for modesetting
         * purposes
         */
        ret = nv50_evo_channel_new(dev, &dev_priv->evo);
        if (ret)
                return ret;
        evo = dev_priv->evo;

        /* setup object management on it, any other evo channel will
         * use this also as there's no per-channel support on the
         * hardware
         */
        ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
                                 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
                nv50_evo_channel_del(&dev_priv->evo);
                return ret;
        }

        ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
        if (ret) {
                NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
                nv50_evo_channel_del(&dev_priv->evo);
                return ret;
        }

        ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
        if (ret) {
                NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
                nv50_evo_channel_del(&dev_priv->evo);
                return ret;
        }

        ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
        nouveau_gpuobj_ref(NULL, &ramht);
        if (ret) {
                nv50_evo_channel_del(&dev_priv->evo);
                return ret;
        }

        /* create some default objects for the scanout memtypes we support */
        if (dev_priv->card_type >= NV_C0) {
                ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
                                          0, 0xffffffff, 0x00000000);
                if (ret) {
                        nv50_evo_channel_del(&dev_priv->evo);
                        return ret;
                }

                ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
                                          0, dev_priv->vram_size, 0x00020000);
                if (ret) {
                        nv50_evo_channel_del(&dev_priv->evo);
                        return ret;
                }

                ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
                                          0, dev_priv->vram_size, 0x00000000);
                if (ret) {
                        nv50_evo_channel_del(&dev_priv->evo);
                        return ret;
                }
        } else
        if (dev_priv->chipset != 0x50) {
                ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
                                          0, 0xffffffff, 0x00010000);
                if (ret) {
                        nv50_evo_channel_del(&dev_priv->evo);
                        return ret;
                }

                ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
                                          0, 0xffffffff, 0x00010000);
                if (ret) {
                        nv50_evo_channel_del(&dev_priv->evo);
                        return ret;
                }

                ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
                                          0, dev_priv->vram_size, 0x00010000);
                if (ret) {
                        nv50_evo_channel_del(&dev_priv->evo);
                        return ret;
                }

                ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
                                          0, dev_priv->vram_size, 0x00010000);
                if (ret) {
                        nv50_evo_channel_del(&dev_priv->evo);
                        return ret;
                }
        }

        return 0;
}

int
nv50_evo_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        if (!dev_priv->evo) {
                ret = nv50_evo_create(dev);
                if (ret)
                        return ret;
        }

        return nv50_evo_channel_init(dev_priv->evo);
}

void
nv50_evo_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->evo) {
                nv50_evo_channel_fini(dev_priv->evo);
                nv50_evo_channel_del(&dev_priv->evo);
        }
}

@@ -24,6 +24,15 @@
 *
 */

#ifndef __NV50_EVO_H__
#define __NV50_EVO_H__

int  nv50_evo_init(struct drm_device *dev);
void nv50_evo_fini(struct drm_device *dev);
int  nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
                         u32 tile_flags, u32 magic_flags,
                         u32 offset, u32 limit);

#define NV50_EVO_UPDATE        0x00000080
#define NV50_EVO_UNK84         0x00000084
#define NV50_EVO_UNK84_NOTIFY  0x40000000

@@ -111,3 +120,4 @@
#define NV50_EVO_CRTC_SCALE_RES1  0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2  0x000008dc

#endif

@@ -3,30 +3,75 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"

struct nv50_fb_priv {
        struct page *r100c08_page;
        dma_addr_t r100c08;
};

static int
nv50_fb_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_fb_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

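        /* allocate a zeroed scratch page and DMA-map it; nv50_fb_init()
         * points the mysterious 0x100c08 register at it */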
        priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!priv->r100c08_page) {
                kfree(priv);
                return -ENOMEM;
        }

        priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
                                     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
                __free_page(priv->r100c08_page);
                kfree(priv);
                return -EFAULT;
        }

        dev_priv->engine.fb.priv = priv;
        return 0;
}

int
nv50_fb_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_fb_priv *priv;
        int ret;

        if (!dev_priv->engine.fb.priv) {
                ret = nv50_fb_create(dev);
                if (ret)
                        return ret;
        }
        priv = dev_priv->engine.fb.priv;

        /* Not a clue what this is exactly. Without pointing it at a
         * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
         * cause IOMMU "read from address 0" errors (rh#561267)
         */
        nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
        nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);

        /* This is needed to get meaningful information from 100c90
         * on traps. No idea what these values mean exactly. */
        switch (dev_priv->chipset) {
        case 0x50:
                nv_wr32(dev, 0x100c90, 0x0707ff);
                nv_wr32(dev, 0x100c90, 0x000707ff);
                break;
        case 0xa3:
        case 0xa5:
        case 0xa8:
                nv_wr32(dev, 0x100c90, 0x0d0fff);
                nv_wr32(dev, 0x100c90, 0x000d0fff);
                break;
        case 0xaf:
                nv_wr32(dev, 0x100c90, 0x089d1fff);
                break;
        default:
                nv_wr32(dev, 0x100c90, 0x1d07ff);
                nv_wr32(dev, 0x100c90, 0x001d07ff);
                break;
        }

@@ -36,12 +81,25 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_fb_priv *priv;

        priv = dev_priv->engine.fb.priv;
        if (!priv)
                return;
        dev_priv->engine.fb.priv = NULL;

        pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(priv->r100c08_page);
        kfree(priv);
}

void
nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        unsigned long flags;
        u32 trap[6], idx, chinst;
        int i, ch;

@@ -60,8 +118,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
                return;

        chinst = (trap[2] << 16) | trap[1];

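        /* resolve the trapping channel by matching the faulting context's
         * instance address against each channel's RAMIN base */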
        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
                struct nouveau_channel *chan = dev_priv->fifos[ch];
                struct nouveau_channel *chan = dev_priv->channels.ptr[ch];

                if (!chan || !chan->ramin)
                        continue;

@@ -69,6 +129,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
                if (chinst == chan->ramin->vinst >> 12)
                        break;
        }
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

        NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
                     "channel %d (0x%08x)\n",

@@ -1,29 +1,46 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
#include "nouveau_mm.h"

void
int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        struct nouveau_fbdev *nfbdev = info->par;
        struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
        int ret;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
            RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
                nouveau_fbcon_gpu_lockup(info);
        }

        if (info->flags & FBINFO_HWACCEL_DISABLED) {
                cfb_fillrect(info, rect);
                return;
        }
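        /* errors now propagate to the caller, which is expected to fall
         * back to the cfb_* software path that used to live here */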
        ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
        if (ret)
                return ret;

        if (rect->rop != ROP_COPY) {
                BEGIN_RING(chan, NvSub2D, 0x02ac, 1);

@@ -45,27 +62,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
                OUT_RING(chan, 3);
        }
        FIRE_RING(chan);
        return 0;
}

void
int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        struct nouveau_fbdev *nfbdev = info->par;
        struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
        int ret;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
                nouveau_fbcon_gpu_lockup(info);
        }

        if (info->flags & FBINFO_HWACCEL_DISABLED) {
                cfb_copyarea(info, region);
                return;
        }
        ret = RING_SPACE(chan, 12);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSub2D, 0x0110, 1);
        OUT_RING(chan, 0);

@@ -80,9 +91,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
        OUT_RING(chan, 0);
        OUT_RING(chan, region->sy);
        FIRE_RING(chan);
        return 0;
}

void
int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
        struct nouveau_fbdev *nfbdev = info->par;

@@ -92,23 +104,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        uint32_t width, dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        uint32_t *palette = info->pseudo_palette;
        int ret;

        if (info->state != FBINFO_STATE_RUNNING)
                return;
        if (image->depth != 1)
                return -ENODEV;

        if (image->depth != 1) {
                cfb_imageblit(info, image);
                return;
        }

        if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
                nouveau_fbcon_gpu_lockup(info);
        }

        if (info->flags & FBINFO_HWACCEL_DISABLED) {
                cfb_imageblit(info, image);
                return;
        }
        ret = RING_SPACE(chan, 11);
        if (ret)
                return ret;

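        /* glyph data is 1bpp; the 2D engine wants scanlines padded to a
         * 32-pixel multiple, so the dword count is width*height/32 */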
        width = ALIGN(image->width, 32);
        dwords = (width * image->height) >> 5;

@@ -134,11 +137,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        while (dwords) {
                int push = dwords > 2047 ? 2047 : dwords;

                if (RING_SPACE(chan, push + 1)) {
                        nouveau_fbcon_gpu_lockup(info);
                        cfb_imageblit(info, image);
                        return;
                }
                ret = RING_SPACE(chan, push + 1);
                if (ret)
                        return ret;

                dwords -= push;

@@ -148,6 +149,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        }

        FIRE_RING(chan);
        return 0;
}

int

@@ -157,12 +159,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
        struct drm_device *dev = nfbdev->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->channel;
        struct nouveau_gpuobj *eng2d = NULL;
        uint64_t fb;
        struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
        int ret, format;

        fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;

        switch (info->var.bits_per_pixel) {
        case 8:
                format = 0xf3;

@@ -190,12 +189,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
                return -EINVAL;
        }

        ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
        if (ret)
                return ret;

        ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
        nouveau_gpuobj_ref(NULL, &eng2d);
        ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
        if (ret)
                return ret;

@@ -253,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
        OUT_RING(chan, info->fix.line_length);
        OUT_RING(chan, info->var.xres_virtual);
        OUT_RING(chan, info->var.yres_virtual);
        OUT_RING(chan, upper_32_bits(fb));
        OUT_RING(chan, lower_32_bits(fb));
        OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
        OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
        BEGIN_RING(chan, NvSub2D, 0x0230, 2);
        OUT_RING(chan, format);
        OUT_RING(chan, 1);

@@ -262,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
        OUT_RING(chan, info->fix.line_length);
        OUT_RING(chan, info->var.xres_virtual);
        OUT_RING(chan, info->var.yres_virtual);
        OUT_RING(chan, upper_32_bits(fb));
        OUT_RING(chan, lower_32_bits(fb));
        OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
        OUT_RING(chan, lower_32_bits(nvbo->vma.offset));

        return 0;
}

@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"

static void
nv50_fifo_playlist_update(struct drm_device *dev)

@@ -44,7 +45,8 @@ nv50_fifo_playlist_update(struct drm_device *dev)

        /* We never schedule channel 0 or 127 */
        for (i = 1, nr = 0; i < 127; i++) {
                if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
                if (dev_priv->channels.ptr[i] &&
                    dev_priv->channels.ptr[i]->ramfc) {
                        nv_wo32(cur, (nr * 4), i);
                        nr++;
                }

@@ -60,7 +62,7 @@ static void
nv50_fifo_channel_enable(struct drm_device *dev, int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->fifos[channel];
        struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
        uint32_t inst;

        NV_DEBUG(dev, "ch%d\n", channel);

@@ -105,6 +107,7 @@ nv50_fifo_init_intr(struct drm_device *dev)
{
        NV_DEBUG(dev, "\n");

        nouveau_irq_register(dev, 8, nv04_fifo_isr);
        nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
        nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}

@@ -118,7 +121,7 @@ nv50_fifo_init_context_table(struct drm_device *dev)
        NV_DEBUG(dev, "\n");

        for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
                if (dev_priv->fifos[i])
                if (dev_priv->channels.ptr[i])
                        nv50_fifo_channel_enable(dev, i);
                else
                        nv50_fifo_channel_disable(dev, i);

@@ -206,6 +209,9 @@ nv50_fifo_takedown(struct drm_device *dev)
        if (!pfifo->playlist[0])
                return;

        nv_wr32(dev, 0x2140, 0x00000000);
        nouveau_irq_unregister(dev, 8);

        nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
        nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}

@@ -256,6 +262,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
        }
        ramfc = chan->ramfc;

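        /* map this channel's USER control area (put/get registers)
         * from BAR0 */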
        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV50_USER(chan->id), PAGE_SIZE);
        if (!chan->user)
                return -ENOMEM;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

        nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);

@@ -291,10 +302,23 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_gpuobj *ramfc = NULL;
        unsigned long flags;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        pfifo->reassign(dev, false);

        /* Unload the context if it's the currently active one */
        if (pfifo->channel_id(dev) == chan->id) {
                pfifo->disable(dev);
                pfifo->unload_context(dev);
                pfifo->enable(dev);
        }

        /* This will ensure the channel is seen as disabled. */
        nouveau_gpuobj_ref(chan->ramfc, &ramfc);
        nouveau_gpuobj_ref(NULL, &chan->ramfc);

@@ -305,6 +329,14 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
        nv50_fifo_channel_disable(dev, 127);
        nv50_fifo_playlist_update(dev);

        pfifo->reassign(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* Free the channel resources */
        if (chan->user) {
                iounmap(chan->user);
                chan->user = NULL;
        }
        nouveau_gpuobj_ref(NULL, &ramfc);
        nouveau_gpuobj_ref(NULL, &chan->cache);
}

@@ -392,7 +424,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
        if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
                return 0;

        chan = dev_priv->fifos[chid];
        chan = dev_priv->channels.ptr[chid];
        if (!chan) {
                NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
                return -EINVAL;

@@ -467,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev)
void
nv50_fifo_tlb_flush(struct drm_device *dev)
{
        nv50_vm_flush(dev, 5);
        nv50_vm_flush_engine(dev, 5);
}