Merge drm/drm-next into drm-misc-next
Backmerge to prepare for i915-ttm topic branch. Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
This commit is contained in:
Коммит
00f4471e42
|
@ -75,7 +75,7 @@ static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
|
|||
hdr->file_size = cpu_to_le32(data_end - iter->data);
|
||||
|
||||
iter->hdr++;
|
||||
iter->data += hdr->file_size;
|
||||
iter->data += le32_to_cpu(hdr->file_size);
|
||||
}
|
||||
|
||||
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
|
||||
|
@ -85,8 +85,8 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
|
|||
unsigned int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
|
||||
reg->reg = etnaviv_dump_registers[i];
|
||||
reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
|
||||
reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
|
||||
reg->value = cpu_to_le32(gpu_read(gpu, etnaviv_dump_registers[i]));
|
||||
}
|
||||
|
||||
etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
|
||||
|
@ -207,7 +207,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
|
|||
if (!IS_ERR(pages)) {
|
||||
int j;
|
||||
|
||||
iter.hdr->data[0] = bomap - bomap_start;
|
||||
iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));
|
||||
|
||||
for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
|
||||
*bomap++ = cpu_to_le64(page_to_phys(*pages++));
|
||||
|
|
|
@ -80,8 +80,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
|
|||
/* Don't drop the pages for imported dmabuf, as they are not
|
||||
* ours, just free the array we allocated:
|
||||
*/
|
||||
if (etnaviv_obj->pages)
|
||||
kvfree(etnaviv_obj->pages);
|
||||
kvfree(etnaviv_obj->pages);
|
||||
|
||||
drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
|
||||
}
|
||||
|
|
|
@ -612,14 +612,10 @@ err_submit_ww_acquire:
|
|||
err_submit_cmds:
|
||||
if (ret && (out_fence_fd >= 0))
|
||||
put_unused_fd(out_fence_fd);
|
||||
if (stream)
|
||||
kvfree(stream);
|
||||
if (bos)
|
||||
kvfree(bos);
|
||||
if (relocs)
|
||||
kvfree(relocs);
|
||||
if (pmrs)
|
||||
kvfree(pmrs);
|
||||
kvfree(stream);
|
||||
kvfree(bos);
|
||||
kvfree(relocs);
|
||||
kvfree(pmrs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -27,10 +27,6 @@
|
|||
#include "state_hi.xml.h"
|
||||
#include "cmdstream.xml.h"
|
||||
|
||||
#ifndef PHYS_OFFSET
|
||||
#define PHYS_OFFSET 0
|
||||
#endif
|
||||
|
||||
static const struct platform_device_id gpu_ids[] = {
|
||||
{ .name = "etnaviv-gpu,2d" },
|
||||
{ },
|
||||
|
@ -156,6 +152,18 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
|
|||
*value = ~0ULL;
|
||||
break;
|
||||
|
||||
case ETNAVIV_PARAM_GPU_PRODUCT_ID:
|
||||
*value = gpu->identity.product_id;
|
||||
break;
|
||||
|
||||
case ETNAVIV_PARAM_GPU_CUSTOMER_ID:
|
||||
*value = gpu->identity.customer_id;
|
||||
break;
|
||||
|
||||
case ETNAVIV_PARAM_GPU_ECO_ID:
|
||||
*value = gpu->identity.eco_id;
|
||||
break;
|
||||
|
||||
default:
|
||||
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
|
||||
return -EINVAL;
|
||||
|
@ -724,6 +732,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
|
|||
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
||||
{
|
||||
struct etnaviv_drm_private *priv = gpu->drm->dev_private;
|
||||
dma_addr_t cmdbuf_paddr;
|
||||
int ret, i;
|
||||
|
||||
ret = pm_runtime_get_sync(gpu->dev);
|
||||
|
@ -766,28 +775,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|||
if (ret)
|
||||
goto fail;
|
||||
|
||||
/*
|
||||
* Set the GPU linear window to be at the end of the DMA window, where
|
||||
* the CMA area is likely to reside. This ensures that we are able to
|
||||
* map the command buffers while having the linear window overlap as
|
||||
* much RAM as possible, so we can optimize mappings for other buffers.
|
||||
*
|
||||
* For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
|
||||
* to different views of the memory on the individual engines.
|
||||
*/
|
||||
if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
|
||||
(gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
|
||||
u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
|
||||
if (dma_mask < PHYS_OFFSET + SZ_2G)
|
||||
priv->mmu_global->memory_base = PHYS_OFFSET;
|
||||
else
|
||||
priv->mmu_global->memory_base = dma_mask - SZ_2G + 1;
|
||||
} else if (PHYS_OFFSET >= SZ_2G) {
|
||||
dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
|
||||
priv->mmu_global->memory_base = PHYS_OFFSET;
|
||||
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the GPU is part of a system with DMA addressing limitations,
|
||||
* request pages for our SHM backend buffers from the DMA32 zone to
|
||||
|
@ -804,6 +791,31 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|||
goto fail;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the GPU linear window to cover the cmdbuf region, as the GPU
|
||||
* won't be able to start execution otherwise. The alignment to 128M is
|
||||
* chosen arbitrarily but helps in debugging, as the MMU offset
|
||||
* calculations are much more straight forward this way.
|
||||
*
|
||||
* On MC1.0 cores the linear window offset is ignored by the TS engine,
|
||||
* leading to inconsistent memory views. Avoid using the offset on those
|
||||
* cores if possible, otherwise disable the TS feature.
|
||||
*/
|
||||
cmdbuf_paddr = ALIGN_DOWN(etnaviv_cmdbuf_get_pa(&gpu->buffer), SZ_128M);
|
||||
|
||||
if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
|
||||
(gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
|
||||
if (cmdbuf_paddr >= SZ_2G)
|
||||
priv->mmu_global->memory_base = SZ_2G;
|
||||
else
|
||||
priv->mmu_global->memory_base = cmdbuf_paddr;
|
||||
} else if (cmdbuf_paddr + SZ_128M >= SZ_2G) {
|
||||
dev_info(gpu->dev,
|
||||
"Need to move linear window on MC1.0, disabling TS\n");
|
||||
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
|
||||
priv->mmu_global->memory_base = SZ_2G;
|
||||
}
|
||||
|
||||
/* Setup event management */
|
||||
spin_lock_init(&gpu->event_spinlock);
|
||||
init_completion(&gpu->event_free);
|
||||
|
@ -1771,10 +1783,8 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
|
|||
|
||||
/* Get Interrupt: */
|
||||
gpu->irq = platform_get_irq(pdev, 0);
|
||||
if (gpu->irq < 0) {
|
||||
dev_err(dev, "failed to get irq: %d\n", gpu->irq);
|
||||
if (gpu->irq < 0)
|
||||
return gpu->irq;
|
||||
}
|
||||
|
||||
err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
|
||||
dev_name(gpu->dev), gpu);
|
||||
|
|
|
@ -37,6 +37,37 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
|
|||
.minor_features10 = 0x0,
|
||||
.minor_features11 = 0x0,
|
||||
},
|
||||
{
|
||||
.model = 0x7000,
|
||||
.revision = 0x6204,
|
||||
.product_id = ~0U,
|
||||
.customer_id = ~0U,
|
||||
.eco_id = 0,
|
||||
.stream_count = 16,
|
||||
.register_max = 64,
|
||||
.thread_count = 512,
|
||||
.shader_core_count = 2,
|
||||
.vertex_cache_size = 16,
|
||||
.vertex_output_buffer_size = 1024,
|
||||
.pixel_pipes = 1,
|
||||
.instruction_count = 512,
|
||||
.num_constants = 320,
|
||||
.buffer_size = 0,
|
||||
.varyings_count = 16,
|
||||
.features = 0xe0287c8d,
|
||||
.minor_features0 = 0xc1589eff,
|
||||
.minor_features1 = 0xfefbfad9,
|
||||
.minor_features2 = 0xeb9d4fbf,
|
||||
.minor_features3 = 0xedfffced,
|
||||
.minor_features4 = 0xdb0dafc7,
|
||||
.minor_features5 = 0x3b5ac333,
|
||||
.minor_features6 = 0xfcce6000,
|
||||
.minor_features7 = 0xfffbfa6f,
|
||||
.minor_features8 = 0x00e10ef3,
|
||||
.minor_features9 = 0x04c8003c,
|
||||
.minor_features10 = 0x00004060,
|
||||
.minor_features11 = 0x00000024,
|
||||
},
|
||||
{
|
||||
.model = 0x7000,
|
||||
.revision = 0x6214,
|
||||
|
|
|
@ -513,8 +513,13 @@ static void decon_swreset(struct decon_context *ctx)
|
|||
static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
|
||||
{
|
||||
struct decon_context *ctx = crtc->ctx;
|
||||
int ret;
|
||||
|
||||
pm_runtime_get_sync(ctx->dev);
|
||||
ret = pm_runtime_resume_and_get(ctx->dev);
|
||||
if (ret < 0) {
|
||||
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
exynos_drm_pipe_clk_enable(crtc, true);
|
||||
|
||||
|
|
|
@ -531,11 +531,16 @@ static void decon_init(struct decon_context *ctx)
|
|||
static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
|
||||
{
|
||||
struct decon_context *ctx = crtc->ctx;
|
||||
int ret;
|
||||
|
||||
if (!ctx->suspended)
|
||||
return;
|
||||
|
||||
pm_runtime_get_sync(ctx->dev);
|
||||
ret = pm_runtime_resume_and_get(ctx->dev);
|
||||
if (ret < 0) {
|
||||
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
decon_init(ctx);
|
||||
|
||||
|
|
|
@ -1383,7 +1383,12 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
|
|||
if (dsi->state & DSIM_STATE_ENABLED)
|
||||
return;
|
||||
|
||||
pm_runtime_get_sync(dsi->dev);
|
||||
ret = pm_runtime_resume_and_get(dsi->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(dsi->dev, "failed to enable DSI device.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
dsi->state |= DSIM_STATE_ENABLED;
|
||||
|
||||
if (dsi->panel) {
|
||||
|
|
|
@ -1085,8 +1085,14 @@ static int fimc_commit(struct exynos_drm_ipp *ipp,
|
|||
{
|
||||
struct fimc_context *ctx =
|
||||
container_of(ipp, struct fimc_context, ipp);
|
||||
int ret;
|
||||
|
||||
ret = pm_runtime_resume_and_get(ctx->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(ctx->dev, "failed to enable FIMC device.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
pm_runtime_get_sync(ctx->dev);
|
||||
ctx->task = task;
|
||||
|
||||
fimc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
|
||||
|
|
|
@ -343,13 +343,18 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
|
|||
writel(val, ctx->regs + SHADOWCON);
|
||||
}
|
||||
|
||||
static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
|
||||
static int fimd_clear_channels(struct exynos_drm_crtc *crtc)
|
||||
{
|
||||
struct fimd_context *ctx = crtc->ctx;
|
||||
unsigned int win, ch_enabled = 0;
|
||||
int ret;
|
||||
|
||||
/* Hardware is in unknown state, so ensure it gets enabled properly */
|
||||
pm_runtime_get_sync(ctx->dev);
|
||||
ret = pm_runtime_resume_and_get(ctx->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(ctx->dev, "failed to enable FIMD device.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
clk_prepare_enable(ctx->bus_clk);
|
||||
clk_prepare_enable(ctx->lcd_clk);
|
||||
|
@ -384,6 +389,8 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
|
|||
clk_disable_unprepare(ctx->bus_clk);
|
||||
|
||||
pm_runtime_put(ctx->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
@ -905,7 +912,10 @@ static void fimd_atomic_enable(struct exynos_drm_crtc *crtc)
|
|||
|
||||
ctx->suspended = false;
|
||||
|
||||
pm_runtime_get_sync(ctx->dev);
|
||||
if (pm_runtime_resume_and_get(ctx->dev) < 0) {
|
||||
dev_warn(ctx->dev, "failed to enable FIMD device.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* if vblank was enabled status, enable it again. */
|
||||
if (test_and_clear_bit(0, &ctx->irq_flags))
|
||||
|
@ -1089,8 +1099,13 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
|
|||
if (ctx->encoder)
|
||||
exynos_dpi_bind(drm_dev, ctx->encoder);
|
||||
|
||||
if (is_drm_iommu_supported(drm_dev))
|
||||
fimd_clear_channels(ctx->crtc);
|
||||
if (is_drm_iommu_supported(drm_dev)) {
|
||||
int ret;
|
||||
|
||||
ret = fimd_clear_channels(ctx->crtc);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
|
||||
}
|
||||
|
|
|
@ -892,7 +892,14 @@ static void g2d_runqueue_worker(struct work_struct *work)
|
|||
g2d->runqueue_node = g2d_get_runqueue_node(g2d);
|
||||
|
||||
if (g2d->runqueue_node) {
|
||||
pm_runtime_get_sync(g2d->dev);
|
||||
int ret;
|
||||
|
||||
ret = pm_runtime_resume_and_get(g2d->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(g2d->dev, "failed to enable G2D device.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
g2d_dma_start(g2d, g2d->runqueue_node);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1118,7 +1118,12 @@ static int gsc_commit(struct exynos_drm_ipp *ipp,
|
|||
struct gsc_context *ctx = container_of(ipp, struct gsc_context, ipp);
|
||||
int ret;
|
||||
|
||||
pm_runtime_get_sync(ctx->dev);
|
||||
ret = pm_runtime_resume_and_get(ctx->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(ctx->dev, "failed to enable GScaler device.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ctx->task = task;
|
||||
|
||||
ret = gsc_reset(ctx);
|
||||
|
|
|
@ -268,11 +268,9 @@ static void mic_pre_enable(struct drm_bridge *bridge)
|
|||
if (mic->enabled)
|
||||
goto unlock;
|
||||
|
||||
ret = pm_runtime_get_sync(mic->dev);
|
||||
if (ret < 0) {
|
||||
pm_runtime_put_noidle(mic->dev);
|
||||
ret = pm_runtime_resume_and_get(mic->dev);
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
mic_set_path(mic, 1);
|
||||
|
||||
|
|
|
@ -219,8 +219,13 @@ static int rotator_commit(struct exynos_drm_ipp *ipp,
|
|||
{
|
||||
struct rot_context *rot =
|
||||
container_of(ipp, struct rot_context, ipp);
|
||||
int ret;
|
||||
|
||||
pm_runtime_get_sync(rot->dev);
|
||||
ret = pm_runtime_resume_and_get(rot->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(rot->dev, "failed to enable ROTATOR device.\n");
|
||||
return ret;
|
||||
}
|
||||
rot->task = task;
|
||||
|
||||
rotator_src_set_fmt(rot, task->src.buf.fourcc);
|
||||
|
|
|
@ -362,15 +362,17 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
|
|||
struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;
|
||||
struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
|
||||
const struct scaler_format *src_fmt, *dst_fmt;
|
||||
int ret = 0;
|
||||
|
||||
src_fmt = scaler_get_format(task->src.buf.fourcc);
|
||||
dst_fmt = scaler_get_format(task->dst.buf.fourcc);
|
||||
|
||||
pm_runtime_get_sync(scaler->dev);
|
||||
if (scaler_reset(scaler)) {
|
||||
pm_runtime_put(scaler->dev);
|
||||
ret = pm_runtime_resume_and_get(scaler->dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (scaler_reset(scaler))
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
scaler->task = task;
|
||||
|
||||
|
|
|
@ -1483,10 +1483,16 @@ static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
|
|||
/* Should be called with hdata->mutex mutex held. */
|
||||
static void hdmiphy_enable(struct hdmi_context *hdata)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (hdata->powered)
|
||||
return;
|
||||
|
||||
pm_runtime_get_sync(hdata->dev);
|
||||
ret = pm_runtime_resume_and_get(hdata->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(hdata->dev, "failed to enable HDMIPHY device.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
|
||||
DRM_DEV_DEBUG_KMS(hdata->dev,
|
||||
|
|
|
@ -992,11 +992,16 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
|
|||
static void mixer_atomic_enable(struct exynos_drm_crtc *crtc)
|
||||
{
|
||||
struct mixer_context *ctx = crtc->ctx;
|
||||
int ret;
|
||||
|
||||
if (test_bit(MXR_BIT_POWERED, &ctx->flags))
|
||||
return;
|
||||
|
||||
pm_runtime_get_sync(ctx->dev);
|
||||
ret = pm_runtime_resume_and_get(ctx->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(ctx->dev, "failed to enable MIXER device.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
exynos_drm_pipe_clk_enable(crtc, true);
|
||||
|
||||
|
|
|
@ -20,13 +20,13 @@ config DRM_I915
|
|||
select INPUT if ACPI
|
||||
select ACPI_VIDEO if ACPI
|
||||
select ACPI_BUTTON if ACPI
|
||||
select IO_MAPPING
|
||||
select SYNC_FILE
|
||||
select IOSF_MBI
|
||||
select CRC32
|
||||
select SND_HDA_I915 if SND_HDA_CORE
|
||||
select CEC_CORE if CEC_NOTIFIER
|
||||
select VMAP_PFN
|
||||
select DRM_TTM
|
||||
help
|
||||
Choose this option if you have a system that has "Intel Graphics
|
||||
Media Accelerator" or "HD Graphics" integrated graphics,
|
||||
|
|
|
@ -50,6 +50,7 @@ i915-y += i915_drv.o \
|
|||
intel_memory_region.o \
|
||||
intel_pch.o \
|
||||
intel_pm.o \
|
||||
intel_region_ttm.o \
|
||||
intel_runtime_pm.o \
|
||||
intel_sideband.o \
|
||||
intel_step.o \
|
||||
|
@ -160,7 +161,6 @@ gem-y += \
|
|||
i915-y += \
|
||||
$(gem-y) \
|
||||
i915_active.o \
|
||||
i915_buddy.o \
|
||||
i915_cmd_parser.o \
|
||||
i915_gem_evict.o \
|
||||
i915_gem_gtt.o \
|
||||
|
|
|
@ -1190,7 +1190,7 @@ static void set_ppgtt_barrier(void *data)
|
|||
{
|
||||
struct i915_address_space *old = data;
|
||||
|
||||
if (INTEL_GEN(old->i915) < 8)
|
||||
if (GRAPHICS_VER(old->i915) < 8)
|
||||
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
|
||||
|
||||
i915_vm_close(old);
|
||||
|
@ -1436,7 +1436,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
|
|||
context->max_eus_per_subslice = user->max_eus_per_subslice;
|
||||
|
||||
/* Part specific restrictions. */
|
||||
if (IS_GEN(i915, 11)) {
|
||||
if (GRAPHICS_VER(i915) == 11) {
|
||||
unsigned int hw_s = hweight8(device->slice_mask);
|
||||
unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
|
||||
unsigned int req_s = hweight8(context->slice_mask);
|
||||
|
@ -1503,7 +1503,7 @@ static int set_sseu(struct i915_gem_context *ctx,
|
|||
if (args->size < sizeof(user_sseu))
|
||||
return -EINVAL;
|
||||
|
||||
if (!IS_GEN(i915, 11))
|
||||
if (GRAPHICS_VER(i915) != 11)
|
||||
return -ENODEV;
|
||||
|
||||
if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
|
||||
|
|
|
@ -209,7 +209,7 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
|
|||
if (IS_ERR(pages))
|
||||
return PTR_ERR(pages);
|
||||
|
||||
sg_page_sizes = i915_sg_page_sizes(pages->sgl);
|
||||
sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
|
||||
|
||||
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
|
||||
|
||||
|
|
|
@ -500,7 +500,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
|
|||
* also covers all platforms with local memory.
|
||||
*/
|
||||
if (entry->relocation_count &&
|
||||
INTEL_GEN(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
|
||||
GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
|
||||
return -EINVAL;
|
||||
|
||||
if (unlikely(entry->flags & eb->invalid_flags))
|
||||
|
@ -1439,7 +1439,7 @@ err_pool:
|
|||
|
||||
static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
|
||||
{
|
||||
return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
|
||||
return engine->class != VIDEO_DECODE_CLASS || GRAPHICS_VER(engine->i915) != 6;
|
||||
}
|
||||
|
||||
static u32 *reloc_gpu(struct i915_execbuffer *eb,
|
||||
|
@ -1671,7 +1671,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
|
|||
* batchbuffers.
|
||||
*/
|
||||
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
|
||||
IS_GEN(eb->i915, 6)) {
|
||||
GRAPHICS_VER(eb->i915) == 6) {
|
||||
err = i915_vma_bind(target->vma,
|
||||
target->vma->obj->cache_level,
|
||||
PIN_GLOBAL, NULL);
|
||||
|
@ -2332,7 +2332,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
|
|||
u32 *cs;
|
||||
int i;
|
||||
|
||||
if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
|
||||
if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
|
||||
drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -3375,7 +3375,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
|
|||
|
||||
eb.batch_flags = 0;
|
||||
if (args->flags & I915_EXEC_SECURE) {
|
||||
if (INTEL_GEN(i915) >= 11)
|
||||
if (GRAPHICS_VER(i915) >= 11)
|
||||
return -ENODEV;
|
||||
|
||||
/* Return -EPERM to trigger fallback code on old binaries. */
|
||||
|
|
|
@ -4,16 +4,71 @@
|
|||
*/
|
||||
|
||||
#include "intel_memory_region.h"
|
||||
#include "intel_region_ttm.h"
|
||||
#include "gem/i915_gem_region.h"
|
||||
#include "gem/i915_gem_lmem.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
static void lmem_put_pages(struct drm_i915_gem_object *obj,
|
||||
struct sg_table *pages)
|
||||
{
|
||||
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
|
||||
obj->mm.dirty = false;
|
||||
sg_free_table(pages);
|
||||
kfree(pages);
|
||||
}
|
||||
|
||||
static int lmem_get_pages(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
unsigned int flags;
|
||||
struct sg_table *pages;
|
||||
|
||||
flags = I915_ALLOC_MIN_PAGE_SIZE;
|
||||
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
|
||||
flags |= I915_ALLOC_CONTIGUOUS;
|
||||
|
||||
obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
|
||||
obj->base.size,
|
||||
flags);
|
||||
if (IS_ERR(obj->mm.st_mm_node))
|
||||
return PTR_ERR(obj->mm.st_mm_node);
|
||||
|
||||
/* Range manager is always contigous */
|
||||
if (obj->mm.region->is_range_manager)
|
||||
obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
|
||||
pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
|
||||
if (IS_ERR(pages)) {
|
||||
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
|
||||
return PTR_ERR(pages);
|
||||
}
|
||||
|
||||
__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
|
||||
|
||||
if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
|
||||
void __iomem *vaddr =
|
||||
i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
|
||||
|
||||
if (!vaddr) {
|
||||
struct sg_table *pages =
|
||||
__i915_gem_object_unset_pages(obj);
|
||||
|
||||
if (!IS_ERR_OR_NULL(pages))
|
||||
lmem_put_pages(obj, pages);
|
||||
}
|
||||
|
||||
memset_io(vaddr, 0, obj->base.size);
|
||||
io_mapping_unmap(vaddr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
|
||||
.name = "i915_gem_object_lmem",
|
||||
.flags = I915_GEM_OBJECT_HAS_IOMEM,
|
||||
|
||||
.get_pages = i915_gem_object_get_pages_buddy,
|
||||
.put_pages = i915_gem_object_put_pages_buddy,
|
||||
.get_pages = lmem_get_pages,
|
||||
.put_pages = lmem_put_pages,
|
||||
.release = i915_gem_object_release_memory_region,
|
||||
};
|
||||
|
||||
|
|
|
@ -56,10 +56,17 @@ int
|
|||
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
struct drm_i915_gem_mmap *args = data;
|
||||
struct drm_i915_gem_object *obj;
|
||||
unsigned long addr;
|
||||
|
||||
/* mmap ioctl is disallowed for all platforms after TGL-LP. This also
|
||||
* covers all platforms with local memory.
|
||||
*/
|
||||
if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (args->flags & ~(I915_MMAP_WC))
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -367,10 +374,11 @@ retry:
|
|||
goto err_unpin;
|
||||
|
||||
/* Finally, remap it using the new GTT offset */
|
||||
ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
|
||||
(vma->ggtt_view.partial.offset << PAGE_SHIFT),
|
||||
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
|
||||
min_t(u64, vma->size, area->vm_end - area->vm_start));
|
||||
ret = remap_io_mapping(area,
|
||||
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
|
||||
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
|
||||
min_t(u64, vma->size, area->vm_end - area->vm_start),
|
||||
&ggtt->iomap);
|
||||
if (ret)
|
||||
goto err_fence;
|
||||
|
||||
|
|
|
@ -62,6 +62,13 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
|
|||
const struct drm_i915_gem_object_ops *ops,
|
||||
struct lock_class_key *key, unsigned flags)
|
||||
{
|
||||
/*
|
||||
* A gem object is embedded both in a struct ttm_buffer_object :/ and
|
||||
* in a drm_i915_gem_object. Make sure they are aliased.
|
||||
*/
|
||||
BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
|
||||
offsetof(typeof(*obj), __do_not_access.base));
|
||||
|
||||
spin_lock_init(&obj->vma.lock);
|
||||
INIT_LIST_HEAD(&obj->vma.list);
|
||||
|
||||
|
@ -252,6 +259,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
|
|||
if (obj->mm.n_placements > 1)
|
||||
kfree(obj->mm.placements);
|
||||
|
||||
if (obj->shares_resv_from)
|
||||
i915_vm_resv_put(obj->shares_resv_from);
|
||||
|
||||
/* But keep the pointer alive for RCU-protected lookups */
|
||||
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
|
||||
cond_resched();
|
||||
|
|
|
@ -72,7 +72,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
|
|||
|
||||
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
|
||||
|
||||
if (INTEL_GEN(i915) >= 8) {
|
||||
if (GRAPHICS_VER(i915) >= 8) {
|
||||
*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
|
||||
*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
|
||||
*cmd++ = 0;
|
||||
|
@ -232,7 +232,7 @@ static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
|
|||
{
|
||||
u32 height = size >> PAGE_SHIFT;
|
||||
|
||||
if (!IS_GEN(i915, 11))
|
||||
if (GRAPHICS_VER(i915) != 11)
|
||||
return false;
|
||||
|
||||
return height % 4 == 3 && height <= 8;
|
||||
|
@ -297,7 +297,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
|
|||
size = min_t(u64, rem, block_size);
|
||||
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
|
||||
|
||||
if (INTEL_GEN(i915) >= 9 &&
|
||||
if (GRAPHICS_VER(i915) >= 9 &&
|
||||
!wa_1209644611_applies(i915, size)) {
|
||||
*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
|
||||
*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
|
||||
|
@ -309,7 +309,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
|
|||
*cmd++ = PAGE_SIZE;
|
||||
*cmd++ = lower_32_bits(src_offset);
|
||||
*cmd++ = upper_32_bits(src_offset);
|
||||
} else if (INTEL_GEN(i915) >= 8) {
|
||||
} else if (GRAPHICS_VER(i915) >= 8) {
|
||||
*cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
|
||||
*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
|
||||
*cmd++ = 0;
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/mmu_notifier.h>
|
||||
|
||||
#include <drm/drm_gem.h>
|
||||
#include <drm/ttm/ttm_bo_api.h>
|
||||
#include <uapi/drm/i915_drm.h>
|
||||
|
||||
#include "i915_active.h"
|
||||
|
@ -99,7 +100,16 @@ struct i915_gem_object_page_iter {
|
|||
};
|
||||
|
||||
struct drm_i915_gem_object {
|
||||
struct drm_gem_object base;
|
||||
/*
|
||||
* We might have reason to revisit the below since it wastes
|
||||
* a lot of space for non-ttm gem objects.
|
||||
* In any case, always use the accessors for the ttm_buffer_object
|
||||
* when accessing it.
|
||||
*/
|
||||
union {
|
||||
struct drm_gem_object base;
|
||||
struct ttm_buffer_object __do_not_access;
|
||||
};
|
||||
|
||||
const struct drm_i915_gem_object_ops *ops;
|
||||
|
||||
|
@ -149,6 +159,10 @@ struct drm_i915_gem_object {
|
|||
* when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
|
||||
*/
|
||||
struct list_head obj_link;
|
||||
/**
|
||||
* @shared_resv_from: The object shares the resv from this vm.
|
||||
*/
|
||||
struct i915_address_space *shares_resv_from;
|
||||
|
||||
union {
|
||||
struct rcu_head rcu;
|
||||
|
@ -231,10 +245,12 @@ struct drm_i915_gem_object {
|
|||
* Memory region for this object.
|
||||
*/
|
||||
struct intel_memory_region *region;
|
||||
|
||||
/**
|
||||
* List of memory region blocks allocated for this object.
|
||||
* Memory manager node allocated for this object.
|
||||
*/
|
||||
struct list_head blocks;
|
||||
void *st_mm_node;
|
||||
|
||||
/**
|
||||
* Element within memory_region->objects or region->purgeable
|
||||
* if the object is marked as DONTNEED. Access is protected by
|
||||
|
|
|
@ -475,7 +475,8 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
|
|||
|
||||
might_sleep();
|
||||
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
|
||||
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
|
||||
if (!i915_gem_object_has_pinned_pages(obj))
|
||||
assert_object_held(obj);
|
||||
|
||||
/* As we iterate forward through the sg, we record each entry in a
|
||||
* radixtree for quick repeated (backwards) lookups. If we have seen
|
||||
|
|
|
@ -207,7 +207,7 @@ static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
|
|||
|
||||
err_xfer:
|
||||
if (!IS_ERR_OR_NULL(pages)) {
|
||||
unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
|
||||
unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
|
||||
|
||||
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
|
||||
}
|
||||
|
|
|
@ -8,129 +8,9 @@
|
|||
#include "i915_drv.h"
|
||||
#include "i915_trace.h"
|
||||
|
||||
void
|
||||
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
|
||||
struct sg_table *pages)
|
||||
{
|
||||
__intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
|
||||
|
||||
obj->mm.dirty = false;
|
||||
sg_free_table(pages);
|
||||
kfree(pages);
|
||||
}
|
||||
|
||||
int
|
||||
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
const u64 max_segment = i915_sg_segment_size();
|
||||
struct intel_memory_region *mem = obj->mm.region;
|
||||
struct list_head *blocks = &obj->mm.blocks;
|
||||
resource_size_t size = obj->base.size;
|
||||
resource_size_t prev_end;
|
||||
struct i915_buddy_block *block;
|
||||
unsigned int flags;
|
||||
struct sg_table *st;
|
||||
struct scatterlist *sg;
|
||||
unsigned int sg_page_sizes;
|
||||
int ret;
|
||||
|
||||
st = kmalloc(sizeof(*st), GFP_KERNEL);
|
||||
if (!st)
|
||||
return -ENOMEM;
|
||||
|
||||
if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
|
||||
kfree(st);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
flags = I915_ALLOC_MIN_PAGE_SIZE;
|
||||
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
|
||||
flags |= I915_ALLOC_CONTIGUOUS;
|
||||
|
||||
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
|
||||
if (ret)
|
||||
goto err_free_sg;
|
||||
|
||||
GEM_BUG_ON(list_empty(blocks));
|
||||
|
||||
sg = st->sgl;
|
||||
st->nents = 0;
|
||||
sg_page_sizes = 0;
|
||||
prev_end = (resource_size_t)-1;
|
||||
|
||||
list_for_each_entry(block, blocks, link) {
|
||||
u64 block_size, offset;
|
||||
|
||||
block_size = min_t(u64, size,
|
||||
i915_buddy_block_size(&mem->mm, block));
|
||||
offset = i915_buddy_block_offset(block);
|
||||
|
||||
while (block_size) {
|
||||
u64 len;
|
||||
|
||||
if (offset != prev_end || sg->length >= max_segment) {
|
||||
if (st->nents) {
|
||||
sg_page_sizes |= sg->length;
|
||||
sg = __sg_next(sg);
|
||||
}
|
||||
|
||||
sg_dma_address(sg) = mem->region.start + offset;
|
||||
sg_dma_len(sg) = 0;
|
||||
sg->length = 0;
|
||||
st->nents++;
|
||||
}
|
||||
|
||||
len = min(block_size, max_segment - sg->length);
|
||||
sg->length += len;
|
||||
sg_dma_len(sg) += len;
|
||||
|
||||
offset += len;
|
||||
block_size -= len;
|
||||
|
||||
prev_end = offset;
|
||||
}
|
||||
}
|
||||
|
||||
sg_page_sizes |= sg->length;
|
||||
sg_mark_end(sg);
|
||||
i915_sg_trim(st);
|
||||
|
||||
/* Intended for kernel internal use only */
|
||||
if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
|
||||
struct scatterlist *sg;
|
||||
unsigned long i;
|
||||
|
||||
for_each_sg(st->sgl, sg, st->nents, i) {
|
||||
unsigned int length;
|
||||
void __iomem *vaddr;
|
||||
dma_addr_t daddr;
|
||||
|
||||
daddr = sg_dma_address(sg);
|
||||
daddr -= mem->region.start;
|
||||
length = sg_dma_len(sg);
|
||||
|
||||
vaddr = io_mapping_map_wc(&mem->iomap, daddr, length);
|
||||
memset64((void __force *)vaddr, 0, length / sizeof(u64));
|
||||
io_mapping_unmap(vaddr);
|
||||
}
|
||||
|
||||
wmb();
|
||||
}
|
||||
|
||||
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_sg:
|
||||
sg_free_table(st);
|
||||
kfree(st);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
|
||||
struct intel_memory_region *mem)
|
||||
{
|
||||
INIT_LIST_HEAD(&obj->mm.blocks);
|
||||
obj->mm.region = intel_memory_region_get(mem);
|
||||
|
||||
if (obj->base.size <= mem->min_page_size)
|
||||
|
|
|
@ -12,10 +12,6 @@ struct intel_memory_region;
|
|||
struct drm_i915_gem_object;
|
||||
struct sg_table;
|
||||
|
||||
int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
|
||||
struct sg_table *pages);
|
||||
|
||||
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
|
||||
struct intel_memory_region *mem);
|
||||
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
|
||||
|
|
|
@ -628,11 +628,13 @@ static const struct intel_memory_region_ops shmem_region_ops = {
|
|||
.init_object = shmem_object_init,
|
||||
};
|
||||
|
||||
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
|
||||
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
|
||||
u16 type, u16 instance)
|
||||
{
|
||||
return intel_memory_region_create(i915, 0,
|
||||
totalram_pages() << PAGE_SHIFT,
|
||||
PAGE_SIZE, 0,
|
||||
type, instance,
|
||||
&shmem_region_ops);
|
||||
}
|
||||
|
||||
|
|
|
@ -38,7 +38,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
|
|||
return -ENODEV;
|
||||
|
||||
/* WaSkipStolenMemoryFirstPage:bdw+ */
|
||||
if (INTEL_GEN(i915) >= 8 && start < 4096)
|
||||
if (GRAPHICS_VER(i915) >= 8 && start < 4096)
|
||||
start = 4096;
|
||||
|
||||
mutex_lock(&i915->mm.stolen_lock);
|
||||
|
@ -84,14 +84,14 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
|
|||
*/
|
||||
|
||||
/* Make sure we don't clobber the GTT if it's within stolen memory */
|
||||
if (INTEL_GEN(i915) <= 4 &&
|
||||
if (GRAPHICS_VER(i915) <= 4 &&
|
||||
!IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
|
||||
struct resource stolen[2] = {*dsm, *dsm};
|
||||
struct resource ggtt_res;
|
||||
resource_size_t ggtt_start;
|
||||
|
||||
ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
|
||||
if (IS_GEN(i915, 4))
|
||||
if (GRAPHICS_VER(i915) == 4)
|
||||
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
|
||||
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
|
||||
else
|
||||
|
@ -156,7 +156,7 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
|
|||
* GEN3 firmware likes to smash pci bridges into the stolen
|
||||
* range. Apparently this works.
|
||||
*/
|
||||
if (!r && !IS_GEN(i915, 3)) {
|
||||
if (!r && GRAPHICS_VER(i915) != 3) {
|
||||
drm_err(&i915->drm,
|
||||
"conflict detected with stolen region: %pR\n",
|
||||
dsm);
|
||||
|
@ -197,7 +197,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
|
|||
* Whether ILK really reuses the ELK register for this is unclear.
|
||||
* Let's see if we catch anyone with this supposedly enabled on ILK.
|
||||
*/
|
||||
drm_WARN(&i915->drm, IS_GEN(i915, 5),
|
||||
drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
|
||||
"ILK stolen reserved found? 0x%08x\n",
|
||||
reg_val);
|
||||
|
||||
|
@ -399,7 +399,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
|
|||
return 0;
|
||||
}
|
||||
|
||||
if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
|
||||
if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) {
|
||||
drm_notice(&i915->drm,
|
||||
"%s, disabling use of stolen memory\n",
|
||||
"DMAR active");
|
||||
|
@ -421,7 +421,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
|
|||
reserved_base = stolen_top;
|
||||
reserved_size = 0;
|
||||
|
||||
switch (INTEL_GEN(i915)) {
|
||||
switch (GRAPHICS_VER(i915)) {
|
||||
case 2:
|
||||
case 3:
|
||||
break;
|
||||
|
@ -456,7 +456,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
|
|||
&reserved_base, &reserved_size);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(INTEL_GEN(i915));
|
||||
MISSING_CASE(GRAPHICS_VER(i915));
|
||||
fallthrough;
|
||||
case 11:
|
||||
case 12:
|
||||
|
@ -772,7 +772,8 @@ static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
|
|||
};
|
||||
|
||||
struct intel_memory_region *
|
||||
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915)
|
||||
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
|
||||
u16 instance)
|
||||
{
|
||||
struct intel_uncore *uncore = &i915->uncore;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
|
@ -790,6 +791,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915)
|
|||
|
||||
mem = intel_memory_region_create(i915, lmem_base, lmem_size,
|
||||
I915_GTT_PAGE_SIZE_4K, io_start,
|
||||
type, instance,
|
||||
&i915_region_stolen_lmem_ops);
|
||||
if (IS_ERR(mem))
|
||||
return mem;
|
||||
|
@ -811,14 +813,15 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915)
|
|||
}
|
||||
|
||||
struct intel_memory_region*
|
||||
i915_gem_stolen_smem_setup(struct drm_i915_private *i915)
|
||||
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
|
||||
u16 instance)
|
||||
{
|
||||
struct intel_memory_region *mem;
|
||||
|
||||
mem = intel_memory_region_create(i915,
|
||||
intel_graphics_stolen_res.start,
|
||||
resource_size(&intel_graphics_stolen_res),
|
||||
PAGE_SIZE, 0,
|
||||
PAGE_SIZE, 0, type, instance,
|
||||
&i915_region_stolen_smem_ops);
|
||||
if (IS_ERR(mem))
|
||||
return mem;
|
||||
|
@ -826,7 +829,6 @@ i915_gem_stolen_smem_setup(struct drm_i915_private *i915)
|
|||
intel_memory_region_set_name(mem, "stolen-system");
|
||||
|
||||
mem->private = true;
|
||||
|
||||
return mem;
|
||||
}
|
||||
|
||||
|
|
|
@ -21,8 +21,13 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
|
|||
u64 end);
|
||||
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
|
||||
struct drm_mm_node *node);
|
||||
struct intel_memory_region *i915_gem_stolen_smem_setup(struct drm_i915_private *i915);
|
||||
struct intel_memory_region *i915_gem_stolen_lmem_setup(struct drm_i915_private *i915);
|
||||
struct intel_memory_region *
|
||||
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
|
||||
u16 instance);
|
||||
struct intel_memory_region *
|
||||
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
|
||||
u16 instance);
|
||||
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
|
||||
resource_size_t size);
|
||||
|
|
|
@ -62,14 +62,14 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
|
|||
|
||||
GEM_BUG_ON(!stride);
|
||||
|
||||
if (INTEL_GEN(i915) >= 4) {
|
||||
if (GRAPHICS_VER(i915) >= 4) {
|
||||
stride *= i915_gem_tile_height(tiling);
|
||||
GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
|
||||
return roundup(size, stride);
|
||||
}
|
||||
|
||||
/* Previous chips need a power-of-two fence region when tiling */
|
||||
if (IS_GEN(i915, 3))
|
||||
if (GRAPHICS_VER(i915) == 3)
|
||||
ggtt_size = 1024*1024;
|
||||
else
|
||||
ggtt_size = 512*1024;
|
||||
|
@ -102,7 +102,7 @@ u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
|
|||
if (tiling == I915_TILING_NONE)
|
||||
return I915_GTT_MIN_ALIGNMENT;
|
||||
|
||||
if (INTEL_GEN(i915) >= 4)
|
||||
if (GRAPHICS_VER(i915) >= 4)
|
||||
return I965_FENCE_PAGE;
|
||||
|
||||
/*
|
||||
|
@ -130,10 +130,10 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
|
|||
/* check maximum stride & object size */
|
||||
/* i965+ stores the end address of the gtt mapping in the fence
|
||||
* reg, so dont bother to check the size */
|
||||
if (INTEL_GEN(i915) >= 7) {
|
||||
if (GRAPHICS_VER(i915) >= 7) {
|
||||
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
|
||||
return false;
|
||||
} else if (INTEL_GEN(i915) >= 4) {
|
||||
} else if (GRAPHICS_VER(i915) >= 4) {
|
||||
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
|
||||
return false;
|
||||
} else {
|
||||
|
@ -144,7 +144,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
|
|||
return false;
|
||||
}
|
||||
|
||||
if (IS_GEN(i915, 2) ||
|
||||
if (GRAPHICS_VER(i915) == 2 ||
|
||||
(tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
|
||||
tile_width = 128;
|
||||
else
|
||||
|
|
|
@ -173,7 +173,7 @@ alloc_table:
|
|||
goto err;
|
||||
}
|
||||
|
||||
sg_page_sizes = i915_sg_page_sizes(st->sgl);
|
||||
sg_page_sizes = i915_sg_dma_sizes(st->sgl);
|
||||
|
||||
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
|
||||
|
||||
|
|
|
@ -152,8 +152,8 @@ static int prepare_blit(const struct tiled_blits *t,
|
|||
struct blit_buffer *src,
|
||||
struct drm_i915_gem_object *batch)
|
||||
{
|
||||
const int gen = INTEL_GEN(to_i915(batch->base.dev));
|
||||
bool use_64b_reloc = gen >= 8;
|
||||
const int ver = GRAPHICS_VER(to_i915(batch->base.dev));
|
||||
bool use_64b_reloc = ver >= 8;
|
||||
u32 src_pitch, dst_pitch;
|
||||
u32 cmd, *cs;
|
||||
|
||||
|
@ -171,7 +171,7 @@ static int prepare_blit(const struct tiled_blits *t,
|
|||
*cs++ = cmd;
|
||||
|
||||
cmd = MI_FLUSH_DW;
|
||||
if (gen >= 8)
|
||||
if (ver >= 8)
|
||||
cmd++;
|
||||
*cs++ = cmd;
|
||||
*cs++ = 0;
|
||||
|
@ -179,7 +179,7 @@ static int prepare_blit(const struct tiled_blits *t,
|
|||
*cs++ = 0;
|
||||
|
||||
cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
|
||||
if (gen >= 8)
|
||||
if (ver >= 8)
|
||||
cmd += 2;
|
||||
|
||||
src_pitch = t->width * 4;
|
||||
|
@ -666,7 +666,7 @@ static int igt_client_tiled_blits(void *arg)
|
|||
int inst = 0;
|
||||
|
||||
/* Test requires explicit BLT tiling controls */
|
||||
if (INTEL_GEN(i915) < 4)
|
||||
if (GRAPHICS_VER(i915) < 4)
|
||||
return 0;
|
||||
|
||||
if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
|
||||
|
|
|
@ -221,12 +221,12 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
|
|||
goto out_rq;
|
||||
}
|
||||
|
||||
if (INTEL_GEN(ctx->engine->i915) >= 8) {
|
||||
if (GRAPHICS_VER(ctx->engine->i915) >= 8) {
|
||||
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
|
||||
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
|
||||
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
|
||||
*cs++ = v;
|
||||
} else if (INTEL_GEN(ctx->engine->i915) >= 4) {
|
||||
} else if (GRAPHICS_VER(ctx->engine->i915) >= 4) {
|
||||
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
|
||||
*cs++ = 0;
|
||||
*cs++ = i915_ggtt_offset(vma) + offset;
|
||||
|
|
|
@ -897,7 +897,7 @@ static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *v
|
|||
{
|
||||
u32 *cmd;
|
||||
|
||||
GEM_BUG_ON(INTEL_GEN(vma->vm->i915) < 8);
|
||||
GEM_BUG_ON(GRAPHICS_VER(vma->vm->i915) < 8);
|
||||
|
||||
cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB);
|
||||
if (IS_ERR(cmd))
|
||||
|
@ -932,7 +932,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
|
|||
|
||||
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
|
||||
|
||||
if (INTEL_GEN(i915) < 8)
|
||||
if (GRAPHICS_VER(i915) < 8)
|
||||
return -EINVAL;
|
||||
|
||||
vma = i915_vma_instance(obj, ce->vm, NULL);
|
||||
|
@ -1100,7 +1100,7 @@ __read_slice_count(struct intel_context *ce,
|
|||
return ret;
|
||||
}
|
||||
|
||||
if (INTEL_GEN(ce->engine->i915) >= 11) {
|
||||
if (GRAPHICS_VER(ce->engine->i915) >= 11) {
|
||||
s_mask = GEN11_RPCS_S_CNT_MASK;
|
||||
s_shift = GEN11_RPCS_S_CNT_SHIFT;
|
||||
} else {
|
||||
|
@ -1229,7 +1229,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
|
|||
int inst = 0;
|
||||
int ret = 0;
|
||||
|
||||
if (INTEL_GEN(i915) < 9)
|
||||
if (GRAPHICS_VER(i915) < 9)
|
||||
return 0;
|
||||
|
||||
if (flags & TEST_RESET)
|
||||
|
@ -1518,7 +1518,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
|
|||
}
|
||||
|
||||
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
|
||||
if (INTEL_GEN(i915) >= 8) {
|
||||
if (GRAPHICS_VER(i915) >= 8) {
|
||||
*cmd++ = lower_32_bits(offset);
|
||||
*cmd++ = upper_32_bits(offset);
|
||||
} else {
|
||||
|
@ -1608,7 +1608,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
|
|||
if (IS_ERR(obj))
|
||||
return PTR_ERR(obj);
|
||||
|
||||
if (INTEL_GEN(i915) >= 8) {
|
||||
if (GRAPHICS_VER(i915) >= 8) {
|
||||
const u32 GPR0 = engine->mmio_base + 0x600;
|
||||
|
||||
vm = i915_gem_context_get_vm_rcu(ctx);
|
||||
|
@ -1776,7 +1776,7 @@ static int igt_vm_isolation(void *arg)
|
|||
u32 expected;
|
||||
int err;
|
||||
|
||||
if (INTEL_GEN(i915) < 7)
|
||||
if (GRAPHICS_VER(i915) < 7)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
|
@ -1830,7 +1830,7 @@ static int igt_vm_isolation(void *arg)
|
|||
continue;
|
||||
|
||||
/* Not all engines have their own GPR! */
|
||||
if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
|
||||
if (GRAPHICS_VER(i915) < 8 && engine->class != RENDER_CLASS)
|
||||
continue;
|
||||
|
||||
while (!__igt_timeout(end_time, NULL)) {
|
||||
|
|
|
@ -273,7 +273,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
|
|||
static unsigned int
|
||||
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
|
||||
{
|
||||
if (INTEL_GEN(i915) <= 2) {
|
||||
if (GRAPHICS_VER(i915) <= 2) {
|
||||
tile->height = 16;
|
||||
tile->width = 128;
|
||||
tile->size = 11;
|
||||
|
@ -288,9 +288,9 @@ setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
|
|||
tile->size = 12;
|
||||
}
|
||||
|
||||
if (INTEL_GEN(i915) < 4)
|
||||
if (GRAPHICS_VER(i915) < 4)
|
||||
return 8192 / tile->width;
|
||||
else if (INTEL_GEN(i915) < 7)
|
||||
else if (GRAPHICS_VER(i915) < 7)
|
||||
return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
|
||||
else
|
||||
return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
|
||||
|
@ -386,7 +386,7 @@ static int igt_partial_tiling(void *arg)
|
|||
if (err)
|
||||
goto out_unlock;
|
||||
|
||||
if (pitch > 2 && INTEL_GEN(i915) >= 4) {
|
||||
if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
|
||||
tile.stride = tile.width * (pitch - 1);
|
||||
err = check_partial_mappings(obj, &tile, end);
|
||||
if (err == -EINTR)
|
||||
|
@ -395,7 +395,7 @@ static int igt_partial_tiling(void *arg)
|
|||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
|
||||
if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
|
||||
tile.stride = tile.width * (pitch + 1);
|
||||
err = check_partial_mappings(obj, &tile, end);
|
||||
if (err == -EINTR)
|
||||
|
@ -405,7 +405,7 @@ static int igt_partial_tiling(void *arg)
|
|||
}
|
||||
}
|
||||
|
||||
if (INTEL_GEN(i915) >= 4) {
|
||||
if (GRAPHICS_VER(i915) >= 4) {
|
||||
for_each_prime_number(pitch, max_pitch) {
|
||||
tile.stride = tile.width * pitch;
|
||||
err = check_partial_mappings(obj, &tile, end);
|
||||
|
@ -501,7 +501,7 @@ static int igt_smoke_tiling(void *arg)
|
|||
tile.stride =
|
||||
i915_prandom_u32_max_state(max_pitch, &prng);
|
||||
tile.stride = (1 + tile.stride) * tile.width;
|
||||
if (INTEL_GEN(i915) < 4)
|
||||
if (GRAPHICS_VER(i915) < 4)
|
||||
tile.stride = rounddown_pow_of_two(tile.stride);
|
||||
}
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@ igt_emit_store_dw(struct i915_vma *vma,
|
|||
u32 val)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
const int gen = INTEL_GEN(vma->vm->i915);
|
||||
const int ver = GRAPHICS_VER(vma->vm->i915);
|
||||
unsigned long n, size;
|
||||
u32 *cmd;
|
||||
int err;
|
||||
|
@ -65,14 +65,14 @@ igt_emit_store_dw(struct i915_vma *vma,
|
|||
offset += vma->node.start;
|
||||
|
||||
for (n = 0; n < count; n++) {
|
||||
if (gen >= 8) {
|
||||
if (ver >= 8) {
|
||||
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
|
||||
*cmd++ = lower_32_bits(offset);
|
||||
*cmd++ = upper_32_bits(offset);
|
||||
*cmd++ = val;
|
||||
} else if (gen >= 4) {
|
||||
} else if (ver >= 4) {
|
||||
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
|
||||
(gen < 6 ? MI_USE_GGTT : 0);
|
||||
(ver < 6 ? MI_USE_GGTT : 0);
|
||||
*cmd++ = 0;
|
||||
*cmd++ = offset;
|
||||
*cmd++ = val;
|
||||
|
@ -146,7 +146,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
|
|||
goto skip_request;
|
||||
|
||||
flags = 0;
|
||||
if (INTEL_GEN(ce->vm->i915) <= 5)
|
||||
if (GRAPHICS_VER(ce->vm->i915) <= 5)
|
||||
flags |= I915_DISPATCH_SECURE;
|
||||
|
||||
err = rq->engine->emit_bb_start(rq,
|
||||
|
|
|
@ -85,14 +85,14 @@ static int gen6_drpc(struct seq_file *m)
|
|||
gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
|
||||
|
||||
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
|
||||
if (INTEL_GEN(i915) >= 9) {
|
||||
if (GRAPHICS_VER(i915) >= 9) {
|
||||
gen9_powergate_enable =
|
||||
intel_uncore_read(uncore, GEN9_PG_ENABLE);
|
||||
gen9_powergate_status =
|
||||
intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
|
||||
}
|
||||
|
||||
if (INTEL_GEN(i915) <= 7)
|
||||
if (GRAPHICS_VER(i915) <= 7)
|
||||
sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
|
||||
&rc6vids, NULL);
|
||||
|
||||
|
@ -100,7 +100,7 @@ static int gen6_drpc(struct seq_file *m)
|
|||
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
|
||||
seq_printf(m, "RC6 Enabled: %s\n",
|
||||
yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
|
||||
if (INTEL_GEN(i915) >= 9) {
|
||||
if (GRAPHICS_VER(i915) >= 9) {
|
||||
seq_printf(m, "Render Well Gating Enabled: %s\n",
|
||||
yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
|
||||
seq_printf(m, "Media Well Gating Enabled: %s\n",
|
||||
|
@ -134,7 +134,7 @@ static int gen6_drpc(struct seq_file *m)
|
|||
|
||||
seq_printf(m, "Core Power Down: %s\n",
|
||||
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
|
||||
if (INTEL_GEN(i915) >= 9) {
|
||||
if (GRAPHICS_VER(i915) >= 9) {
|
||||
seq_printf(m, "Render Power Well: %s\n",
|
||||
(gen9_powergate_status &
|
||||
GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
|
||||
|
@ -150,7 +150,7 @@ static int gen6_drpc(struct seq_file *m)
|
|||
print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
|
||||
print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
|
||||
|
||||
if (INTEL_GEN(i915) <= 7) {
|
||||
if (GRAPHICS_VER(i915) <= 7) {
|
||||
seq_printf(m, "RC6 voltage: %dmV\n",
|
||||
GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
|
||||
seq_printf(m, "RC6+ voltage: %dmV\n",
|
||||
|
@ -230,7 +230,7 @@ static int drpc_show(struct seq_file *m, void *unused)
|
|||
with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
|
||||
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
|
||||
err = vlv_drpc(m);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
err = gen6_drpc(m);
|
||||
else
|
||||
err = ilk_drpc(m);
|
||||
|
@ -250,7 +250,7 @@ static int frequency_show(struct seq_file *m, void *unused)
|
|||
|
||||
wakeref = intel_runtime_pm_get(uncore->rpm);
|
||||
|
||||
if (IS_GEN(i915, 5)) {
|
||||
if (GRAPHICS_VER(i915) == 5) {
|
||||
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
|
||||
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
|
||||
|
||||
|
@ -296,7 +296,7 @@ static int frequency_show(struct seq_file *m, void *unused)
|
|||
|
||||
seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
|
||||
intel_gpu_freq(rps, rps->efficient_freq));
|
||||
} else if (INTEL_GEN(i915) >= 6) {
|
||||
} else if (GRAPHICS_VER(i915) >= 6) {
|
||||
u32 rp_state_limits;
|
||||
u32 gt_perf_status;
|
||||
u32 rp_state_cap;
|
||||
|
@ -321,7 +321,7 @@ static int frequency_show(struct seq_file *m, void *unused)
|
|||
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
|
||||
|
||||
reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
|
||||
if (INTEL_GEN(i915) >= 9) {
|
||||
if (GRAPHICS_VER(i915) >= 9) {
|
||||
reqf >>= 23;
|
||||
} else {
|
||||
reqf &= ~GEN6_TURBO_DISABLE;
|
||||
|
@ -354,7 +354,7 @@ static int frequency_show(struct seq_file *m, void *unused)
|
|||
|
||||
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
|
||||
|
||||
if (INTEL_GEN(i915) >= 11) {
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
|
||||
pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
|
||||
/*
|
||||
|
@ -363,7 +363,7 @@ static int frequency_show(struct seq_file *m, void *unused)
|
|||
*/
|
||||
pm_isr = 0;
|
||||
pm_iir = 0;
|
||||
} else if (INTEL_GEN(i915) >= 8) {
|
||||
} else if (GRAPHICS_VER(i915) >= 8) {
|
||||
pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
|
||||
pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
|
||||
pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
|
||||
|
@ -386,14 +386,14 @@ static int frequency_show(struct seq_file *m, void *unused)
|
|||
|
||||
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
|
||||
pm_ier, pm_imr, pm_mask);
|
||||
if (INTEL_GEN(i915) <= 10)
|
||||
if (GRAPHICS_VER(i915) <= 10)
|
||||
seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
|
||||
pm_isr, pm_iir);
|
||||
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
|
||||
rps->pm_intrmsk_mbz);
|
||||
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
|
||||
seq_printf(m, "Render p-state ratio: %d\n",
|
||||
(gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
|
||||
(gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
|
||||
seq_printf(m, "Render p-state VID: %d\n",
|
||||
gt_perf_status & 0xff);
|
||||
seq_printf(m, "Render p-state limit: %d\n",
|
||||
|
@ -437,20 +437,20 @@ static int frequency_show(struct seq_file *m, void *unused)
|
|||
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
|
||||
rp_state_cap >> 16) & 0xff;
|
||||
max_freq *= (IS_GEN9_BC(i915) ||
|
||||
INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
|
||||
GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
|
||||
intel_gpu_freq(rps, max_freq));
|
||||
|
||||
max_freq = (rp_state_cap & 0xff00) >> 8;
|
||||
max_freq *= (IS_GEN9_BC(i915) ||
|
||||
INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
|
||||
GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
|
||||
intel_gpu_freq(rps, max_freq));
|
||||
|
||||
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
|
||||
rp_state_cap >> 0) & 0xff;
|
||||
max_freq *= (IS_GEN9_BC(i915) ||
|
||||
INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
|
||||
GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
|
||||
intel_gpu_freq(rps, max_freq));
|
||||
seq_printf(m, "Max overclocked frequency: %dMHz\n",
|
||||
|
@ -488,7 +488,7 @@ static int llc_show(struct seq_file *m, void *data)
|
|||
{
|
||||
struct intel_gt *gt = m->private;
|
||||
struct drm_i915_private *i915 = gt->i915;
|
||||
const bool edram = INTEL_GEN(i915) > 8;
|
||||
const bool edram = GRAPHICS_VER(i915) > 8;
|
||||
struct intel_rps *rps = >->rps;
|
||||
unsigned int max_gpu_freq, min_gpu_freq;
|
||||
intel_wakeref_t wakeref;
|
||||
|
@ -500,7 +500,7 @@ static int llc_show(struct seq_file *m, void *data)
|
|||
|
||||
min_gpu_freq = rps->min_freq;
|
||||
max_gpu_freq = rps->max_freq;
|
||||
if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
|
||||
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
|
||||
/* Convert GT frequency to 50 HZ units */
|
||||
min_gpu_freq /= GEN9_FREQ_SCALER;
|
||||
max_gpu_freq /= GEN9_FREQ_SCALER;
|
||||
|
@ -518,7 +518,7 @@ static int llc_show(struct seq_file *m, void *data)
|
|||
intel_gpu_freq(rps,
|
||||
(gpu_freq *
|
||||
(IS_GEN9_BC(i915) ||
|
||||
INTEL_GEN(i915) >= 10 ?
|
||||
GRAPHICS_VER(i915) >= 10 ?
|
||||
GEN9_FREQ_SCALER : 1))),
|
||||
((ia_freq >> 0) & 0xff) * 100,
|
||||
((ia_freq >> 8) & 0xff) * 100);
|
||||
|
@ -580,7 +580,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
|
|||
|
||||
seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
|
||||
|
||||
if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
|
||||
if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
u32 rpup, rpupei;
|
||||
u32 rpdown, rpdownei;
|
||||
|
|
|
@ -74,7 +74,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
|
|||
cmd = MI_FLUSH;
|
||||
if (mode & EMIT_INVALIDATE) {
|
||||
cmd |= MI_EXE_FLUSH;
|
||||
if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
|
||||
if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
|
||||
cmd |= MI_INVALIDATE_ISP;
|
||||
}
|
||||
|
||||
|
|
|
@ -38,7 +38,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
|
|||
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
|
||||
* pipe control.
|
||||
*/
|
||||
if (IS_GEN(rq->engine->i915, 9))
|
||||
if (GRAPHICS_VER(rq->engine->i915) == 9)
|
||||
vf_flush_wa = true;
|
||||
|
||||
/* WaForGAMHang:kbl */
|
||||
|
|
|
@ -709,7 +709,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
|
|||
*
|
||||
* Gen12 has inherited the same read-only fault issue from gen11.
|
||||
*/
|
||||
ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
|
||||
ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
|
||||
|
||||
if (HAS_LMEM(gt->i915))
|
||||
ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
|
||||
|
|
|
@ -76,7 +76,7 @@ intel_context_reconfigure_sseu(struct intel_context *ce,
|
|||
{
|
||||
int ret;
|
||||
|
||||
GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
|
||||
GEM_BUG_ON(GRAPHICS_VER(ce->engine->i915) < 8);
|
||||
|
||||
ret = intel_context_lock_pinned(ce);
|
||||
if (ret)
|
||||
|
|
|
@@ -240,10 +240,10 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
 	 * Though they added more rings on g4x/ilk, they did not add
 	 * per-engine HWSTAM until gen6.
 	 */
-	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
+	if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
 		return;
 
-	if (INTEL_GEN(engine->i915) >= 3)
+	if (GRAPHICS_VER(engine->i915) >= 3)
 		ENGINE_WRITE(engine, RING_HWSTAM, mask);
 	else
 		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
@@ -265,6 +265,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	const struct engine_info *info = &intel_engines[id];
 	struct drm_i915_private *i915 = gt->i915;
 	struct intel_engine_cs *engine;
+	u8 guc_class;
 
 	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
 	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
@@ -293,9 +294,10 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->i915 = i915;
 	engine->gt = gt;
 	engine->uncore = gt->uncore;
-	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
 	engine->hw_id = info->hw_id;
-	engine->guc_id = MAKE_GUC_ID(info->class, info->instance);
+	guc_class = engine_class_to_guc_class(info->class);
+	engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
+	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
 
 	engine->irq_handler = nop_irq_handler;
 
@@ -315,7 +317,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 			CONFIG_DRM_I915_TIMESLICE_DURATION;
 
 	/* Override to uninterruptible for OpenCL workloads. */
-	if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+	if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
 		engine->props.preempt_timeout_ms = 0;
 
 	engine->defaults = engine->props; /* never to change again */
@@ -352,8 +354,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
 		 * HEVC support is present on first engine instance
 		 * before Gen11 and on all instances afterwards.
 		 */
-		if (INTEL_GEN(i915) >= 11 ||
-		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+		if (GRAPHICS_VER(i915) >= 11 ||
+		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
 			engine->uabi_capabilities |=
 				I915_VIDEO_CLASS_CAPABILITY_HEVC;
 
@@ -361,14 +363,14 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
 		 * SFC block is present only on even logical engine
 		 * instances.
 		 */
-		if ((INTEL_GEN(i915) >= 11 &&
+		if ((GRAPHICS_VER(i915) >= 11 &&
 		     (engine->gt->info.vdbox_sfc_access &
 		      BIT(engine->instance))) ||
-		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
 			engine->uabi_capabilities |=
 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
 	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
-		if (INTEL_GEN(i915) >= 9)
+		if (GRAPHICS_VER(i915) >= 9)
 			engine->uabi_capabilities |=
 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
 	}
@@ -466,7 +468,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 
 	info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
 
-	if (INTEL_GEN(i915) < 11)
+	if (GRAPHICS_VER(i915) < 11)
 		return info->engine_mask;
 
 	media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
@@ -492,7 +494,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 		 * hooked up to an SFC (Scaler & Format Converter) unit.
 		 * In TGL each VDBOX has access to an SFC.
 		 */
-		if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
+		if (GRAPHICS_VER(i915) >= 12 || logical_vdbox++ % 2 == 0)
 			gt->info.vdbox_sfc_access |= BIT(i);
 	}
 	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
@@ -729,7 +731,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_whitelist(engine);
 	intel_engine_init_ctx_wa(engine);
 
-	if (INTEL_GEN(engine->i915) >= 12)
+	if (GRAPHICS_VER(engine->i915) >= 12)
 		engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
 
 	return 0;
@@ -997,9 +999,9 @@ u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
 
 	u64 acthd;
 
-	if (INTEL_GEN(i915) >= 8)
+	if (GRAPHICS_VER(i915) >= 8)
 		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
-	else if (INTEL_GEN(i915) >= 4)
+	else if (GRAPHICS_VER(i915) >= 4)
 		acthd = ENGINE_READ(engine, RING_ACTHD);
 	else
 		acthd = ENGINE_READ(engine, ACTHD);
@@ -1011,7 +1013,7 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
 {
 	u64 bbaddr;
 
-	if (INTEL_GEN(engine->i915) >= 8)
+	if (GRAPHICS_VER(engine->i915) >= 8)
 		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
 	else
 		bbaddr = ENGINE_READ(engine, RING_BBADDR);
@@ -1058,7 +1060,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 {
 	int err = 0;
 
-	if (INTEL_GEN(engine->i915) < 3)
+	if (GRAPHICS_VER(engine->i915) < 3)
 		return -ENODEV;
 
 	ENGINE_TRACE(engine, "\n");
@ -1108,7 +1110,7 @@ read_subslice_reg(const struct intel_engine_cs *engine,
|
|||
u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
|
||||
enum forcewake_domains fw_domains;
|
||||
|
||||
if (INTEL_GEN(i915) >= 11) {
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
|
||||
mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
|
||||
} else {
|
||||
|
@ -1157,7 +1159,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
|
|||
|
||||
memset(instdone, 0, sizeof(*instdone));
|
||||
|
||||
switch (INTEL_GEN(i915)) {
|
||||
switch (GRAPHICS_VER(i915)) {
|
||||
default:
|
||||
instdone->instdone =
|
||||
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
|
||||
|
@ -1167,7 +1169,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
|
|||
|
||||
instdone->slice_common =
|
||||
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
|
||||
if (INTEL_GEN(i915) >= 12) {
|
||||
if (GRAPHICS_VER(i915) >= 12) {
|
||||
instdone->slice_common_extra[0] =
|
||||
intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
|
||||
instdone->slice_common_extra[1] =
|
||||
|
@ -1230,7 +1232,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
|
|||
idle = false;
|
||||
|
||||
/* No bit for gen2, so assume the CS parser is idle */
|
||||
if (INTEL_GEN(engine->i915) > 2 &&
|
||||
if (GRAPHICS_VER(engine->i915) > 2 &&
|
||||
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
|
||||
idle = false;
|
||||
|
||||
|
@ -1327,7 +1329,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
|
|||
|
||||
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
|
||||
{
|
||||
switch (INTEL_GEN(engine->i915)) {
|
||||
switch (GRAPHICS_VER(engine->i915)) {
|
||||
case 2:
|
||||
return false; /* uses physical not virtual addresses */
|
||||
case 3:
|
||||
|
@ -1432,7 +1434,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
|
|||
struct intel_engine_execlists * const execlists = &engine->execlists;
|
||||
u64 addr;
|
||||
|
||||
if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
|
||||
if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
|
||||
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
|
||||
if (HAS_EXECLISTS(dev_priv)) {
|
||||
drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
|
||||
|
@ -1449,13 +1451,13 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
|
|||
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
|
||||
ENGINE_READ(engine, RING_CTL),
|
||||
ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
|
||||
if (INTEL_GEN(engine->i915) > 2) {
|
||||
if (GRAPHICS_VER(engine->i915) > 2) {
|
||||
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
|
||||
ENGINE_READ(engine, RING_MI_MODE),
|
||||
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
|
||||
}
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 6) {
|
||||
if (GRAPHICS_VER(dev_priv) >= 6) {
|
||||
drm_printf(m, "\tRING_IMR: 0x%08x\n",
|
||||
ENGINE_READ(engine, RING_IMR));
|
||||
drm_printf(m, "\tRING_ESR: 0x%08x\n",
|
||||
|
@ -1472,15 +1474,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
|
|||
addr = intel_engine_get_last_batch_head(engine);
|
||||
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
|
||||
upper_32_bits(addr), lower_32_bits(addr));
|
||||
if (INTEL_GEN(dev_priv) >= 8)
|
||||
if (GRAPHICS_VER(dev_priv) >= 8)
|
||||
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
|
||||
else if (INTEL_GEN(dev_priv) >= 4)
|
||||
else if (GRAPHICS_VER(dev_priv) >= 4)
|
||||
addr = ENGINE_READ(engine, RING_DMA_FADD);
|
||||
else
|
||||
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
|
||||
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
|
||||
upper_32_bits(addr), lower_32_bits(addr));
|
||||
if (INTEL_GEN(dev_priv) >= 4) {
|
||||
if (GRAPHICS_VER(dev_priv) >= 4) {
|
||||
drm_printf(m, "\tIPEIR: 0x%08x\n",
|
||||
ENGINE_READ(engine, RING_IPEIR));
|
||||
drm_printf(m, "\tIPEHR: 0x%08x\n",
|
||||
|
@ -1559,7 +1561,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
|
|||
}
|
||||
rcu_read_unlock();
|
||||
execlists_active_unlock_bh(execlists);
|
||||
} else if (INTEL_GEN(dev_priv) > 6) {
|
||||
} else if (GRAPHICS_VER(dev_priv) > 6) {
|
||||
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
|
||||
ENGINE_READ(engine, RING_PP_DIR_BASE));
|
||||
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
|
||||
|
|
|
@ -606,10 +606,10 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
|
|||
}
|
||||
|
||||
#define instdone_has_slice(dev_priv___, sseu___, slice___) \
|
||||
((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
|
||||
((GRAPHICS_VER(dev_priv___) == 7 ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
|
||||
|
||||
#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
|
||||
(IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
|
||||
(GRAPHICS_VER(dev_priv__) == 7 ? (1 & BIT(subslice__)) : \
|
||||
intel_sseu_has_subslice(sseu__, 0, subslice__))
|
||||
|
||||
#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
|
||||
|
|
|
@ -1847,7 +1847,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
|
|||
ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
|
||||
head, upper_32_bits(csb), lower_32_bits(csb));
|
||||
|
||||
if (INTEL_GEN(engine->i915) >= 12)
|
||||
if (GRAPHICS_VER(engine->i915) >= 12)
|
||||
promote = gen12_csb_parse(csb);
|
||||
else
|
||||
promote = gen8_csb_parse(csb);
|
||||
|
@ -2772,7 +2772,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
|
|||
|
||||
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
|
||||
|
||||
if (INTEL_GEN(engine->i915) >= 11)
|
||||
if (GRAPHICS_VER(engine->i915) >= 11)
|
||||
mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
|
||||
else
|
||||
mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
|
||||
|
@ -3103,7 +3103,7 @@ static void execlists_park(struct intel_engine_cs *engine)
|
|||
|
||||
static bool can_preempt(struct intel_engine_cs *engine)
|
||||
{
|
||||
if (INTEL_GEN(engine->i915) > 8)
|
||||
if (GRAPHICS_VER(engine->i915) > 8)
|
||||
return true;
|
||||
|
||||
/* GPGPU on bdw requires extra w/a; not implemented */
|
||||
|
@ -3156,13 +3156,13 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
|
|||
engine->emit_flush = gen8_emit_flush_xcs;
|
||||
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
|
||||
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
|
||||
if (INTEL_GEN(engine->i915) >= 12) {
|
||||
if (GRAPHICS_VER(engine->i915) >= 12) {
|
||||
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
|
||||
engine->emit_flush = gen12_emit_flush_xcs;
|
||||
}
|
||||
engine->set_default_submission = execlists_set_default_submission;
|
||||
|
||||
if (INTEL_GEN(engine->i915) < 11) {
|
||||
if (GRAPHICS_VER(engine->i915) < 11) {
|
||||
engine->irq_enable = gen8_logical_ring_enable_irq;
|
||||
engine->irq_disable = gen8_logical_ring_disable_irq;
|
||||
} else {
|
||||
|
@ -3195,7 +3195,7 @@ static void logical_ring_default_irqs(struct intel_engine_cs *engine)
|
|||
{
|
||||
unsigned int shift = 0;
|
||||
|
||||
if (INTEL_GEN(engine->i915) < 11) {
|
||||
if (GRAPHICS_VER(engine->i915) < 11) {
|
||||
const u8 irq_shifts[] = {
|
||||
[RCS0] = GEN8_RCS_IRQ_SHIFT,
|
||||
[BCS0] = GEN8_BCS_IRQ_SHIFT,
|
||||
|
@ -3215,7 +3215,7 @@ static void logical_ring_default_irqs(struct intel_engine_cs *engine)
|
|||
|
||||
static void rcs_submission_override(struct intel_engine_cs *engine)
|
||||
{
|
||||
switch (INTEL_GEN(engine->i915)) {
|
||||
switch (GRAPHICS_VER(engine->i915)) {
|
||||
case 12:
|
||||
engine->emit_flush = gen12_emit_flush_rcs;
|
||||
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
|
||||
|
@ -3266,13 +3266,13 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
|
|||
execlists->csb_write =
|
||||
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
|
||||
|
||||
if (INTEL_GEN(i915) < 11)
|
||||
if (GRAPHICS_VER(i915) < 11)
|
||||
execlists->csb_size = GEN8_CSB_ENTRIES;
|
||||
else
|
||||
execlists->csb_size = GEN11_CSB_ENTRIES;
|
||||
|
||||
engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
|
||||
if (INTEL_GEN(engine->i915) >= 11) {
|
||||
if (GRAPHICS_VER(engine->i915) >= 11) {
|
||||
execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
|
||||
execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
|
||||
}
|
||||
|
|
|
@ -107,10 +107,10 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
|
|||
if (!intel_vtd_active())
|
||||
return false;
|
||||
|
||||
if (IS_GEN(i915, 5) && IS_MOBILE(i915))
|
||||
if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
|
||||
return true;
|
||||
|
||||
if (IS_GEN(i915, 12))
|
||||
if (GRAPHICS_VER(i915) == 12)
|
||||
return true; /* XXX DMAR fault reason 7 */
|
||||
|
||||
return false;
|
||||
|
@ -176,7 +176,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
|
|||
|
||||
gen8_ggtt_invalidate(ggtt);
|
||||
|
||||
if (INTEL_GEN(i915) >= 12)
|
||||
if (GRAPHICS_VER(i915) >= 12)
|
||||
intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
|
||||
GEN12_GUC_TLB_INV_CR_INVALIDATE);
|
||||
else
|
||||
|
@ -746,7 +746,6 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
|
|||
|
||||
mutex_unlock(&ggtt->vm.mutex);
|
||||
i915_address_space_fini(&ggtt->vm);
|
||||
dma_resv_fini(&ggtt->vm.resv);
|
||||
|
||||
arch_phys_wc_del(ggtt->mtrr);
|
||||
|
||||
|
@ -768,6 +767,19 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915)
|
|||
ggtt_cleanup_hw(ggtt);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
|
||||
* all free objects have been drained.
|
||||
* @i915: i915 device
|
||||
*/
|
||||
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
|
||||
{
|
||||
struct i915_ggtt *ggtt = &i915->ggtt;
|
||||
|
||||
GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
|
||||
dma_resv_fini(&ggtt->vm._resv);
|
||||
}
|
||||
|
||||
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
|
||||
{
|
||||
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
|
||||
|
@ -820,7 +832,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
|
|||
* resort to an uncached mapping. The WC issue is easily caught by the
|
||||
* readback check when writing GTT PTE entries.
|
||||
*/
|
||||
if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
|
||||
if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 10)
|
||||
ggtt->gsm = ioremap(phys_addr, size);
|
||||
else
|
||||
ggtt->gsm = ioremap_wc(phys_addr, size);
|
||||
|
@ -829,6 +841,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
kref_init(&ggtt->vm.resv_ref);
|
||||
ret = setup_scratch_page(&ggtt->vm);
|
||||
if (ret) {
|
||||
drm_err(&i915->drm, "Scratch setup failed\n");
|
||||
|
@ -1065,7 +1078,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
|
|||
ggtt->vm.pte_encode = hsw_pte_encode;
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
ggtt->vm.pte_encode = byt_pte_encode;
|
||||
else if (INTEL_GEN(i915) >= 7)
|
||||
else if (GRAPHICS_VER(i915) >= 7)
|
||||
ggtt->vm.pte_encode = ivb_pte_encode;
|
||||
else
|
||||
ggtt->vm.pte_encode = snb_pte_encode;
|
||||
|
@ -1135,16 +1148,16 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
|
|||
ggtt->vm.gt = gt;
|
||||
ggtt->vm.i915 = i915;
|
||||
ggtt->vm.dma = i915->drm.dev;
|
||||
dma_resv_init(&ggtt->vm.resv);
|
||||
dma_resv_init(&ggtt->vm._resv);
|
||||
|
||||
if (INTEL_GEN(i915) <= 5)
|
||||
if (GRAPHICS_VER(i915) <= 5)
|
||||
ret = i915_gmch_probe(ggtt);
|
||||
else if (INTEL_GEN(i915) < 8)
|
||||
else if (GRAPHICS_VER(i915) < 8)
|
||||
ret = gen6_gmch_probe(ggtt);
|
||||
else
|
||||
ret = gen8_gmch_probe(ggtt);
|
||||
if (ret) {
|
||||
dma_resv_fini(&ggtt->vm.resv);
|
||||
dma_resv_fini(&ggtt->vm._resv);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1196,7 +1209,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
|
|||
|
||||
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
|
||||
{
|
||||
if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
|
||||
if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
|
@ -1261,7 +1274,7 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
|
|||
if (flush)
|
||||
wbinvd_on_all_cpus();
|
||||
|
||||
if (INTEL_GEN(ggtt->vm.i915) >= 8)
|
||||
if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
|
||||
setup_private_pat(ggtt->vm.gt->uncore);
|
||||
|
||||
intel_ggtt_restore_fences(ggtt);
|
||||
|
|
|
@ -56,7 +56,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence)
|
|||
int fence_pitch_shift;
|
||||
u64 val;
|
||||
|
||||
if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
|
||||
if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
|
||||
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
|
||||
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
|
||||
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
|
||||
|
@ -173,9 +173,9 @@ static void fence_write(struct i915_fence_reg *fence)
|
|||
* and explicitly managed for internal users.
|
||||
*/
|
||||
|
||||
if (IS_GEN(i915, 2))
|
||||
if (GRAPHICS_VER(i915) == 2)
|
||||
i830_write_fence_reg(fence);
|
||||
else if (IS_GEN(i915, 3))
|
||||
else if (GRAPHICS_VER(i915) == 3)
|
||||
i915_write_fence_reg(fence);
|
||||
else
|
||||
i965_write_fence_reg(fence);
|
||||
|
@ -188,7 +188,7 @@ static void fence_write(struct i915_fence_reg *fence)
|
|||
|
||||
static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
|
||||
{
|
||||
return INTEL_GEN(fence_to_i915(fence)) < 4;
|
||||
return GRAPHICS_VER(fence_to_i915(fence)) < 4;
|
||||
}
|
||||
|
||||
static int fence_update(struct i915_fence_reg *fence,
|
||||
|
@ -569,7 +569,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
|
|||
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
|
||||
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
|
||||
|
||||
if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
|
||||
if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
|
||||
/*
|
||||
* On BDW+, swizzling is not used. We leave the CPU memory
|
||||
* controller in charge of optimizing memory accesses without
|
||||
|
@ -579,7 +579,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
|
|||
*/
|
||||
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
|
||||
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
|
||||
} else if (INTEL_GEN(i915) >= 6) {
|
||||
} else if (GRAPHICS_VER(i915) >= 6) {
|
||||
if (i915->preserve_bios_swizzle) {
|
||||
if (intel_uncore_read(uncore, DISP_ARB_CTL) &
|
||||
DISP_TILE_SURFACE_SWIZZLING) {
|
||||
|
@ -611,14 +611,14 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
|
|||
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
|
||||
}
|
||||
}
|
||||
} else if (IS_GEN(i915, 5)) {
|
||||
} else if (GRAPHICS_VER(i915) == 5) {
|
||||
/*
|
||||
* On Ironlake whatever DRAM config, GPU always do
|
||||
* same swizzling setup.
|
||||
*/
|
||||
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
|
||||
swizzle_y = I915_BIT_6_SWIZZLE_9;
|
||||
} else if (IS_GEN(i915, 2)) {
|
||||
} else if (GRAPHICS_VER(i915) == 2) {
|
||||
/*
|
||||
* As far as we know, the 865 doesn't have these bit 6
|
||||
* swizzling issues.
|
||||
|
@ -697,7 +697,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
|
|||
}
|
||||
|
||||
/* check for L-shaped memory aka modified enhanced addressing */
|
||||
if (IS_GEN(i915, 4) &&
|
||||
if (GRAPHICS_VER(i915) == 4 &&
|
||||
!(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
|
||||
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
|
||||
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
|
||||
|
@ -844,10 +844,10 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
|
|||
|
||||
if (!i915_ggtt_has_aperture(ggtt))
|
||||
num_fences = 0;
|
||||
else if (INTEL_GEN(i915) >= 7 &&
|
||||
else if (GRAPHICS_VER(i915) >= 7 &&
|
||||
!(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
|
||||
num_fences = 32;
|
||||
else if (INTEL_GEN(i915) >= 4 ||
|
||||
else if (GRAPHICS_VER(i915) >= 4 ||
|
||||
IS_I945G(i915) || IS_I945GM(i915) ||
|
||||
IS_G33(i915) || IS_PINEVIEW(i915))
|
||||
num_fences = 16;
|
||||
|
@ -895,29 +895,29 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
|
|||
struct drm_i915_private *i915 = gt->i915;
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
|
||||
if (INTEL_GEN(i915) < 5 ||
|
||||
if (GRAPHICS_VER(i915) < 5 ||
|
||||
i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
|
||||
return;
|
||||
|
||||
intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
|
||||
|
||||
if (IS_GEN(i915, 5))
|
||||
if (GRAPHICS_VER(i915) == 5)
|
||||
return;
|
||||
|
||||
intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
|
||||
|
||||
if (IS_GEN(i915, 6))
|
||||
if (GRAPHICS_VER(i915) == 6)
|
||||
intel_uncore_write(uncore,
|
||||
ARB_MODE,
|
||||
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
|
||||
else if (IS_GEN(i915, 7))
|
||||
else if (GRAPHICS_VER(i915) == 7)
|
||||
intel_uncore_write(uncore,
|
||||
ARB_MODE,
|
||||
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
|
||||
else if (IS_GEN(i915, 8))
|
||||
else if (GRAPHICS_VER(i915) == 8)
|
||||
intel_uncore_write(uncore,
|
||||
GAMTARBMODE,
|
||||
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
|
||||
else
|
||||
MISSING_CASE(INTEL_GEN(i915));
|
||||
MISSING_CASE(GRAPHICS_VER(i915));
|
||||
}
|
||||
|
|
|
@ -68,8 +68,6 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
|
|||
id = INTEL_REGION_LMEM;
|
||||
|
||||
mem->id = id;
|
||||
mem->type = INTEL_MEMORY_LOCAL;
|
||||
mem->instance = 0;
|
||||
|
||||
intel_memory_region_set_name(mem, "local%u", mem->instance);
|
||||
|
||||
|
@ -115,10 +113,10 @@ static void init_unused_rings(struct intel_gt *gt)
|
|||
init_unused_ring(gt, SRB1_BASE);
|
||||
init_unused_ring(gt, SRB2_BASE);
|
||||
init_unused_ring(gt, SRB3_BASE);
|
||||
} else if (IS_GEN(i915, 2)) {
|
||||
} else if (GRAPHICS_VER(i915) == 2) {
|
||||
init_unused_ring(gt, SRB0_BASE);
|
||||
init_unused_ring(gt, SRB1_BASE);
|
||||
} else if (IS_GEN(i915, 3)) {
|
||||
} else if (GRAPHICS_VER(i915) == 3) {
|
||||
init_unused_ring(gt, PRB1_BASE);
|
||||
init_unused_ring(gt, PRB2_BASE);
|
||||
}
|
||||
|
@ -135,7 +133,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
|
|||
/* Double layer security blanket, see i915_gem_init() */
|
||||
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
|
||||
|
||||
if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
|
||||
if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
|
||||
intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
|
||||
|
||||
if (IS_HASWELL(i915))
|
||||
|
@ -208,10 +206,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
|
|||
struct intel_uncore *uncore = gt->uncore;
|
||||
u32 eir;
|
||||
|
||||
if (!IS_GEN(i915, 2))
|
||||
if (GRAPHICS_VER(i915) != 2)
|
||||
clear_register(uncore, PGTBL_ER);
|
||||
|
||||
if (INTEL_GEN(i915) < 4)
|
||||
if (GRAPHICS_VER(i915) < 4)
|
||||
clear_register(uncore, IPEIR(RENDER_RING_BASE));
|
||||
else
|
||||
clear_register(uncore, IPEIR_I965);
|
||||
|
@ -229,13 +227,13 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
|
|||
I915_MASTER_ERROR_INTERRUPT);
|
||||
}
|
||||
|
||||
if (INTEL_GEN(i915) >= 12) {
|
||||
if (GRAPHICS_VER(i915) >= 12) {
|
||||
rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
|
||||
intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
|
||||
} else if (INTEL_GEN(i915) >= 8) {
|
||||
} else if (GRAPHICS_VER(i915) >= 8) {
|
||||
rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
|
||||
intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
|
||||
} else if (INTEL_GEN(i915) >= 6) {
|
||||
} else if (GRAPHICS_VER(i915) >= 6) {
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
|
||||
|
@ -273,7 +271,7 @@ static void gen8_check_faults(struct intel_gt *gt)
|
|||
i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
|
||||
u32 fault;
|
||||
|
||||
if (INTEL_GEN(gt->i915) >= 12) {
|
||||
if (GRAPHICS_VER(gt->i915) >= 12) {
|
||||
fault_reg = GEN12_RING_FAULT_REG;
|
||||
fault_data0_reg = GEN12_FAULT_TLB_DATA0;
|
||||
fault_data1_reg = GEN12_FAULT_TLB_DATA1;
|
||||
|
@ -313,9 +311,9 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
|
|||
struct drm_i915_private *i915 = gt->i915;
|
||||
|
||||
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
|
||||
if (INTEL_GEN(i915) >= 8)
|
||||
if (GRAPHICS_VER(i915) >= 8)
|
||||
gen8_check_faults(gt);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
gen6_check_faults(gt);
|
||||
else
|
||||
return;
|
||||
|
@ -367,7 +365,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
|
|||
void intel_gt_chipset_flush(struct intel_gt *gt)
|
||||
{
|
||||
wmb();
|
||||
if (INTEL_GEN(gt->i915) < 6)
|
||||
if (GRAPHICS_VER(gt->i915) < 6)
|
||||
intel_gtt_chipset_flush();
|
||||
}
|
||||
|
||||
|
@ -591,7 +589,8 @@ int intel_gt_init(struct intel_gt *gt)
|
|||
*/
|
||||
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
|
||||
|
||||
err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
|
||||
err = intel_gt_init_scratch(gt,
|
||||
GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
|
||||
if (err)
|
||||
goto out_fw;
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
|
|||
u32 f19_2_mhz = 19200000;
|
||||
u32 f24_mhz = 24000000;
|
||||
|
||||
if (INTEL_GEN(uncore->i915) <= 4) {
|
||||
if (GRAPHICS_VER(uncore->i915) <= 4) {
|
||||
/*
|
||||
* PRMs say:
|
||||
*
|
||||
|
@ -85,7 +85,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
|
|||
* (“CLKCFG”) MCHBAR register)
|
||||
*/
|
||||
return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
|
||||
} else if (INTEL_GEN(uncore->i915) <= 8) {
|
||||
} else if (GRAPHICS_VER(uncore->i915) <= 8) {
|
||||
/*
|
||||
* PRMs say:
|
||||
*
|
||||
|
@ -94,7 +94,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
|
|||
* rolling over every 1.5 hours).
|
||||
*/
|
||||
return f12_5_mhz;
|
||||
} else if (INTEL_GEN(uncore->i915) <= 9) {
|
||||
} else if (GRAPHICS_VER(uncore->i915) <= 9) {
|
||||
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
|
||||
u32 freq = 0;
|
||||
|
||||
|
@ -113,7 +113,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
|
|||
}
|
||||
|
||||
return freq;
|
||||
} else if (INTEL_GEN(uncore->i915) <= 12) {
|
||||
} else if (GRAPHICS_VER(uncore->i915) <= 12) {
|
||||
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
|
||||
u32 freq = 0;
|
||||
|
||||
|
@ -128,7 +128,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
|
|||
} else {
|
||||
u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
|
||||
|
||||
if (INTEL_GEN(uncore->i915) <= 10)
|
||||
if (GRAPHICS_VER(uncore->i915) <= 10)
|
||||
freq = gen10_get_crystal_clock_freq(uncore, c0);
|
||||
else
|
||||
freq = gen11_get_crystal_clock_freq(uncore, c0);
|
||||
|
@ -211,7 +211,7 @@ u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
|
|||
* frozen machine.
|
||||
*/
|
||||
val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
|
||||
if (IS_GEN(gt->i915, 6))
|
||||
if (GRAPHICS_VER(gt->i915) == 6)
|
||||
val = div_u64_roundup(val, 25) * 25;
|
||||
|
||||
return val;
|
||||
|
|
|
@@ -194,14 +194,18 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
 
 void gen11_gt_irq_postinstall(struct intel_gt *gt)
 {
-	const u32 irqs =
-		GT_CS_MASTER_ERROR_INTERRUPT |
-		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT |
-		GT_WAIT_SEMAPHORE_INTERRUPT;
 	struct intel_uncore *uncore = gt->uncore;
-	const u32 dmask = irqs << 16 | irqs;
-	const u32 smask = irqs << 16;
+	u32 irqs = GT_RENDER_USER_INTERRUPT;
+	u32 dmask;
+	u32 smask;
+
+	if (!intel_uc_wants_guc_submission(&gt->uc))
+		irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
+			GT_CONTEXT_SWITCH_INTERRUPT |
+			GT_WAIT_SEMAPHORE_INTERRUPT;
+
+	dmask = irqs << 16 | irqs;
+	smask = irqs << 16;
 
 	BUILD_BUG_ON(irqs & 0xffff0000);
 
@ -395,7 +399,7 @@ void gen5_gt_irq_reset(struct intel_gt *gt)
|
|||
struct intel_uncore *uncore = gt->uncore;
|
||||
|
||||
GEN3_IRQ_RESET(uncore, GT);
|
||||
if (INTEL_GEN(gt->i915) >= 6)
|
||||
if (GRAPHICS_VER(gt->i915) >= 6)
|
||||
GEN3_IRQ_RESET(uncore, GEN6_PM);
|
||||
}
|
||||
|
||||
|
@ -413,14 +417,14 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
|
|||
}
|
||||
|
||||
gt_irqs |= GT_RENDER_USER_INTERRUPT;
|
||||
if (IS_GEN(gt->i915, 5))
|
||||
if (GRAPHICS_VER(gt->i915) == 5)
|
||||
gt_irqs |= ILK_BSD_USER_INTERRUPT;
|
||||
else
|
||||
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
|
||||
|
||||
GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
|
||||
|
||||
if (INTEL_GEN(gt->i915) >= 6) {
|
||||
if (GRAPHICS_VER(gt->i915) >= 6) {
|
||||
/*
|
||||
* RPS interrupts will get enabled/disabled on demand when RPS
|
||||
* itself is enabled/disabled.
|
||||
|
|
|
@ -16,10 +16,10 @@ static void write_pm_imr(struct intel_gt *gt)
|
|||
u32 mask = gt->pm_imr;
|
||||
i915_reg_t reg;
|
||||
|
||||
if (INTEL_GEN(i915) >= 11) {
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
|
||||
mask <<= 16; /* pm is in upper half */
|
||||
} else if (INTEL_GEN(i915) >= 8) {
|
||||
} else if (GRAPHICS_VER(i915) >= 8) {
|
||||
reg = GEN8_GT_IMR(2);
|
||||
} else {
|
||||
reg = GEN6_PMIMR;
|
||||
|
@ -61,7 +61,7 @@ void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
|
|||
void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
|
||||
{
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
|
||||
i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
|
||||
|
||||
lockdep_assert_held(&gt->irq_lock);
|
||||
|
||||
|
@ -77,10 +77,10 @@ static void write_pm_ier(struct intel_gt *gt)
|
|||
u32 mask = gt->pm_ier;
|
||||
i915_reg_t reg;
|
||||
|
||||
if (INTEL_GEN(i915) >= 11) {
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
|
||||
mask <<= 16; /* pm is in upper half */
|
||||
} else if (INTEL_GEN(i915) >= 8) {
|
||||
} else if (GRAPHICS_VER(i915) >= 8) {
|
||||
reg = GEN8_GT_IER(2);
|
||||
} else {
|
||||
reg = GEN6_PMIER;
|
||||
|
|
|
@ -22,8 +22,11 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
|
|||
* object underneath, with the idea that one object_lock() will lock
|
||||
* them all at once.
|
||||
*/
|
||||
if (!IS_ERR(obj))
|
||||
obj->base.resv = &vm->resv;
|
||||
if (!IS_ERR(obj)) {
|
||||
obj->base.resv = i915_vm_resv_get(vm);
|
||||
obj->shares_resv_from = vm;
|
||||
}
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
|
@ -40,8 +43,11 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
|
|||
* object underneath, with the idea that one object_lock() will lock
|
||||
* them all at once.
|
||||
*/
|
||||
if (!IS_ERR(obj))
|
||||
obj->base.resv = &vm->resv;
|
||||
if (!IS_ERR(obj)) {
|
||||
obj->base.resv = i915_vm_resv_get(vm);
|
||||
obj->shares_resv_from = vm;
|
||||
}
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
|
@ -102,7 +108,7 @@ void __i915_vm_close(struct i915_address_space *vm)
|
|||
int i915_vm_lock_objects(struct i915_address_space *vm,
|
||||
struct i915_gem_ww_ctx *ww)
|
||||
{
|
||||
if (vm->scratch[0]->base.resv == &vm->resv) {
|
||||
if (vm->scratch[0]->base.resv == &vm->_resv) {
|
||||
return i915_gem_object_lock(vm->scratch[0], ww);
|
||||
} else {
|
||||
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
|
||||
|
@ -118,6 +124,22 @@ void i915_address_space_fini(struct i915_address_space *vm)
|
|||
mutex_destroy(&vm->mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_vm_resv_release - Final struct i915_address_space destructor
|
||||
* @kref: Pointer to the &i915_address_space.resv_ref member.
|
||||
*
|
||||
* This function is called when the last lock sharer no longer shares the
|
||||
* &i915_address_space._resv lock.
|
||||
*/
|
||||
void i915_vm_resv_release(struct kref *kref)
|
||||
{
|
||||
struct i915_address_space *vm =
|
||||
container_of(kref, typeof(*vm), resv_ref);
|
||||
|
||||
dma_resv_fini(&vm->_resv);
|
||||
kfree(vm);
|
||||
}
|
||||
|
||||
static void __i915_vm_release(struct work_struct *work)
|
||||
{
|
||||
struct i915_address_space *vm =
|
||||
|
@ -125,9 +147,8 @@ static void __i915_vm_release(struct work_struct *work)
|
|||
|
||||
vm->cleanup(vm);
|
||||
i915_address_space_fini(vm);
|
||||
dma_resv_fini(&vm->resv);
|
||||
|
||||
kfree(vm);
|
||||
i915_vm_resv_put(vm);
|
||||
}
|
||||
|
||||
void i915_vm_release(struct kref *kref)
|
||||
|
@ -144,6 +165,14 @@ void i915_vm_release(struct kref *kref)
|
|||
void i915_address_space_init(struct i915_address_space *vm, int subclass)
|
||||
{
|
||||
kref_init(&vm->ref);
|
||||
|
||||
/*
|
||||
* Special case for GGTT that has already done an early
|
||||
* kref_init here.
|
||||
*/
|
||||
if (!kref_read(&vm->resv_ref))
|
||||
kref_init(&vm->resv_ref);
|
||||
|
||||
INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
|
||||
atomic_set(&vm->open, 1);
|
||||
|
||||
|
@ -170,7 +199,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
|
|||
might_alloc(GFP_KERNEL);
|
||||
mutex_release(&vm->mutex.dep_map, _THIS_IP_);
|
||||
}
|
||||
dma_resv_init(&vm->resv);
|
||||
dma_resv_init(&vm->_resv);
|
||||
|
||||
GEM_BUG_ON(!vm->total);
|
||||
drm_mm_init(&vm->mm, 0, vm->total);
|
||||
|
@ -327,7 +356,7 @@ void gtt_write_workarounds(struct intel_gt *gt)
|
|||
intel_uncore_write(uncore,
|
||||
GEN8_L3_LRA_1_GPGPU,
|
||||
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
|
||||
else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
|
||||
else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
|
||||
intel_uncore_write(uncore,
|
||||
GEN8_L3_LRA_1_GPGPU,
|
||||
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
|
||||
|
@ -344,13 +373,13 @@ void gtt_write_workarounds(struct intel_gt *gt)
|
|||
* driver.
|
||||
*/
|
||||
if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
|
||||
INTEL_GEN(i915) <= 10)
|
||||
GRAPHICS_VER(i915) <= 10)
|
||||
intel_uncore_rmw(uncore,
|
||||
GEN8_GAMW_ECO_DEV_RW_IA,
|
||||
0,
|
||||
GAMW_ECO_ENABLE_64K_IPS_FIELD);
|
||||
|
||||
if (IS_GEN_RANGE(i915, 8, 11)) {
|
||||
if (IS_GRAPHICS_VER(i915, 8, 11)) {
|
||||
bool can_use_gtt_cache = true;
|
||||
|
||||
/*
|
||||
|
@ -432,7 +461,7 @@ static void bdw_setup_private_ppat(struct intel_uncore *uncore)
|
|||
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
|
||||
|
||||
/* for scanout with eLLC */
|
||||
if (INTEL_GEN(i915) >= 9)
|
||||
if (GRAPHICS_VER(i915) >= 9)
|
||||
pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
|
||||
else
|
||||
pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
|
||||
|
@ -481,11 +510,11 @@ void setup_private_pat(struct intel_uncore *uncore)
|
|||
{
|
||||
struct drm_i915_private *i915 = uncore->i915;
|
||||
|
||||
GEM_BUG_ON(INTEL_GEN(i915) < 8);
|
||||
GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
|
||||
|
||||
if (INTEL_GEN(i915) >= 12)
|
||||
if (GRAPHICS_VER(i915) >= 12)
|
||||
tgl_setup_private_ppat(uncore);
|
||||
else if (INTEL_GEN(i915) >= 10)
|
||||
else if (GRAPHICS_VER(i915) >= 10)
|
||||
cnl_setup_private_ppat(uncore);
|
||||
else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
|
||||
chv_setup_private_ppat(uncore);
|
||||
|
|
|
@ -245,7 +245,9 @@ struct i915_address_space {
|
|||
atomic_t open;
|
||||
|
||||
struct mutex mutex; /* protects vma and our lists */
|
||||
struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
|
||||
|
||||
struct kref resv_ref; /* kref to keep the reservation lock alive. */
|
||||
struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
|
||||
#define VM_CLASS_GGTT 0
|
||||
#define VM_CLASS_PPGTT 1
|
||||
#define VM_CLASS_DPT 2
|
||||
|
@ -405,13 +407,36 @@ i915_vm_get(struct i915_address_space *vm)
|
|||
return vm;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_vm_resv_get - Obtain a reference on the vm's reservation lock
|
||||
* @vm: The vm whose reservation lock we want to share.
|
||||
*
|
||||
* Return: A pointer to the vm's reservation lock.
|
||||
*/
|
||||
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
|
||||
{
|
||||
kref_get(&vm->resv_ref);
|
||||
return &vm->_resv;
|
||||
}
|
||||
|
||||
void i915_vm_release(struct kref *kref);
|
||||
|
||||
void i915_vm_resv_release(struct kref *kref);
|
||||
|
||||
static inline void i915_vm_put(struct i915_address_space *vm)
|
||||
{
|
||||
kref_put(&vm->ref, i915_vm_release);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_vm_resv_put - Release a reference on the vm's reservation lock
|
||||
* @resv: Pointer to a reservation lock obtained from i915_vm_resv_get()
|
||||
*/
|
||||
static inline void i915_vm_resv_put(struct i915_address_space *vm)
|
||||
{
|
||||
kref_put(&vm->resv_ref, i915_vm_resv_release);
|
||||
}
|
||||
|
||||
static inline struct i915_address_space *
|
||||
i915_vm_open(struct i915_address_space *vm)
|
||||
{
|
||||
|
@ -507,6 +532,7 @@ void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
|
|||
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
|
||||
int i915_init_ggtt(struct drm_i915_private *i915);
|
||||
void i915_ggtt_driver_release(struct drm_i915_private *i915);
|
||||
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
|
||||
|
||||
static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
|
||||
{
|
||||
|
|
|
@ -64,7 +64,7 @@ static bool get_ia_constants(struct intel_llc *llc,
|
|||
|
||||
consts->min_gpu_freq = rps->min_freq;
|
||||
consts->max_gpu_freq = rps->max_freq;
|
||||
if (INTEL_GEN(i915) >= 9) {
|
||||
if (GRAPHICS_VER(i915) >= 9) {
|
||||
/* Convert GT frequency to 50 HZ units */
|
||||
consts->min_gpu_freq /= GEN9_FREQ_SCALER;
|
||||
consts->max_gpu_freq /= GEN9_FREQ_SCALER;
|
||||
|
@ -83,13 +83,13 @@ static void calc_ia_freq(struct intel_llc *llc,
|
|||
const int diff = consts->max_gpu_freq - gpu_freq;
|
||||
unsigned int ia_freq = 0, ring_freq = 0;
|
||||
|
||||
if (INTEL_GEN(i915) >= 9) {
|
||||
if (GRAPHICS_VER(i915) >= 9) {
|
||||
/*
|
||||
* ring_freq = 2 * GT. ring_freq is in 100MHz units
|
||||
* No floor required for ring frequency on SKL.
|
||||
*/
|
||||
ring_freq = gpu_freq;
|
||||
} else if (INTEL_GEN(i915) >= 8) {
|
||||
} else if (GRAPHICS_VER(i915) >= 8) {
|
||||
/* max(2 * GT, DDR). NB: GT is 50MHz units */
|
||||
ring_freq = max(consts->min_ring_freq, gpu_freq);
|
||||
} else if (IS_HASWELL(i915)) {
|
||||
|
|
|
@ -47,7 +47,7 @@ static void set_offsets(u32 *regs,
|
|||
*regs = MI_LOAD_REGISTER_IMM(count);
|
||||
if (flags & POSTED)
|
||||
*regs |= MI_LRI_FORCE_POSTED;
|
||||
if (INTEL_GEN(engine->i915) >= 11)
|
||||
if (GRAPHICS_VER(engine->i915) >= 11)
|
||||
*regs |= MI_LRI_LRM_CS_MMIO;
|
||||
regs++;
|
||||
|
||||
|
@ -70,7 +70,7 @@ static void set_offsets(u32 *regs,
|
|||
if (close) {
|
||||
/* Close the batch; used mainly by live_lrc_layout() */
|
||||
*regs = MI_BATCH_BUFFER_END;
|
||||
if (INTEL_GEN(engine->i915) >= 10)
|
||||
if (GRAPHICS_VER(engine->i915) >= 10)
|
||||
*regs |= BIT(0);
|
||||
}
|
||||
}
|
||||
|
@ -498,22 +498,22 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
|
|||
* addressing to automatic fixup the register state between the
|
||||
* physical engines for virtual engine.
|
||||
*/
|
||||
GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
|
||||
GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&
|
||||
!intel_engine_has_relative_mmio(engine));
|
||||
|
||||
if (engine->class == RENDER_CLASS) {
|
||||
if (INTEL_GEN(engine->i915) >= 12)
|
||||
if (GRAPHICS_VER(engine->i915) >= 12)
|
||||
return gen12_rcs_offsets;
|
||||
else if (INTEL_GEN(engine->i915) >= 11)
|
||||
else if (GRAPHICS_VER(engine->i915) >= 11)
|
||||
return gen11_rcs_offsets;
|
||||
else if (INTEL_GEN(engine->i915) >= 9)
|
||||
else if (GRAPHICS_VER(engine->i915) >= 9)
|
||||
return gen9_rcs_offsets;
|
||||
else
|
||||
return gen8_rcs_offsets;
|
||||
} else {
|
||||
if (INTEL_GEN(engine->i915) >= 12)
|
||||
if (GRAPHICS_VER(engine->i915) >= 12)
|
||||
return gen12_xcs_offsets;
|
||||
else if (INTEL_GEN(engine->i915) >= 9)
|
||||
else if (GRAPHICS_VER(engine->i915) >= 9)
|
||||
return gen9_xcs_offsets;
|
||||
else
|
||||
return gen8_xcs_offsets;
|
||||
|
@ -522,9 +522,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
|
|||
|
||||
static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
|
||||
{
|
||||
if (INTEL_GEN(engine->i915) >= 12)
|
||||
if (GRAPHICS_VER(engine->i915) >= 12)
|
||||
return 0x60;
|
||||
else if (INTEL_GEN(engine->i915) >= 9)
|
||||
else if (GRAPHICS_VER(engine->i915) >= 9)
|
||||
return 0x54;
|
||||
else if (engine->class == RENDER_CLASS)
|
||||
return 0x58;
|
||||
|
@ -534,9 +534,9 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
|
|||
|
||||
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
|
||||
{
|
||||
if (INTEL_GEN(engine->i915) >= 12)
|
||||
if (GRAPHICS_VER(engine->i915) >= 12)
|
||||
return 0x74;
|
||||
else if (INTEL_GEN(engine->i915) >= 9)
|
||||
else if (GRAPHICS_VER(engine->i915) >= 9)
|
||||
return 0x68;
|
||||
else if (engine->class == RENDER_CLASS)
|
||||
return 0xd8;
|
||||
|
@ -546,9 +546,9 @@ static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
|
|||
|
||||
static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
|
||||
{
|
||||
if (INTEL_GEN(engine->i915) >= 12)
|
||||
if (GRAPHICS_VER(engine->i915) >= 12)
|
||||
return 0x12;
|
||||
else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
|
||||
else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS)
|
||||
return 0x18;
|
||||
else
|
||||
return -1;
|
||||
|
@ -581,9 +581,9 @@ static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
|
|||
if (engine->class != RENDER_CLASS)
|
||||
return -1;
|
||||
|
||||
if (INTEL_GEN(engine->i915) >= 12)
|
||||
if (GRAPHICS_VER(engine->i915) >= 12)
|
||||
return 0xb6;
|
||||
else if (INTEL_GEN(engine->i915) >= 11)
|
||||
else if (GRAPHICS_VER(engine->i915) >= 11)
|
||||
return 0xaa;
|
||||
else
|
||||
return -1;
|
||||
|
@ -592,9 +592,9 @@ static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
|
|||
static u32
|
||||
lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
|
||||
{
|
||||
switch (INTEL_GEN(engine->i915)) {
|
||||
switch (GRAPHICS_VER(engine->i915)) {
|
||||
default:
|
||||
MISSING_CASE(INTEL_GEN(engine->i915));
|
||||
MISSING_CASE(GRAPHICS_VER(engine->i915));
|
||||
fallthrough;
|
||||
case 12:
|
||||
return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
|
||||
|
@ -637,7 +637,7 @@ static void init_common_regs(u32 * const regs,
|
|||
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
|
||||
if (inhibit)
|
||||
ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
|
||||
if (INTEL_GEN(engine->i915) < 11)
|
||||
if (GRAPHICS_VER(engine->i915) < 11)
|
||||
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
|
||||
CTX_CTRL_RS_CTX_ENABLE);
|
||||
regs[CTX_CONTEXT_CONTROL] = ctl;
|
||||
|
@ -805,7 +805,7 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
|
|||
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
|
||||
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
|
||||
|
||||
if (INTEL_GEN(engine->i915) == 12) {
|
||||
if (GRAPHICS_VER(engine->i915) == 12) {
|
||||
ce->wa_bb_page = context_size / PAGE_SIZE;
|
||||
context_size += PAGE_SIZE;
|
||||
}
|
||||
|
@ -1114,7 +1114,7 @@ static u32 lrc_descriptor(const struct intel_context *ce)
|
|||
desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
||||
|
||||
desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
|
||||
if (IS_GEN(ce->vm->i915, 8))
|
||||
if (GRAPHICS_VER(ce->vm->i915) == 8)
|
||||
desc |= GEN8_CTX_L3LLC_COHERENT;
|
||||
|
||||
return i915_ggtt_offset(ce->state) | desc;
|
||||
|
@ -1469,7 +1469,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
|
|||
if (engine->class != RENDER_CLASS)
|
||||
return;
|
||||
|
||||
switch (INTEL_GEN(engine->i915)) {
|
||||
switch (GRAPHICS_VER(engine->i915)) {
|
||||
case 12:
|
||||
case 11:
|
||||
return;
|
||||
|
@ -1486,7 +1486,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
|
|||
wa_bb_fn[1] = NULL;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(INTEL_GEN(engine->i915));
|
||||
MISSING_CASE(GRAPHICS_VER(engine->i915));
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -344,11 +344,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
|
|||
table->size = ARRAY_SIZE(dg1_mocs_table);
|
||||
table->table = dg1_mocs_table;
|
||||
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
|
||||
} else if (INTEL_GEN(i915) >= 12) {
|
||||
} else if (GRAPHICS_VER(i915) >= 12) {
|
||||
table->size = ARRAY_SIZE(tgl_mocs_table);
|
||||
table->table = tgl_mocs_table;
|
||||
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
|
||||
} else if (IS_GEN(i915, 11)) {
|
||||
} else if (GRAPHICS_VER(i915) == 11) {
|
||||
table->size = ARRAY_SIZE(icl_mocs_table);
|
||||
table->table = icl_mocs_table;
|
||||
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
|
||||
|
@ -361,7 +361,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
|
|||
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
|
||||
table->table = broxton_mocs_table;
|
||||
} else {
|
||||
drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9,
|
||||
drm_WARN_ONCE(&i915->drm, GRAPHICS_VER(i915) >= 9,
|
||||
"Platform that should have a MOCS table does not.\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -370,7 +370,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
|
|||
return 0;
|
||||
|
||||
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
|
||||
if (IS_GEN(i915, 9)) {
|
||||
if (GRAPHICS_VER(i915) == 9) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < table->size; i++)
|
||||
|
|
|
@ -146,9 +146,9 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
|
|||
|
||||
gtt_write_workarounds(gt);
|
||||
|
||||
if (IS_GEN(i915, 6))
|
||||
if (GRAPHICS_VER(i915) == 6)
|
||||
gen6_ppgtt_enable(gt);
|
||||
else if (IS_GEN(i915, 7))
|
||||
else if (GRAPHICS_VER(i915) == 7)
|
||||
gen7_ppgtt_enable(gt);
|
||||
|
||||
return 0;
|
||||
|
@ -157,7 +157,7 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
|
|||
static struct i915_ppgtt *
|
||||
__ppgtt_create(struct intel_gt *gt)
|
||||
{
|
||||
if (INTEL_GEN(gt->i915) < 8)
|
||||
if (GRAPHICS_VER(gt->i915) < 8)
|
||||
return gen6_ppgtt_create(gt);
|
||||
else
|
||||
return gen8_ppgtt_create(gt);
|
||||
|
@ -307,7 +307,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
|
|||
ppgtt->vm.dma = i915->drm.dev;
|
||||
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
|
||||
|
||||
dma_resv_init(&ppgtt->vm.resv);
|
||||
dma_resv_init(&ppgtt->vm._resv);
|
||||
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
|
||||
|
||||
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
|
||||
|
|
|
@ -109,7 +109,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
|
|||
GEN9_MEDIA_PG_ENABLE |
|
||||
GEN11_MEDIA_SAMPLER_PG_ENABLE;
|
||||
|
||||
if (INTEL_GEN(gt->i915) >= 12) {
|
||||
if (GRAPHICS_VER(gt->i915) >= 12) {
|
||||
for (i = 0; i < I915_MAX_VCS; i++)
|
||||
if (HAS_ENGINE(gt, _VCS(i)))
|
||||
pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
|
||||
|
@ -126,7 +126,7 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
|
|||
enum intel_engine_id id;
|
||||
|
||||
/* 2b: Program RC6 thresholds.*/
|
||||
if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
|
||||
if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 10) {
|
||||
set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
|
||||
set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
|
||||
} else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
|
||||
|
@ -249,9 +249,9 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
|
|||
rc6vids = 0;
|
||||
ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
|
||||
&rc6vids, NULL);
|
||||
if (IS_GEN(i915, 6) && ret) {
|
||||
if (GRAPHICS_VER(i915) == 6 && ret) {
|
||||
drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
|
||||
} else if (IS_GEN(i915, 6) &&
|
||||
} else if (GRAPHICS_VER(i915) == 6 &&
|
||||
(GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
|
||||
drm_dbg(&i915->drm,
|
||||
"You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
|
||||
|
@ -515,7 +515,7 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
|
|||
struct intel_uncore *uncore = rc6_to_uncore(rc6);
|
||||
|
||||
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
|
||||
if (INTEL_GEN(i915) >= 9)
|
||||
if (GRAPHICS_VER(i915) >= 9)
|
||||
set(uncore, GEN9_PG_ENABLE, 0);
|
||||
set(uncore, GEN6_RC_CONTROL, 0);
|
||||
set(uncore, GEN6_RC_STATE, 0);
|
||||
|
@ -575,13 +575,13 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
|
|||
chv_rc6_enable(rc6);
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
vlv_rc6_enable(rc6);
|
||||
else if (INTEL_GEN(i915) >= 11)
|
||||
else if (GRAPHICS_VER(i915) >= 11)
|
||||
gen11_rc6_enable(rc6);
|
||||
else if (INTEL_GEN(i915) >= 9)
|
||||
else if (GRAPHICS_VER(i915) >= 9)
|
||||
gen9_rc6_enable(rc6);
|
||||
else if (IS_BROADWELL(i915))
|
||||
gen8_rc6_enable(rc6);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
gen6_rc6_enable(rc6);
|
||||
|
||||
rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
|
||||
|
|
|
@ -5,6 +5,8 @@
|
|||
|
||||
#include "i915_drv.h"
|
||||
#include "intel_memory_region.h"
|
||||
#include "intel_region_lmem.h"
|
||||
#include "intel_region_ttm.h"
|
||||
#include "gem/i915_gem_lmem.h"
|
||||
#include "gem/i915_gem_region.h"
|
||||
#include "intel_region_lmem.h"
|
||||
|
@ -66,9 +68,9 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
|
|||
static void
|
||||
region_lmem_release(struct intel_memory_region *mem)
|
||||
{
|
||||
release_fake_lmem_bar(mem);
|
||||
intel_region_ttm_fini(mem);
|
||||
io_mapping_fini(&mem->iomap);
|
||||
intel_memory_region_release_buddy(mem);
|
||||
release_fake_lmem_bar(mem);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -83,12 +85,21 @@ region_lmem_init(struct intel_memory_region *mem)
|
|||
|
||||
if (!io_mapping_init_wc(&mem->iomap,
|
||||
mem->io_start,
|
||||
resource_size(&mem->region)))
|
||||
return -EIO;
|
||||
resource_size(&mem->region))) {
|
||||
ret = -EIO;
|
||||
goto out_no_io;
|
||||
}
|
||||
|
||||
ret = intel_memory_region_init_buddy(mem);
|
||||
ret = intel_region_ttm_init(mem);
|
||||
if (ret)
|
||||
io_mapping_fini(&mem->iomap);
|
||||
goto out_no_buddy;
|
||||
|
||||
return 0;
|
||||
|
||||
out_no_buddy:
|
||||
io_mapping_fini(&mem->iomap);
|
||||
out_no_io:
|
||||
release_fake_lmem_bar(mem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -127,6 +138,8 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt)
|
|||
mappable_end,
|
||||
PAGE_SIZE,
|
||||
io_start,
|
||||
INTEL_MEMORY_LOCAL,
|
||||
0,
|
||||
&intel_region_lmem_ops);
|
||||
if (!IS_ERR(mem)) {
|
||||
drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
|
||||
|
@ -198,6 +211,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
|
|||
lmem_size,
|
||||
I915_GTT_PAGE_SIZE_4K,
|
||||
io_start,
|
||||
INTEL_MEMORY_LOCAL,
|
||||
0,
|
||||
&intel_region_lmem_ops);
|
||||
if (IS_ERR(mem))
|
||||
return mem;
|
||||
|
|
|
@ -15,7 +15,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
|
|||
if (engine->class != RENDER_CLASS)
|
||||
return NULL;
|
||||
|
||||
switch (INTEL_GEN(engine->i915)) {
|
||||
switch (GRAPHICS_VER(engine->i915)) {
|
||||
case 6:
|
||||
return &gen6_null_state;
|
||||
case 7:
|
||||
|
|
|
@ -421,7 +421,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine,
|
|||
struct intel_engine_cs *paired_vecs;
|
||||
|
||||
if (engine->class != VIDEO_DECODE_CLASS ||
|
||||
!IS_GEN(engine->i915, 12))
|
||||
GRAPHICS_VER(engine->i915) != 12)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
|
@ -633,7 +633,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
|
|||
*/
|
||||
}
|
||||
|
||||
if (INTEL_GEN(gt->i915) >= 11)
|
||||
if (GRAPHICS_VER(gt->i915) >= 11)
|
||||
ret = gen11_reset_engines(gt, engine_mask, retry);
|
||||
else
|
||||
ret = gen6_reset_engines(gt, engine_mask, retry);
|
||||
|
@ -662,17 +662,17 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
|
|||
|
||||
if (is_mock_gt(gt))
|
||||
return mock_reset;
|
||||
else if (INTEL_GEN(i915) >= 8)
|
||||
else if (GRAPHICS_VER(i915) >= 8)
|
||||
return gen8_reset_engines;
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
return gen6_reset_engines;
|
||||
else if (INTEL_GEN(i915) >= 5)
|
||||
else if (GRAPHICS_VER(i915) >= 5)
|
||||
return ilk_do_reset;
|
||||
else if (IS_G4X(i915))
|
||||
return g4x_do_reset;
|
||||
else if (IS_G33(i915) || IS_PINEVIEW(i915))
|
||||
return g33_do_reset;
|
||||
else if (INTEL_GEN(i915) >= 3)
|
||||
else if (GRAPHICS_VER(i915) >= 3)
|
||||
return i915_do_reset;
|
||||
else
|
||||
return NULL;
|
||||
|
@ -724,7 +724,7 @@ bool intel_has_reset_engine(const struct intel_gt *gt)
|
|||
int intel_reset_guc(struct intel_gt *gt)
|
||||
{
|
||||
u32 guc_domain =
|
||||
INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
|
||||
GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
|
||||
int ret;
|
||||
|
||||
GEM_BUG_ON(!HAS_GT_UC(gt->i915));
|
||||
|
|
|
@ -29,7 +29,7 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
|
|||
* lost interrupts following a reset.
|
||||
*/
|
||||
if (engine->class == RENDER_CLASS) {
|
||||
if (INTEL_GEN(engine->i915) >= 6)
|
||||
if (GRAPHICS_VER(engine->i915) >= 6)
|
||||
mask &= ~BIT(0);
|
||||
else
|
||||
mask &= ~I915_USER_INTERRUPT;
|
||||
|
@ -43,7 +43,7 @@ static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
|
|||
u32 addr;
|
||||
|
||||
addr = lower_32_bits(phys);
|
||||
if (INTEL_GEN(engine->i915) >= 4)
|
||||
if (GRAPHICS_VER(engine->i915) >= 4)
|
||||
addr |= (phys >> 28) & 0xf0;
|
||||
|
||||
intel_uncore_write(engine->uncore, HWS_PGA, addr);
|
||||
|
@ -71,7 +71,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
|
|||
* The ring status page addresses are no longer next to the rest of
|
||||
* the ring registers as of gen7.
|
||||
*/
|
||||
if (IS_GEN(engine->i915, 7)) {
|
||||
if (GRAPHICS_VER(engine->i915) == 7) {
|
||||
switch (engine->id) {
|
||||
/*
|
||||
* No more rings exist on Gen7. Default case is only to shut up
|
||||
|
@ -93,7 +93,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
|
|||
hwsp = VEBOX_HWS_PGA_GEN7;
|
||||
break;
|
||||
}
|
||||
} else if (IS_GEN(engine->i915, 6)) {
|
||||
} else if (GRAPHICS_VER(engine->i915) == 6) {
|
||||
hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
|
||||
} else {
|
||||
hwsp = RING_HWS_PGA(engine->mmio_base);
|
||||
|
@ -105,7 +105,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
|
|||
|
||||
static void flush_cs_tlb(struct intel_engine_cs *engine)
|
||||
{
|
||||
if (!IS_GEN_RANGE(engine->i915, 6, 7))
|
||||
if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
|
||||
return;
|
||||
|
||||
/* ring should be idle before issuing a sync flush*/
|
||||
|
@ -153,7 +153,7 @@ static void set_pp_dir(struct intel_engine_cs *engine)
|
|||
ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
|
||||
ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
|
||||
|
||||
if (INTEL_GEN(engine->i915) >= 7) {
|
||||
if (GRAPHICS_VER(engine->i915) >= 7) {
|
||||
ENGINE_WRITE_FW(engine,
|
||||
RING_MODE_GEN7,
|
||||
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
|
||||
|
@ -229,7 +229,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
|
|||
5000, 0, NULL))
|
||||
goto err;
|
||||
|
||||
if (INTEL_GEN(engine->i915) > 2)
|
||||
if (GRAPHICS_VER(engine->i915) > 2)
|
||||
ENGINE_WRITE_FW(engine,
|
||||
RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
|
||||
|
||||
|
@ -646,9 +646,9 @@ static int mi_set_context(struct i915_request *rq,
|
|||
u32 *cs;
|
||||
|
||||
len = 4;
|
||||
if (IS_GEN(i915, 7))
|
||||
if (GRAPHICS_VER(i915) == 7)
|
||||
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
|
||||
else if (IS_GEN(i915, 5))
|
||||
else if (GRAPHICS_VER(i915) == 5)
|
||||
len += 2;
|
||||
if (flags & MI_FORCE_RESTORE) {
|
||||
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
|
||||
|
@ -662,7 +662,7 @@ static int mi_set_context(struct i915_request *rq,
|
|||
return PTR_ERR(cs);
|
||||
|
||||
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
|
||||
if (IS_GEN(i915, 7)) {
|
||||
if (GRAPHICS_VER(i915) == 7) {
|
||||
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
|
||||
if (num_engines) {
|
||||
struct intel_engine_cs *signaller;
|
||||
|
@ -678,7 +678,7 @@ static int mi_set_context(struct i915_request *rq,
|
|||
GEN6_PSMI_SLEEP_MSG_DISABLE);
|
||||
}
|
||||
}
|
||||
} else if (IS_GEN(i915, 5)) {
|
||||
} else if (GRAPHICS_VER(i915) == 5) {
|
||||
/*
|
||||
* This w/a is only listed for pre-production ilk a/b steppings,
|
||||
* but is also mentioned for programming the powerctx. To be
|
||||
|
@ -716,7 +716,7 @@ static int mi_set_context(struct i915_request *rq,
|
|||
*/
|
||||
*cs++ = MI_NOOP;
|
||||
|
||||
if (IS_GEN(i915, 7)) {
|
||||
if (GRAPHICS_VER(i915) == 7) {
|
||||
if (num_engines) {
|
||||
struct intel_engine_cs *signaller;
|
||||
i915_reg_t last_reg = {}; /* keep gcc quiet */
|
||||
|
@ -740,7 +740,7 @@ static int mi_set_context(struct i915_request *rq,
|
|||
*cs++ = MI_NOOP;
|
||||
}
|
||||
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
|
||||
} else if (IS_GEN(i915, 5)) {
|
||||
} else if (GRAPHICS_VER(i915) == 5) {
|
||||
*cs++ = MI_SUSPEND_FLUSH;
|
||||
}
|
||||
|
||||
|
@ -1001,7 +1001,7 @@ static void ring_release(struct intel_engine_cs *engine)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
|
||||
drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
|
||||
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
|
||||
|
||||
intel_engine_cleanup_common(engine);
|
||||
|
@ -1029,13 +1029,13 @@ static void setup_irq(struct intel_engine_cs *engine)
|
|||
|
||||
intel_engine_set_irq_handler(engine, irq_handler);
|
||||
|
||||
if (INTEL_GEN(i915) >= 6) {
|
||||
if (GRAPHICS_VER(i915) >= 6) {
|
||||
engine->irq_enable = gen6_irq_enable;
|
||||
engine->irq_disable = gen6_irq_disable;
|
||||
} else if (INTEL_GEN(i915) >= 5) {
|
||||
} else if (GRAPHICS_VER(i915) >= 5) {
|
||||
engine->irq_enable = gen5_irq_enable;
|
||||
engine->irq_disable = gen5_irq_disable;
|
||||
} else if (INTEL_GEN(i915) >= 3) {
|
||||
} else if (GRAPHICS_VER(i915) >= 3) {
|
||||
engine->irq_enable = gen3_irq_enable;
|
||||
engine->irq_disable = gen3_irq_disable;
|
||||
} else {
|
||||
|
@ -1049,7 +1049,7 @@ static void setup_common(struct intel_engine_cs *engine)
|
|||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
||||
/* gen8+ are only supported with execlists */
|
||||
GEM_BUG_ON(INTEL_GEN(i915) >= 8);
|
||||
GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);
|
||||
|
||||
setup_irq(engine);
|
||||
|
||||
|
@ -1070,14 +1070,14 @@ static void setup_common(struct intel_engine_cs *engine)
|
|||
* engine->emit_init_breadcrumb().
|
||||
*/
|
||||
engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
|
||||
if (IS_GEN(i915, 5))
|
||||
if (GRAPHICS_VER(i915) == 5)
|
||||
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
|
||||
|
||||
engine->set_default_submission = i9xx_set_default_submission;
|
||||
|
||||
if (INTEL_GEN(i915) >= 6)
|
||||
if (GRAPHICS_VER(i915) >= 6)
|
||||
engine->emit_bb_start = gen6_emit_bb_start;
|
||||
else if (INTEL_GEN(i915) >= 4)
|
||||
else if (GRAPHICS_VER(i915) >= 4)
|
||||
engine->emit_bb_start = gen4_emit_bb_start;
|
||||
else if (IS_I830(i915) || IS_I845G(i915))
|
||||
engine->emit_bb_start = i830_emit_bb_start;
|
||||
|
@ -1094,16 +1094,16 @@ static void setup_rcs(struct intel_engine_cs *engine)
|
|||
|
||||
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
|
||||
|
||||
if (INTEL_GEN(i915) >= 7) {
|
||||
if (GRAPHICS_VER(i915) >= 7) {
|
||||
engine->emit_flush = gen7_emit_flush_rcs;
|
||||
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
|
||||
} else if (IS_GEN(i915, 6)) {
|
||||
} else if (GRAPHICS_VER(i915) == 6) {
|
||||
engine->emit_flush = gen6_emit_flush_rcs;
|
||||
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
|
||||
} else if (IS_GEN(i915, 5)) {
|
||||
} else if (GRAPHICS_VER(i915) == 5) {
|
||||
engine->emit_flush = gen4_emit_flush_rcs;
|
||||
} else {
|
||||
if (INTEL_GEN(i915) < 4)
|
||||
if (GRAPHICS_VER(i915) < 4)
|
||||
engine->emit_flush = gen2_emit_flush;
|
||||
else
|
||||
engine->emit_flush = gen4_emit_flush_rcs;
|
||||
|
@ -1118,20 +1118,20 @@ static void setup_vcs(struct intel_engine_cs *engine)
|
|||
{
|
||||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
||||
if (INTEL_GEN(i915) >= 6) {
|
||||
if (GRAPHICS_VER(i915) >= 6) {
|
||||
/* gen6 bsd needs a special wa for tail updates */
|
||||
if (IS_GEN(i915, 6))
|
||||
if (GRAPHICS_VER(i915) == 6)
|
||||
engine->set_default_submission = gen6_bsd_set_default_submission;
|
||||
engine->emit_flush = gen6_emit_flush_vcs;
|
||||
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
|
||||
|
||||
if (IS_GEN(i915, 6))
|
||||
if (GRAPHICS_VER(i915) == 6)
|
||||
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
|
||||
else
|
||||
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
|
||||
} else {
|
||||
engine->emit_flush = gen4_emit_flush_vcs;
|
||||
if (IS_GEN(i915, 5))
|
||||
if (GRAPHICS_VER(i915) == 5)
|
||||
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
|
||||
else
|
||||
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
|
||||
|
@ -1145,7 +1145,7 @@ static void setup_bcs(struct intel_engine_cs *engine)
|
|||
engine->emit_flush = gen6_emit_flush_xcs;
|
||||
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
|
||||
|
||||
if (IS_GEN(i915, 6))
|
||||
if (GRAPHICS_VER(i915) == 6)
|
||||
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
|
||||
else
|
||||
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
|
||||
|
@ -1155,7 +1155,7 @@ static void setup_vecs(struct intel_engine_cs *engine)
|
|||
{
|
||||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
||||
GEM_BUG_ON(INTEL_GEN(i915) < 7);
|
||||
GEM_BUG_ON(GRAPHICS_VER(i915) < 7);
|
||||
|
||||
engine->emit_flush = gen6_emit_flush_xcs;
|
||||
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
|
||||
|
@ -1203,7 +1203,7 @@ static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
|
|||
struct i915_vma *vma;
|
||||
int size, err;
|
||||
|
||||
if (!IS_GEN(engine->i915, 7) || engine->class != RENDER_CLASS)
|
||||
if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
|
||||
return 0;
|
||||
|
||||
err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
|
||||
|
|
|
@ -196,7 +196,7 @@ static void rps_reset_interrupts(struct intel_rps *rps)
|
|||
struct intel_gt *gt = rps_to_gt(rps);
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
if (INTEL_GEN(gt->i915) >= 11)
|
||||
if (GRAPHICS_VER(gt->i915) >= 11)
|
||||
gen11_rps_reset_interrupts(rps);
|
||||
else
|
||||
gen6_rps_reset_interrupts(rps);
|
||||
|
@ -630,7 +630,7 @@ static u32 rps_limits(struct intel_rps *rps, u8 val)
|
|||
* frequency, if the down threshold expires in that window we will not
|
||||
* receive a down interrupt.
|
||||
*/
|
||||
if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
|
||||
if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
|
||||
limits = rps->max_freq_softlimit << 23;
|
||||
if (val <= rps->min_freq_softlimit)
|
||||
limits |= rps->min_freq_softlimit << 14;
|
||||
|
@ -697,7 +697,7 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
|
|||
intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
|
||||
|
||||
set(uncore, GEN6_RP_CONTROL,
|
||||
(INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
|
||||
(GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
|
||||
GEN6_RP_MEDIA_HW_NORMAL_MODE |
|
||||
GEN6_RP_MEDIA_IS_GFX |
|
||||
GEN6_RP_ENABLE |
|
||||
|
@ -771,7 +771,7 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
|
|||
struct drm_i915_private *i915 = rps_to_i915(rps);
|
||||
u32 swreq;
|
||||
|
||||
if (INTEL_GEN(i915) >= 9)
|
||||
if (GRAPHICS_VER(i915) >= 9)
|
||||
swreq = GEN9_FREQUENCY(val);
|
||||
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
|
||||
swreq = HSW_FREQUENCY(val);
|
||||
|
@ -812,14 +812,14 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
|
|||
|
||||
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
|
||||
err = vlv_rps_set(rps, val);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
err = gen6_rps_set(rps, val);
|
||||
else
|
||||
err = gen5_rps_set(rps, val);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (update && INTEL_GEN(i915) >= 6)
|
||||
if (update && GRAPHICS_VER(i915) >= 6)
|
||||
gen6_rps_set_thresholds(rps, val);
|
||||
rps->last_freq = val;
|
||||
|
||||
|
@ -853,7 +853,7 @@ void intel_rps_unpark(struct intel_rps *rps)
|
|||
if (intel_rps_uses_timer(rps))
|
||||
rps_start_timer(rps);
|
||||
|
||||
if (IS_GEN(rps_to_i915(rps), 5))
|
||||
if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
|
||||
gen5_rps_update(rps);
|
||||
}
|
||||
|
||||
|
@ -999,7 +999,7 @@ static void gen6_rps_init(struct intel_rps *rps)
|
|||
|
||||
rps->efficient_freq = rps->rp1_freq;
|
||||
if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
|
||||
IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
|
||||
IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
|
||||
u32 ddcc_status = 0;
|
||||
|
||||
if (sandybridge_pcode_read(i915,
|
||||
|
@ -1012,7 +1012,7 @@ static void gen6_rps_init(struct intel_rps *rps)
|
|||
rps->max_freq);
|
||||
}
|
||||
|
||||
if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
|
||||
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
|
||||
/* Store the frequency values in 16.66 MHZ units, which is
|
||||
* the natural hardware unit for SKL
|
||||
*/
|
||||
|
@ -1048,7 +1048,7 @@ static bool gen9_rps_enable(struct intel_rps *rps)
|
|||
struct intel_uncore *uncore = gt->uncore;
|
||||
|
||||
/* Program defaults and thresholds for RPS */
|
||||
if (IS_GEN(gt->i915, 9))
|
||||
if (GRAPHICS_VER(gt->i915) == 9)
|
||||
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
|
||||
GEN9_FREQUENCY(rps->rp1_freq));
|
||||
|
||||
|
@ -1365,16 +1365,16 @@ void intel_rps_enable(struct intel_rps *rps)
|
|||
enabled = chv_rps_enable(rps);
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
enabled = vlv_rps_enable(rps);
|
||||
else if (INTEL_GEN(i915) >= 9)
|
||||
else if (GRAPHICS_VER(i915) >= 9)
|
||||
enabled = gen9_rps_enable(rps);
|
||||
else if (INTEL_GEN(i915) >= 8)
|
||||
else if (GRAPHICS_VER(i915) >= 8)
|
||||
enabled = gen8_rps_enable(rps);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
enabled = gen6_rps_enable(rps);
|
||||
else if (IS_IRONLAKE_M(i915))
|
||||
enabled = gen5_rps_enable(rps);
|
||||
else
|
||||
MISSING_CASE(INTEL_GEN(i915));
|
||||
MISSING_CASE(GRAPHICS_VER(i915));
|
||||
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
|
||||
if (!enabled)
|
||||
return;
|
||||
|
@ -1393,7 +1393,7 @@ void intel_rps_enable(struct intel_rps *rps)
|
|||
|
||||
if (has_busy_stats(rps))
|
||||
intel_rps_set_timer(rps);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
intel_rps_set_interrupts(rps);
|
||||
else
|
||||
/* Ironlake currently uses intel_ips.ko */ {}
|
||||
|
@ -1414,7 +1414,7 @@ void intel_rps_disable(struct intel_rps *rps)
|
|||
intel_rps_clear_interrupts(rps);
|
||||
intel_rps_clear_timer(rps);
|
||||
|
||||
if (INTEL_GEN(i915) >= 6)
|
||||
if (GRAPHICS_VER(i915) >= 6)
|
||||
gen6_rps_disable(rps);
|
||||
else if (IS_IRONLAKE_M(i915))
|
||||
gen5_rps_disable(rps);
|
||||
|
@ -1453,14 +1453,14 @@ int intel_gpu_freq(struct intel_rps *rps, int val)
|
|||
{
|
||||
struct drm_i915_private *i915 = rps_to_i915(rps);
|
||||
|
||||
if (INTEL_GEN(i915) >= 9)
|
||||
if (GRAPHICS_VER(i915) >= 9)
|
||||
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
|
||||
GEN9_FREQ_SCALER);
|
||||
else if (IS_CHERRYVIEW(i915))
|
||||
return chv_gpu_freq(rps, val);
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
return byt_gpu_freq(rps, val);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
return val * GT_FREQUENCY_MULTIPLIER;
|
||||
else
|
||||
return val;
|
||||
|
@ -1470,14 +1470,14 @@ int intel_freq_opcode(struct intel_rps *rps, int val)
|
|||
{
|
||||
struct drm_i915_private *i915 = rps_to_i915(rps);
|
||||
|
||||
if (INTEL_GEN(i915) >= 9)
|
||||
if (GRAPHICS_VER(i915) >= 9)
|
||||
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
|
||||
GT_FREQUENCY_MULTIPLIER);
|
||||
else if (IS_CHERRYVIEW(i915))
|
||||
return chv_freq_opcode(rps, val);
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
return byt_freq_opcode(rps, val);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
|
||||
else
|
||||
return val;
|
||||
|
@ -1770,7 +1770,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
|
|||
spin_unlock(>->irq_lock);
|
||||
}
|
||||
|
||||
if (INTEL_GEN(gt->i915) >= 8)
|
||||
if (GRAPHICS_VER(gt->i915) >= 8)
|
||||
return;
|
||||
|
||||
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
|
||||
|
@ -1833,7 +1833,7 @@ void intel_rps_init(struct intel_rps *rps)
|
|||
chv_rps_init(rps);
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
vlv_rps_init(rps);
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
gen6_rps_init(rps);
|
||||
else if (IS_IRONLAKE_M(i915))
|
||||
gen5_rps_init(rps);
|
||||
|
@ -1843,7 +1843,7 @@ void intel_rps_init(struct intel_rps *rps)
|
|||
rps->min_freq_softlimit = rps->min_freq;
|
||||
|
||||
/* After setting max-softlimit, find the overclock max freq */
|
||||
if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
|
||||
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
|
||||
u32 params = 0;
|
||||
|
||||
sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
|
||||
|
@ -1872,16 +1872,16 @@ void intel_rps_init(struct intel_rps *rps)
|
|||
*
|
||||
* TODO: verify if this can be reproduced on VLV,CHV.
|
||||
*/
|
||||
if (INTEL_GEN(i915) <= 7)
|
||||
if (GRAPHICS_VER(i915) <= 7)
|
||||
rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
|
||||
|
||||
if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
|
||||
if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
|
||||
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
|
||||
}
|
||||
|
||||
void intel_rps_sanitize(struct intel_rps *rps)
|
||||
{
|
||||
if (INTEL_GEN(rps_to_i915(rps)) >= 6)
|
||||
if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
|
||||
rps_disable_interrupts(rps);
|
||||
}
|
||||
|
||||
|
@ -1892,11 +1892,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
|
|||
|
||||
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
|
||||
cagf = (rpstat >> 8) & 0xff;
|
||||
else if (INTEL_GEN(i915) >= 9)
|
||||
else if (GRAPHICS_VER(i915) >= 9)
|
||||
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
|
||||
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
|
||||
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
else if (GRAPHICS_VER(i915) >= 6)
|
||||
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
|
||||
else
|
||||
cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
|
||||
|
@ -1915,7 +1915,7 @@ static u32 read_cagf(struct intel_rps *rps)
|
|||
vlv_punit_get(i915);
|
||||
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
|
||||
vlv_punit_put(i915);
|
||||
} else if (INTEL_GEN(i915) >= 6) {
|
||||
} else if (GRAPHICS_VER(i915) >= 6) {
|
||||
freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
|
||||
} else {
|
||||
freq = intel_uncore_read(uncore, MEMSTAT_ILK);
|
||||
|
@ -1968,7 +1968,7 @@ void intel_rps_driver_register(struct intel_rps *rps)
|
|||
* We only register the i915 ips part with intel-ips once everything is
|
||||
* set up, to avoid intel-ips sneaking in and reading bogus values.
|
||||
*/
|
||||
if (IS_GEN(gt->i915, 5)) {
|
||||
if (GRAPHICS_VER(gt->i915) == 5) {
|
||||
GEM_BUG_ON(ips_mchdev);
|
||||
rcu_assign_pointer(ips_mchdev, gt->i915);
|
||||
ips_ping_for_i915_load();
|
||||
|
|
|
@ -590,13 +590,13 @@ void intel_sseu_info_init(struct intel_gt *gt)
|
|||
cherryview_sseu_info_init(gt);
|
||||
else if (IS_BROADWELL(i915))
|
||||
bdw_sseu_info_init(gt);
|
||||
else if (IS_GEN(i915, 9))
|
||||
else if (GRAPHICS_VER(i915) == 9)
|
||||
gen9_sseu_info_init(gt);
|
||||
else if (IS_GEN(i915, 10))
|
||||
else if (GRAPHICS_VER(i915) == 10)
|
||||
gen10_sseu_info_init(gt);
|
||||
else if (IS_GEN(i915, 11))
|
||||
else if (GRAPHICS_VER(i915) == 11)
|
||||
gen11_sseu_info_init(gt);
|
||||
else if (INTEL_GEN(i915) >= 12)
|
||||
else if (GRAPHICS_VER(i915) >= 12)
|
||||
gen12_sseu_info_init(gt);
|
||||
}
|
||||
|
||||
|
@ -613,7 +613,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
|
|||
* No explicit RPCS request is needed to ensure full
|
||||
* slice/subslice/EU enablement prior to Gen9.
|
||||
*/
|
||||
if (INTEL_GEN(i915) < 9)
|
||||
if (GRAPHICS_VER(i915) < 9)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
|
@ -651,7 +651,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
|
|||
* subslices are enabled, or a count between one and four on the first
|
||||
* slice.
|
||||
*/
|
||||
if (IS_GEN(i915, 11) &&
|
||||
if (GRAPHICS_VER(i915) == 11 &&
|
||||
slices == 1 &&
|
||||
subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
|
||||
GEM_BUG_ON(subslices & 1);
|
||||
|
@ -669,7 +669,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
|
|||
if (sseu->has_slice_pg) {
|
||||
u32 mask, val = slices;
|
||||
|
||||
if (INTEL_GEN(i915) >= 11) {
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
mask = GEN11_RPCS_S_CNT_MASK;
|
||||
val <<= GEN11_RPCS_S_CNT_SHIFT;
|
||||
} else {
|
||||
|
|
|
@ -248,7 +248,7 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
|
|||
struct sseu_dev_info sseu;
|
||||
intel_wakeref_t wakeref;
|
||||
|
||||
if (INTEL_GEN(i915) < 8)
|
||||
if (GRAPHICS_VER(i915) < 8)
|
||||
return -ENODEV;
|
||||
|
||||
seq_puts(m, "SSEU Device Info\n");
|
||||
|
@ -265,9 +265,9 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
|
|||
cherryview_sseu_device_status(gt, &sseu);
|
||||
else if (IS_BROADWELL(i915))
|
||||
bdw_sseu_device_status(gt, &sseu);
|
||||
else if (IS_GEN(i915, 9))
|
||||
else if (GRAPHICS_VER(i915) == 9)
|
||||
gen9_sseu_device_status(gt, &sseu);
|
||||
else if (INTEL_GEN(i915) >= 10)
|
||||
else if (GRAPHICS_VER(i915) >= 10)
|
||||
gen10_sseu_device_status(gt, &sseu);
|
||||
}
|
||||
|
||||
|
|
|
@ -699,9 +699,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
|
|||
|
||||
if (IS_DG1(i915))
|
||||
dg1_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_GEN(i915, 12))
|
||||
else if (GRAPHICS_VER(i915) == 12)
|
||||
gen12_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_GEN(i915, 11))
|
||||
else if (GRAPHICS_VER(i915) == 11)
|
||||
icl_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_CANNONLAKE(i915))
|
||||
cnl_ctx_workarounds_init(engine, wal);
|
||||
|
@ -719,14 +719,14 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
|
|||
chv_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_BROADWELL(i915))
|
||||
bdw_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_GEN(i915, 7))
|
||||
else if (GRAPHICS_VER(i915) == 7)
|
||||
gen7_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_GEN(i915, 6))
|
||||
else if (GRAPHICS_VER(i915) == 6)
|
||||
gen6_ctx_workarounds_init(engine, wal);
|
||||
else if (INTEL_GEN(i915) < 8)
|
||||
else if (GRAPHICS_VER(i915) < 8)
|
||||
;
|
||||
else
|
||||
MISSING_CASE(INTEL_GEN(i915));
|
||||
MISSING_CASE(GRAPHICS_VER(i915));
|
||||
|
||||
wa_init_finish(wal);
|
||||
}
|
||||
|
@ -950,7 +950,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
unsigned int slice, subslice;
|
||||
u32 l3_en, mcr, mcr_mask;
|
||||
|
||||
GEM_BUG_ON(INTEL_GEN(i915) < 10);
|
||||
GEM_BUG_ON(GRAPHICS_VER(i915) < 10);
|
||||
|
||||
/*
|
||||
* WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
|
||||
|
@ -980,7 +980,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
* of every MMIO read.
|
||||
*/
|
||||
|
||||
if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
|
||||
if (GRAPHICS_VER(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
|
||||
u32 l3_fuse =
|
||||
intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
|
||||
GEN10_L3BANK_MASK;
|
||||
|
@ -1002,7 +1002,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
}
|
||||
subslice--;
|
||||
|
||||
if (INTEL_GEN(i915) >= 11) {
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
|
||||
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
|
||||
} else {
|
||||
|
@ -1171,9 +1171,9 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
dg1_gt_workarounds_init(i915, wal);
|
||||
else if (IS_TIGERLAKE(i915))
|
||||
tgl_gt_workarounds_init(i915, wal);
|
||||
else if (IS_GEN(i915, 12))
|
||||
else if (GRAPHICS_VER(i915) == 12)
|
||||
gen12_gt_workarounds_init(i915, wal);
|
||||
else if (IS_GEN(i915, 11))
|
||||
else if (GRAPHICS_VER(i915) == 11)
|
||||
icl_gt_workarounds_init(i915, wal);
|
||||
else if (IS_CANNONLAKE(i915))
|
||||
cnl_gt_workarounds_init(i915, wal);
|
||||
|
@ -1193,18 +1193,18 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
vlv_gt_workarounds_init(i915, wal);
|
||||
else if (IS_IVYBRIDGE(i915))
|
||||
ivb_gt_workarounds_init(i915, wal);
|
||||
else if (IS_GEN(i915, 6))
|
||||
else if (GRAPHICS_VER(i915) == 6)
|
||||
snb_gt_workarounds_init(i915, wal);
|
||||
else if (IS_GEN(i915, 5))
|
||||
else if (GRAPHICS_VER(i915) == 5)
|
||||
ilk_gt_workarounds_init(i915, wal);
|
||||
else if (IS_G4X(i915))
|
||||
g4x_gt_workarounds_init(i915, wal);
|
||||
else if (IS_GEN(i915, 4))
|
||||
else if (GRAPHICS_VER(i915) == 4)
|
||||
gen4_gt_workarounds_init(i915, wal);
|
||||
else if (INTEL_GEN(i915) <= 8)
|
||||
else if (GRAPHICS_VER(i915) <= 8)
|
||||
;
|
||||
else
|
||||
MISSING_CASE(INTEL_GEN(i915));
|
||||
MISSING_CASE(GRAPHICS_VER(i915));
|
||||
}
|
||||
|
||||
void intel_gt_init_workarounds(struct drm_i915_private *i915)
|
||||
|
@ -1558,9 +1558,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
|
|||
|
||||
if (IS_DG1(i915))
|
||||
dg1_whitelist_build(engine);
|
||||
else if (IS_GEN(i915, 12))
|
||||
else if (GRAPHICS_VER(i915) == 12)
|
||||
tgl_whitelist_build(engine);
|
||||
else if (IS_GEN(i915, 11))
|
||||
else if (GRAPHICS_VER(i915) == 11)
|
||||
icl_whitelist_build(engine);
|
||||
else if (IS_CANNONLAKE(i915))
|
||||
cnl_whitelist_build(engine);
|
||||
|
@ -1576,10 +1576,10 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
|
|||
bxt_whitelist_build(engine);
|
||||
else if (IS_SKYLAKE(i915))
|
||||
skl_whitelist_build(engine);
|
||||
else if (INTEL_GEN(i915) <= 8)
|
||||
else if (GRAPHICS_VER(i915) <= 8)
|
||||
;
|
||||
else
|
||||
MISSING_CASE(INTEL_GEN(i915));
|
||||
MISSING_CASE(GRAPHICS_VER(i915));
|
||||
|
||||
wa_init_finish(w);
|
||||
}
|
||||
|
@ -1695,7 +1695,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
ENABLE_SMALLPL);
|
||||
}
|
||||
|
||||
if (IS_GEN(i915, 11)) {
|
||||
if (GRAPHICS_VER(i915) == 11) {
|
||||
/* This is not an Wa. Enable for better image quality */
|
||||
wa_masked_en(wal,
|
||||
_3D_CHICKEN3,
|
||||
|
@ -1793,7 +1793,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
FF_DOP_CLOCK_GATE_DISABLE);
|
||||
}
|
||||
|
||||
if (IS_GEN_RANGE(i915, 9, 12)) {
|
||||
if (IS_GRAPHICS_VER(i915, 9, 12)) {
|
||||
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
|
||||
wa_masked_en(wal,
|
||||
GEN7_FF_SLICE_CS_CHICKEN1,
|
||||
|
@ -1817,7 +1817,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
|
||||
}
|
||||
|
||||
if (IS_GEN(i915, 9)) {
|
||||
if (GRAPHICS_VER(i915) == 9) {
|
||||
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
|
||||
wa_masked_en(wal,
|
||||
GEN9_CSFE_CHICKEN1_RCS,
|
||||
|
@ -1921,7 +1921,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
|
||||
}
|
||||
|
||||
if (IS_GEN(i915, 7)) {
|
||||
if (GRAPHICS_VER(i915) == 7) {
|
||||
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
|
||||
wa_masked_en(wal,
|
||||
GFX_MODE_GEN7,
|
||||
|
@ -1953,7 +1953,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
GEN6_WIZ_HASHING_16x4);
|
||||
}
|
||||
|
||||
if (IS_GEN_RANGE(i915, 6, 7))
|
||||
if (IS_GRAPHICS_VER(i915, 6, 7))
|
||||
/*
|
||||
* We need to disable the AsyncFlip performance optimisations in
|
||||
* order to use MI_WAIT_FOR_EVENT within the CS. It should
|
||||
|
@ -1965,7 +1965,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
MI_MODE,
|
||||
ASYNC_FLIP_PERF_DISABLE);
|
||||
|
||||
if (IS_GEN(i915, 6)) {
|
||||
if (GRAPHICS_VER(i915) == 6) {
|
||||
/*
|
||||
* Required for the hardware to program scanline values for
|
||||
* waiting
|
||||
|
@ -2019,14 +2019,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
CM0_STC_EVICT_DISABLE_LRA_SNB);
|
||||
}
|
||||
|
||||
if (IS_GEN_RANGE(i915, 4, 6))
|
||||
if (IS_GRAPHICS_VER(i915, 4, 6))
|
||||
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
|
||||
wa_add(wal, MI_MODE,
|
||||
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
|
||||
/* XXX bit doesn't stick on Broadwater */
|
||||
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
|
||||
|
||||
if (IS_GEN(i915, 4))
|
||||
if (GRAPHICS_VER(i915) == 4)
|
||||
/*
|
||||
* Disable CONSTANT_BUFFER before it is loaded from the context
|
||||
* image. For as it is loaded, it is executed and the stored
|
||||
|
@ -2058,7 +2058,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
static void
|
||||
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
||||
{
|
||||
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
|
||||
if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
|
||||
return;
|
||||
|
||||
if (engine->class == RENDER_CLASS)
|
||||
|
@ -2071,7 +2071,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
|
|||
{
|
||||
struct i915_wa_list *wal = &engine->wa_list;
|
||||
|
||||
if (INTEL_GEN(engine->i915) < 4)
|
||||
if (GRAPHICS_VER(engine->i915) < 4)
|
||||
return;
|
||||
|
||||
wa_init_start(wal, "engine", engine->name);
|
||||
|
@ -2112,9 +2112,9 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
|
|||
const struct mcr_range *mcr_ranges;
|
||||
int i;
|
||||
|
||||
if (INTEL_GEN(i915) >= 12)
|
||||
if (GRAPHICS_VER(i915) >= 12)
|
||||
mcr_ranges = mcr_ranges_gen12;
|
||||
else if (INTEL_GEN(i915) >= 8)
|
||||
else if (GRAPHICS_VER(i915) >= 8)
|
||||
mcr_ranges = mcr_ranges_gen8;
|
||||
else
|
||||
return false;
|
||||
|
@ -2143,7 +2143,7 @@ wa_list_srm(struct i915_request *rq,
|
|||
u32 srm, *cs;
|
||||
|
||||
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
|
||||
if (INTEL_GEN(i915) >= 8)
|
||||
if (GRAPHICS_VER(i915) >= 8)
|
||||
srm++;
|
||||
|
||||
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
|
||||
|
|
|
@ -52,7 +52,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
|
|||
return PTR_ERR(cs);
|
||||
|
||||
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
|
||||
if (INTEL_GEN(rq->engine->i915) >= 8)
|
||||
if (GRAPHICS_VER(rq->engine->i915) >= 8)
|
||||
cmd++;
|
||||
*cs++ = cmd;
|
||||
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
|
||||
|
@ -125,7 +125,7 @@ static int perf_mi_bb_start(void *arg)
|
|||
enum intel_engine_id id;
|
||||
int err = 0;
|
||||
|
||||
if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
|
||||
if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
|
||||
return 0;
|
||||
|
||||
perf_begin(gt);
|
||||
|
@ -249,7 +249,7 @@ static int perf_mi_noop(void *arg)
|
|||
enum intel_engine_id id;
|
||||
int err = 0;
|
||||
|
||||
if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
|
||||
if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
|
||||
return 0;
|
||||
|
||||
perf_begin(gt);
|
||||
|
|
|
@ -198,7 +198,7 @@ static int live_engine_timestamps(void *arg)
|
|||
* the same CS clock.
|
||||
*/
|
||||
|
||||
if (INTEL_GEN(gt->i915) < 8)
|
||||
if (GRAPHICS_VER(gt->i915) < 8)
|
||||
return 0;
|
||||
|
||||
for_each_engine(engine, gt, id) {
|
||||
|
|
|
@ -3269,7 +3269,7 @@ static int live_preempt_user(void *arg)
|
|||
if (!intel_engine_has_preemption(engine))
|
||||
continue;
|
||||
|
||||
if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
|
||||
if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS)
|
||||
continue; /* we need per-context GPR */
|
||||
|
||||
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
|
||||
|
@ -4293,7 +4293,7 @@ static int live_virtual_preserved(void *arg)
|
|||
return 0;
|
||||
|
||||
/* As we use CS_GPR we cannot run before they existed on all engines. */
|
||||
if (INTEL_GEN(gt->i915) < 9)
|
||||
if (GRAPHICS_VER(gt->i915) < 9)
|
||||
return 0;
|
||||
|
||||
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
|
||||
|
|
|
@ -74,10 +74,10 @@ static int live_gt_clocks(void *arg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */
|
||||
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
|
||||
return 0;
|
||||
|
||||
if (IS_GEN(gt->i915, 5))
|
||||
if (GRAPHICS_VER(gt->i915) == 5)
|
||||
/*
|
||||
* XXX CS_TIMESTAMP low dword is dysfunctional?
|
||||
*
|
||||
|
@ -86,7 +86,7 @@ static int live_gt_clocks(void *arg)
|
|||
*/
|
||||
return 0;
|
||||
|
||||
if (IS_GEN(gt->i915, 4))
|
||||
if (GRAPHICS_VER(gt->i915) == 4)
|
||||
/*
|
||||
* XXX CS_TIMESTAMP appears gibberish
|
||||
*
|
||||
|
@ -105,7 +105,7 @@ static int live_gt_clocks(void *arg)
|
|||
u64 time;
|
||||
u64 dt;
|
||||
|
||||
if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0)
|
||||
if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
|
||||
continue;
|
||||
|
||||
measure_clocks(engine, &cycles, &dt);
|
||||
|
|
|
@ -180,7 +180,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
|
|||
goto cancel_rq;
|
||||
|
||||
batch = h->batch;
|
||||
if (INTEL_GEN(gt->i915) >= 8) {
|
||||
if (GRAPHICS_VER(gt->i915) >= 8) {
|
||||
*batch++ = MI_STORE_DWORD_IMM_GEN4;
|
||||
*batch++ = lower_32_bits(hws_address(hws, rq));
|
||||
*batch++ = upper_32_bits(hws_address(hws, rq));
|
||||
|
@ -194,7 +194,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
|
|||
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
|
||||
*batch++ = lower_32_bits(vma->node.start);
|
||||
*batch++ = upper_32_bits(vma->node.start);
|
||||
} else if (INTEL_GEN(gt->i915) >= 6) {
|
||||
} else if (GRAPHICS_VER(gt->i915) >= 6) {
|
||||
*batch++ = MI_STORE_DWORD_IMM_GEN4;
|
||||
*batch++ = 0;
|
||||
*batch++ = lower_32_bits(hws_address(hws, rq));
|
||||
|
@ -207,7 +207,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
|
|||
*batch++ = MI_NOOP;
|
||||
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
|
||||
*batch++ = lower_32_bits(vma->node.start);
|
||||
} else if (INTEL_GEN(gt->i915) >= 4) {
|
||||
} else if (GRAPHICS_VER(gt->i915) >= 4) {
|
||||
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
|
||||
*batch++ = 0;
|
||||
*batch++ = lower_32_bits(hws_address(hws, rq));
|
||||
|
@ -243,7 +243,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
|
|||
}
|
||||
|
||||
flags = 0;
|
||||
if (INTEL_GEN(gt->i915) <= 5)
|
||||
if (GRAPHICS_VER(gt->i915) <= 5)
|
||||
flags |= I915_DISPATCH_SECURE;
|
||||
|
||||
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
|
||||
|
|
|
@ -44,7 +44,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
|
|||
if (found != ia_freq) {
|
||||
pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
|
||||
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
|
||||
intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
|
||||
intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
|
||||
found, ia_freq);
|
||||
err = -EINVAL;
|
||||
break;
|
||||
|
@ -54,7 +54,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
|
|||
if (found != ring_freq) {
|
||||
pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
|
||||
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
|
||||
intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
|
||||
intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
|
||||
found, ring_freq);
|
||||
err = -EINVAL;
|
||||
break;
|
||||
|
|
|
@ -584,7 +584,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
|
|||
int err;
|
||||
int n;
|
||||
|
||||
if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
|
||||
if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS)
|
||||
return 0; /* GPR only on rcs0 for gen8 */
|
||||
|
||||
err = gpr_make_dirty(engine->kernel_context);
|
||||
|
@ -1389,10 +1389,10 @@ err_A:
|
|||
|
||||
static bool skip_isolation(const struct intel_engine_cs *engine)
|
||||
{
|
||||
if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
|
||||
if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9)
|
||||
return true;
|
||||
|
||||
if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
|
||||
if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
@ -1551,7 +1551,7 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
|
|||
/* We use the already reserved extra page in context state */
|
||||
if (!a->wa_bb_page) {
|
||||
GEM_BUG_ON(b->wa_bb_page);
|
||||
GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
|
||||
GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12);
|
||||
goto unpin_b;
|
||||
}
|
||||
|
||||
|
|
|
@ -183,7 +183,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
|
|||
* which only controls CPU initiated MMIO. Routing does not
|
||||
* work for CS access so we cannot verify them on this path.
|
||||
*/
|
||||
return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
|
||||
return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
|
||||
}
|
||||
|
||||
static int check_l3cc_table(struct intel_engine_cs *engine,
|
||||
|
|
|
@ -140,7 +140,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
|
|||
}
|
||||
|
||||
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
|
||||
if (INTEL_GEN(rq->engine->i915) >= 8)
|
||||
if (GRAPHICS_VER(rq->engine->i915) >= 8)
|
||||
cmd++;
|
||||
|
||||
*cs++ = cmd;
|
||||
|
@ -193,7 +193,7 @@ int live_rc6_ctx_wa(void *arg)
|
|||
int err = 0;
|
||||
|
||||
/* A read of CTX_INFO upsets rc6. Poke the bear! */
|
||||
if (INTEL_GEN(gt->i915) < 8)
|
||||
if (GRAPHICS_VER(gt->i915) < 8)
|
||||
return 0;
|
||||
|
||||
engines = randomised_engines(gt, &prng, &count);
|
||||
|
|
|
@ -41,10 +41,10 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
|
|||
return ERR_CAST(cs);
|
||||
}
|
||||
|
||||
if (INTEL_GEN(engine->i915) >= 6) {
|
||||
if (GRAPHICS_VER(engine->i915) >= 6) {
|
||||
*cs++ = MI_STORE_DWORD_IMM_GEN4;
|
||||
*cs++ = 0;
|
||||
} else if (INTEL_GEN(engine->i915) >= 4) {
|
||||
} else if (GRAPHICS_VER(engine->i915) >= 4) {
|
||||
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
|
||||
*cs++ = 0;
|
||||
} else {
|
||||
|
@ -266,7 +266,7 @@ static int live_ctx_switch_wa(void *arg)
|
|||
if (!intel_engine_can_store_dword(engine))
|
||||
continue;
|
||||
|
||||
if (IS_GEN_RANGE(gt->i915, 4, 5))
|
||||
if (IS_GRAPHICS_VER(gt->i915, 4, 5))
|
||||
continue; /* MI_STORE_DWORD is privileged! */
|
||||
|
||||
saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
|
||||
|
|
|
@ -204,7 +204,7 @@ static void show_pstate_limits(struct intel_rps *rps)
|
|||
i915_mmio_reg_offset(BXT_RP_STATE_CAP),
|
||||
intel_uncore_read(rps_to_uncore(rps),
|
||||
BXT_RP_STATE_CAP));
|
||||
} else if (IS_GEN(i915, 9)) {
|
||||
} else if (GRAPHICS_VER(i915) == 9) {
|
||||
pr_info("P_STATE_LIMITS[%x]: 0x%08x\n",
|
||||
i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS),
|
||||
intel_uncore_read(rps_to_uncore(rps),
|
||||
|
@ -222,7 +222,7 @@ int live_rps_clock_interval(void *arg)
|
|||
struct igt_spinner spin;
|
||||
int err = 0;
|
||||
|
||||
if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
|
||||
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
|
||||
return 0;
|
||||
|
||||
if (igt_spinner_init(&spin, gt))
|
||||
|
@ -506,7 +506,7 @@ static void show_pcu_config(struct intel_rps *rps)
|
|||
|
||||
min_gpu_freq = rps->min_freq;
|
||||
max_gpu_freq = rps->max_freq;
|
||||
if (INTEL_GEN(i915) >= 9) {
|
||||
if (GRAPHICS_VER(i915) >= 9) {
|
||||
/* Convert GT frequency to 50 HZ units */
|
||||
min_gpu_freq /= GEN9_FREQ_SCALER;
|
||||
max_gpu_freq /= GEN9_FREQ_SCALER;
|
||||
|
@ -614,7 +614,7 @@ int live_rps_frequency_cs(void *arg)
|
|||
if (!intel_rps_is_enabled(rps))
|
||||
return 0;
|
||||
|
||||
if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
|
||||
if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
|
||||
return 0;
|
||||
|
||||
if (CPU_LATENCY >= 0)
|
||||
|
@ -755,7 +755,7 @@ int live_rps_frequency_srm(void *arg)
|
|||
if (!intel_rps_is_enabled(rps))
|
||||
return 0;
|
||||
|
||||
if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
|
||||
if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
|
||||
return 0;
|
||||
|
||||
if (CPU_LATENCY >= 0)
|
||||
|
@ -1031,7 +1031,7 @@ int live_rps_interrupt(void *arg)
|
|||
* First, let's check whether or not we are receiving interrupts.
|
||||
*/
|
||||
|
||||
if (!intel_rps_has_interrupts(rps) || INTEL_GEN(gt->i915) < 6)
|
||||
if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
|
||||
return 0;
|
||||
|
||||
intel_gt_pm_get(gt);
|
||||
|
@ -1136,7 +1136,7 @@ int live_rps_power(void *arg)
|
|||
* that theory.
|
||||
*/
|
||||
|
||||
if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
|
||||
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
|
||||
return 0;
|
||||
|
||||
if (!librapl_supported(gt->i915))
|
||||
|
@ -1240,7 +1240,7 @@ int live_rps_dynamic(void *arg)
|
|||
* moving parts into dynamic reclocking based on load.
|
||||
*/
|
||||
|
||||
if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
|
||||
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
|
||||
return 0;
|
||||
|
||||
if (igt_spinner_init(&spin, gt))
|
||||
|
|
|
@ -457,12 +457,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
|
|||
if (IS_ERR(cs))
|
||||
return PTR_ERR(cs);
|
||||
|
||||
if (INTEL_GEN(rq->engine->i915) >= 8) {
|
||||
if (GRAPHICS_VER(rq->engine->i915) >= 8) {
|
||||
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
|
||||
*cs++ = addr;
|
||||
*cs++ = 0;
|
||||
*cs++ = value;
|
||||
} else if (INTEL_GEN(rq->engine->i915) >= 4) {
|
||||
} else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
|
||||
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
|
||||
*cs++ = 0;
|
||||
*cs++ = addr;
|
||||
|
@ -992,7 +992,7 @@ static int live_hwsp_read(void *arg)
|
|||
* even across multiple wraps.
|
||||
*/
|
||||
|
||||
if (INTEL_GEN(gt->i915) < 8) /* CS convenience [SRM/LRM] */
|
||||
if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */
|
||||
return 0;
|
||||
|
||||
tl = intel_timeline_create(gt);
|
||||
|
|
|
@ -145,7 +145,7 @@ read_nonprivs(struct intel_context *ce)
|
|||
goto err_req;
|
||||
|
||||
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
|
||||
if (INTEL_GEN(engine->i915) >= 8)
|
||||
if (GRAPHICS_VER(engine->i915) >= 8)
|
||||
srm++;
|
||||
|
||||
cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
|
||||
|
@ -546,7 +546,7 @@ retry:
|
|||
|
||||
srm = MI_STORE_REGISTER_MEM;
|
||||
lrm = MI_LOAD_REGISTER_MEM;
|
||||
if (INTEL_GEN(engine->i915) >= 8)
|
||||
if (GRAPHICS_VER(engine->i915) >= 8)
|
||||
lrm++, srm++;
|
||||
|
||||
pr_debug("%s: Writing garbage to %x\n",
|
||||
|
@ -749,7 +749,7 @@ static int live_dirty_whitelist(void *arg)
|
|||
|
||||
/* Can the user write to the whitelisted registers? */
|
||||
|
||||
if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
|
||||
if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
|
||||
return 0;
|
||||
|
||||
for_each_engine(engine, gt, id) {
|
||||
|
@ -829,7 +829,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
|
|||
goto err_req;
|
||||
|
||||
srm = MI_STORE_REGISTER_MEM;
|
||||
if (INTEL_GEN(engine->i915) >= 8)
|
||||
if (GRAPHICS_VER(engine->i915) >= 8)
|
||||
srm++;
|
||||
|
||||
cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
|
||||
|
|
|
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_ACTIONS_ABI_H
+#define _ABI_GUC_ACTIONS_ABI_H
+
+enum intel_guc_action {
+	INTEL_GUC_ACTION_DEFAULT = 0x0,
+	INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
+	INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
+	INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+	INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+	INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+	INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
+	INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
+	INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
+	INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
+	INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+	INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+	INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
+	INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
+	INTEL_GUC_ACTION_LIMIT
+};
+
+enum intel_guc_preempt_options {
+	INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
+	INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
+};
+
+enum intel_guc_report_status {
+	INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
+	INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
+	INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
+	INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
+};
+
+enum intel_guc_sleep_state_status {
+	INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
+	INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
+	INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
+#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
+#define GUC_LOG_CONTROL_LOGGING_ENABLED	(1 << 0)
+#define GUC_LOG_CONTROL_VERBOSITY_SHIFT	4
+#define GUC_LOG_CONTROL_VERBOSITY_MASK	(0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
+#define GUC_LOG_CONTROL_DEFAULT_LOGGING	(1 << 8)
+
+#endif /* _ABI_GUC_ACTIONS_ABI_H */
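These action codes form the first dword of a Host-to-GuC request. As a rough sketch (the call shape mirrors the intel_guc_auth_huc() path visible later in this diff; the per-action parameter layout is firmware-defined and assumed here), requesting HuC authentication looks roughly like:

	/* Sketch: a two-dword H2G request; rsa_offset is assumed to be the
	 * GGTT offset of the HuC RSA signature blob.
	 */
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset,
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));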
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_COMMUNICATION_CTB_ABI_H
+#define _ABI_GUC_COMMUNICATION_CTB_ABI_H
+
+#include <linux/types.h>
+
+/**
+ * DOC: CTB based communication
+ *
+ * The CTB (command transport buffer) communication between Host and GuC
+ * is based on u32 data stream written to the shared buffer. One buffer can
+ * be used to transmit data only in one direction (one-directional channel).
+ *
+ * Current status of the each buffer is stored in the buffer descriptor.
+ * Buffer descriptor holds tail and head fields that represents active data
+ * stream. The tail field is updated by the data producer (sender), and head
+ * field is updated by the data consumer (receiver)::
+ *
+ *      +------------+
+ *      | DESCRIPTOR |          +=================+============+========+
+ *      +============+          |                 | MESSAGE(s) |        |
+ *      | address    |--------->+=================+============+========+
+ *      +------------+
+ *      | head       |          ^-----head--------^
+ *      +------------+
+ *      | tail       |          ^---------tail-----------------^
+ *      +------------+
+ *      | size       |          ^---------------size--------------------^
+ *      +------------+
+ *
+ * Each message in data stream starts with the single u32 treated as a header,
+ * followed by optional set of u32 data that makes message specific payload::
+ *
+ *      +------------+---------+---------+---------+
+ *      |   MESSAGE  |
+ *      +------------+---------+---------+---------+
+ *      |   msg[0]   |   [1]   |   ...   |  [n-1]  |
+ *      +------------+---------+---------+---------+
+ *      |   MESSAGE  |       MESSAGE PAYLOAD       |
+ *      +   HEADER   +---------+---------+---------+
+ *      |            |    0    |   ...   |    n    |
+ *      +======+=====+=========+=========+=========+
+ *      | 31:16| code|         |         |         |
+ *      +------+-----+         |         |         |
+ *      |  15:5|flags|         |         |         |
+ *      +------+-----+         |         |         |
+ *      |   4:0|  len|         |         |         |
+ *      +------+-----+---------+---------+---------+
+ *
+ *                   ^-------------len-------------^
+ *
+ * The message header consists of:
+ *
+ * - **len**, indicates length of the message payload (in u32)
+ * - **code**, indicates message code
+ * - **flags**, holds various bits to control message handling
+ */
+
+/*
+ * Describes single command transport buffer.
+ * Used by both guc-master and clients.
+ */
+struct guc_ct_buffer_desc {
+	u32 addr;		/* gfx address */
+	u64 host_private;	/* host private data */
+	u32 size;		/* size in bytes */
+	u32 head;		/* offset updated by GuC*/
+	u32 tail;		/* offset updated by owner */
+	u32 is_in_error;	/* error indicator */
+	u32 reserved1;
+	u32 reserved2;
+	u32 owner;		/* id of the channel owner */
+	u32 owner_sub_id;	/* owner-defined field for extra tracking */
+	u32 reserved[5];
+} __packed;
+
+/* Type of command transport buffer */
+#define INTEL_GUC_CT_BUFFER_TYPE_SEND	0x0u
+#define INTEL_GUC_CT_BUFFER_TYPE_RECV	0x1u
+
+/*
+ * Definition of the command transport message header (DW0)
+ *
+ * bit[4..0]	message len (in dwords)
+ * bit[7..5]	reserved
+ * bit[8]	response (G2H only)
+ * bit[8]	write fence to desc (H2G only)
+ * bit[9]	write status to H2G buff (H2G only)
+ * bit[10]	send status back via G2H (H2G only)
+ * bit[15..11]	reserved
+ * bit[31..16]	action code
+ */
+#define GUC_CT_MSG_LEN_SHIFT			0
+#define GUC_CT_MSG_LEN_MASK			0x1F
+#define GUC_CT_MSG_IS_RESPONSE			(1 << 8)
+#define GUC_CT_MSG_WRITE_FENCE_TO_DESC		(1 << 8)
+#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF		(1 << 9)
+#define GUC_CT_MSG_SEND_STATUS			(1 << 10)
+#define GUC_CT_MSG_ACTION_SHIFT			16
+#define GUC_CT_MSG_ACTION_MASK			0xFFFF
+
+#endif /* _ABI_GUC_COMMUNICATION_CTB_ABI_H */
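For illustration, hypothetical helpers (names are not from this series) showing how the DW0 layout documented above maps onto the GUC_CT_MSG_* fields:

	/* Illustrative only: pack/unpack the CT message header (DW0). */
	static inline u32 ct_header_pack(u32 action, u32 len, u32 flags)
	{
		return ((action & GUC_CT_MSG_ACTION_MASK) << GUC_CT_MSG_ACTION_SHIFT) |
		       ((len & GUC_CT_MSG_LEN_MASK) << GUC_CT_MSG_LEN_SHIFT) |
		       flags;	/* e.g. GUC_CT_MSG_SEND_STATUS for an H2G request */
	}

	static inline u32 ct_header_action(u32 header)
	{
		return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
	}

	static inline u32 ct_header_len(u32 header)
	{
		return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
	}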
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_COMMUNICATION_MMIO_ABI_H
+#define _ABI_GUC_COMMUNICATION_MMIO_ABI_H
+
+/**
+ * DOC: MMIO based communication
+ *
+ * The MMIO based communication between Host and GuC uses software scratch
+ * registers, where first register holds data treated as message header,
+ * and other registers are used to hold message payload.
+ *
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 8 parameters and the GuC FW
+ * itself uses an 8-element array to store the H2G message.
+ *
+ *      +-----------+---------+---------+---------+
+ *      |  MMIO[0]  | MMIO[1] |   ...   | MMIO[n] |
+ *      +-----------+---------+---------+---------+
+ *      | header    |      optional payload       |
+ *      +======+====+=========+=========+=========+
+ *      | 31:28|type|         |         |         |
+ *      +------+----+         |         |         |
+ *      | 27:16|data|         |         |         |
+ *      +------+----+         |         |         |
+ *      |  15:0|code|         |         |         |
+ *      +------+----+---------+---------+---------+
+ *
+ * The message header consists of:
+ *
+ * - **type**, indicates message type
+ * - **code**, indicates message code, is specific for **type**
+ * - **data**, indicates message data, optional, depends on **code**
+ *
+ * The following message **types** are supported:
+ *
+ * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code
+ *   must be priovided in **code** field. Optional action specific parameters
+ *   can be provided in remaining payload registers or **data** field.
+ *
+ * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request,
+ *   action response status will be provided in **code** field. Optional
+ *   response data can be returned in remaining payload registers or **data**
+ *   field.
+ */
+
+#define GUC_MAX_MMIO_MSG_LEN		8
+
+#endif /* _ABI_GUC_COMMUNICATION_MMIO_ABI_H */
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_ERRORS_ABI_H
+#define _ABI_GUC_ERRORS_ABI_H
+
+enum intel_guc_response_status {
+	INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+	INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
+};
+
+#endif /* _ABI_GUC_ERRORS_ABI_H */
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_MESSAGES_ABI_H
+#define _ABI_GUC_MESSAGES_ABI_H
+
+#define INTEL_GUC_MSG_TYPE_SHIFT	28
+#define INTEL_GUC_MSG_TYPE_MASK		(0xF << INTEL_GUC_MSG_TYPE_SHIFT)
+#define INTEL_GUC_MSG_DATA_SHIFT	16
+#define INTEL_GUC_MSG_DATA_MASK		(0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
+#define INTEL_GUC_MSG_CODE_SHIFT	0
+#define INTEL_GUC_MSG_CODE_MASK		(0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
+
+enum intel_guc_msg_type {
+	INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
+	INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
+};
+
+#endif /* _ABI_GUC_MESSAGES_ABI_H */
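These masks carve up the MMIO message header described above: type in bits 31:28, data in 27:16, and the code (a GuC action for requests, a status for responses) in 15:0. A hypothetical sketch of packing and checking such a header (helper names are illustrative):

	/* Illustrative only: build a REQUEST header carrying an action code. */
	static inline u32 guc_msg_request(u32 action, u32 data)
	{
		return (INTEL_GUC_MSG_TYPE_REQUEST << INTEL_GUC_MSG_TYPE_SHIFT) |
		       ((data << INTEL_GUC_MSG_DATA_SHIFT) & INTEL_GUC_MSG_DATA_MASK) |
		       ((action << INTEL_GUC_MSG_CODE_SHIFT) & INTEL_GUC_MSG_CODE_MASK);
	}

	/* Illustrative only: a RESPONSE carries the status in the code field. */
	static inline bool guc_msg_is_success(u32 msg)
	{
		return (msg & INTEL_GUC_MSG_TYPE_MASK) ==
		       (INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) &&
		       (msg & INTEL_GUC_MSG_CODE_MASK) == INTEL_GUC_RESPONSE_STATUS_SUCCESS;
	}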
@ -60,15 +60,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
|
|||
enum forcewake_domains fw_domains = 0;
|
||||
unsigned int i;
|
||||
|
||||
if (INTEL_GEN(gt->i915) >= 11) {
|
||||
guc->send_regs.base =
|
||||
i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
|
||||
guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
|
||||
} else {
|
||||
guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
|
||||
guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
|
||||
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
|
||||
}
|
||||
GEM_BUG_ON(!guc->send_regs.base);
|
||||
GEM_BUG_ON(!guc->send_regs.count);
|
||||
|
||||
for (i = 0; i < guc->send_regs.count; i++) {
|
||||
fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
|
||||
|
@ -96,12 +89,9 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
|
|||
assert_rpm_wakelock_held(>->i915->runtime_pm);
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
if (!guc->interrupts.enabled) {
|
||||
WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
|
||||
gt->pm_guc_events);
|
||||
guc->interrupts.enabled = true;
|
||||
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
|
||||
}
|
||||
WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
|
||||
gt->pm_guc_events);
|
||||
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
|
||||
spin_unlock_irq(>->irq_lock);
|
||||
}
|
||||
|
||||
|
@ -112,7 +102,6 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
|
|||
assert_rpm_wakelock_held(>->i915->runtime_pm);
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
guc->interrupts.enabled = false;
|
||||
|
||||
gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
|
||||
|
||||
|
@ -134,18 +123,14 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc)
|
|||
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
|
||||
{
|
||||
struct intel_gt *gt = guc_to_gt(guc);
|
||||
u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
if (!guc->interrupts.enabled) {
|
||||
u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
|
||||
|
||||
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
|
||||
intel_uncore_write(gt->uncore,
|
||||
GEN11_GUC_SG_INTR_ENABLE, events);
|
||||
intel_uncore_write(gt->uncore,
|
||||
GEN11_GUC_SG_INTR_MASK, ~events);
|
||||
guc->interrupts.enabled = true;
|
||||
}
|
||||
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
|
||||
intel_uncore_write(gt->uncore,
|
||||
GEN11_GUC_SG_INTR_ENABLE, events);
|
||||
intel_uncore_write(gt->uncore,
|
||||
GEN11_GUC_SG_INTR_MASK, ~events);
|
||||
spin_unlock_irq(>->irq_lock);
|
||||
}
|
||||
|
||||
|
@ -154,7 +139,6 @@ static void gen11_disable_guc_interrupts(struct intel_guc *guc)
|
|||
struct intel_gt *gt = guc_to_gt(guc);
|
||||
|
||||
spin_lock_irq(>->irq_lock);
|
||||
guc->interrupts.enabled = false;
|
||||
|
||||
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
|
||||
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
|
||||
|
@ -176,16 +160,23 @@ void intel_guc_init_early(struct intel_guc *guc)
|
|||
|
||||
mutex_init(&guc->send_mutex);
|
||||
spin_lock_init(&guc->irq_lock);
|
||||
if (INTEL_GEN(i915) >= 11) {
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
|
||||
guc->interrupts.reset = gen11_reset_guc_interrupts;
|
||||
guc->interrupts.enable = gen11_enable_guc_interrupts;
|
||||
guc->interrupts.disable = gen11_disable_guc_interrupts;
|
||||
guc->send_regs.base =
|
||||
i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
|
||||
guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
|
||||
|
||||
} else {
|
||||
guc->notify_reg = GUC_SEND_INTERRUPT;
|
||||
guc->interrupts.reset = gen9_reset_guc_interrupts;
|
||||
guc->interrupts.enable = gen9_enable_guc_interrupts;
|
||||
guc->interrupts.disable = gen9_disable_guc_interrupts;
|
||||
guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
|
||||
guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
|
||||
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -469,22 +460,6 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int intel_guc_sample_forcewake(struct intel_guc *guc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
|
||||
u32 action[2];
|
||||
|
||||
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
|
||||
/* WaRsDisableCoarsePowerGating:skl,cnl */
|
||||
if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
|
||||
action[1] = 0;
|
||||
else
|
||||
/* bit 0 and 1 are for Render and Media domain separately */
|
||||
action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
|
||||
|
||||
return intel_guc_send(guc, action, ARRAY_SIZE(action));
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
|
||||
* @guc: intel_guc structure
|
||||
|
|
|
@ -33,7 +33,6 @@ struct intel_guc {
|
|||
unsigned int msg_enabled_mask;
|
||||
|
||||
struct {
|
||||
bool enabled;
|
||||
void (*reset)(struct intel_guc *guc);
|
||||
void (*enable)(struct intel_guc *guc);
|
||||
void (*disable)(struct intel_guc *guc);
|
||||
|
@ -128,7 +127,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
|
|||
u32 *response_buf, u32 response_buf_size);
|
||||
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
|
||||
const u32 *payload, u32 len);
|
||||
int intel_guc_sample_forcewake(struct intel_guc *guc);
|
||||
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
|
||||
int intel_guc_suspend(struct intel_guc *guc);
|
||||
int intel_guc_resume(struct intel_guc *guc);
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include "gt/intel_gt.h"
|
||||
#include "gt/intel_lrc.h"
|
||||
#include "intel_guc_ads.h"
|
||||
#include "intel_guc_fwif.h"
|
||||
#include "intel_uc.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
|
@@ -104,7 +105,7 @@ static void guc_mapping_table_init(struct intel_gt *gt,
GUC_MAX_INSTANCES_PER_CLASS;

for_each_engine(engine, gt, id) {
u8 guc_class = engine->class;
u8 guc_class = engine_class_to_guc_class(engine->class);

system_info->mapping_table[guc_class][engine->instance] =
engine->instance;

@@ -124,7 +125,7 @@ static void __guc_ads_init(struct intel_guc *guc)
struct __guc_ads_blob *blob = guc->ads_blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
u8 engine_class;
u8 engine_class, guc_class;

/* GuC scheduling policies */
guc_policies_init(&blob->policies);

@@ -140,29 +141,32 @@ static void __guc_ads_init(struct intel_guc *guc)
for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
if (engine_class == OTHER_CLASS)
continue;

guc_class = engine_class_to_guc_class(engine_class);

/*
* TODO: Set context pointer to default state to allow
* GuC to re-init guilty contexts after internal reset.
*/
blob->ads.golden_context_lrca[engine_class] = 0;
blob->ads.eng_state_size[engine_class] =
blob->ads.golden_context_lrca[guc_class] = 0;
blob->ads.eng_state_size[guc_class] =
intel_engine_context_size(guc_to_gt(guc),
engine_class) -
skipped_size;
}

/* System info */
blob->system_info.engine_enabled_masks[RENDER_CLASS] = 1;
blob->system_info.engine_enabled_masks[COPY_ENGINE_CLASS] = 1;
blob->system_info.engine_enabled_masks[VIDEO_DECODE_CLASS] = VDBOX_MASK(gt);
blob->system_info.engine_enabled_masks[VIDEO_ENHANCEMENT_CLASS] = VEBOX_MASK(gt);
blob->system_info.engine_enabled_masks[GUC_RENDER_CLASS] = 1;
blob->system_info.engine_enabled_masks[GUC_BLITTER_CLASS] = 1;
blob->system_info.engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt);
blob->system_info.engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt);

blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] =
hweight8(gt->info.sseu.slice_mask);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] =
gt->info.vdbox_sfc_access;

if (INTEL_GEN(i915) >= 12 && !IS_DGFX(i915)) {
if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
u32 distdbreg = intel_uncore_read(gt->uncore,
GEN12_DIST_DBS_POPULATED);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] =

@@ -7,46 +7,6 @@
#include "intel_guc_ct.h"
#include "gt/intel_gt.h"

#define CT_ERROR(_ct, _fmt, ...) \
DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...) do { } while (0)
#endif

struct ct_request {
struct list_head link;
u32 fence;
u32 status;
u32 response_len;
u32 *response_buf;
};

struct ct_incoming_request {
struct list_head link;
u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
* intel_guc_ct_init_early - Initialize CT state without requiring device access
* @ct: pointer to CT struct
*/
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
return container_of(ct, struct intel_guc, ct);

@@ -62,9 +22,82 @@ static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
return ct_to_gt(ct)->i915;
}

static inline struct device *ct_to_dev(struct intel_guc_ct *ct)
static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
{
return ct_to_i915(ct)->drm.dev;
return &ct_to_i915(ct)->drm;
}

#define CT_ERROR(_ct, _fmt, ...) \
drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...) do { } while (0)
#endif
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
i915_probe_error(ct_to_i915(ct), "CT: " _fmt, ##__VA_ARGS__)

/**
* DOC: CTB Blob
*
* We allocate single blob to hold both CTB descriptors and buffers:
*
* +--------+-----------------------------------------------+------+
* | offset |                    contents                   | size |
* +========+===============================================+======+
* | 0x0000 | H2G `CTB Descriptor`_ (send)                  |      |
* +--------+-----------------------------------------------+  4K  |
* | 0x0800 | G2H `CTB Descriptor`_ (recv)                  |      |
* +--------+-----------------------------------------------+------+
* | 0x1000 | H2G `CT Buffer`_ (send)                       | n*4K |
* |        |                                               |      |
* +--------+-----------------------------------------------+------+
* | 0x1000 | G2H `CT Buffer`_ (recv)                       | m*4K |
* | + n*4K |                                               |      |
* +--------+-----------------------------------------------+------+
*
* Size of each `CT Buffer`_ must be multiple of 4K.
* As we don't expect too many messages, for now use minimum sizes.
*/
#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE (SZ_4K)
#define CTB_G2H_BUFFER_SIZE (SZ_4K)

struct ct_request {
struct list_head link;
u32 fence;
u32 status;
u32 response_len;
u32 *response_buf;
};

struct ct_incoming_msg {
struct list_head link;
u32 size;
u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_receive_tasklet_func(struct tasklet_struct *t);
static void ct_incoming_request_worker_func(struct work_struct *w);

/**
* intel_guc_ct_init_early - Initialize CT state without requiring device access
* @ct: pointer to CT struct
*/
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
spin_lock_init(&ct->ctbs.send.lock);
spin_lock_init(&ct->ctbs.recv.lock);
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)

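The layout in the DOC: CTB Blob comment above is exactly what intel_guc_ct_init() computes further down. A minimal standalone sketch (not kernel code) that recomputes the offset column of that table, assuming CTB_DESC_SIZE rounds up to 2K and both command buffers use the minimum 4K sizes:

#include <stdio.h>

int main(void)
{
	const unsigned int desc_size = 2048;	/* CTB_DESC_SIZE after ALIGN(..., SZ_2K) */
	const unsigned int h2g_size = 4096;	/* CTB_H2G_BUFFER_SIZE */
	const unsigned int g2h_size = 4096;	/* CTB_G2H_BUFFER_SIZE */

	unsigned int send_desc = 0;			/* 0x0000 */
	unsigned int recv_desc = desc_size;		/* 0x0800 */
	unsigned int send_cmds = 2 * desc_size;		/* 0x1000 */
	unsigned int recv_cmds = send_cmds + h2g_size;	/* 0x1000 + n*4K */
	unsigned int blob_size = recv_cmds + g2h_size;

	printf("send desc 0x%04x recv desc 0x%04x\n", send_desc, recv_desc);
	printf("send cmds 0x%04x recv cmds 0x%04x blob %u bytes\n",
	       send_cmds, recv_cmds, blob_size);
	return 0;
}

The resulting blob_size matches the 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE expression used by intel_guc_ct_init() below.
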
@@ -88,11 +121,22 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
desc->owner = CTB_OWNER_HOST;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb, u32 cmds_addr)
{
desc->head = 0;
desc->tail = 0;
desc->is_in_error = 0;
guc_ct_buffer_desc_init(ctb->desc, cmds_addr, ctb->size);
}

static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
struct guc_ct_buffer_desc *desc,
u32 *cmds, u32 size)
{
GEM_BUG_ON(size % 4);

ctb->desc = desc;
ctb->cmds = cmds;
ctb->size = size;

guc_ct_buffer_reset(ctb, 0);
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,

@@ -153,48 +197,42 @@ static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
struct guc_ct_buffer_desc *desc;
u32 blob_size;
u32 cmds_size;
void *blob;
u32 *cmds;
int err;
int i;

GEM_BUG_ON(ct->vma);

/* We allocate 1 page to hold both descriptors and both buffers.
*       ___________.....................
*      |desc (SEND)|                    :
*      |___________|                    PAGE/4
*      :___________....................:
*      |desc (RECV)|                    :
*      |___________|                    PAGE/4
*      :_______________________________:
*      |cmds (SEND)                     |
*      |                                PAGE/4
*      |_______________________________|
*      |cmds (RECV)                     |
*      |                                PAGE/4
*      |_______________________________|
*
* Each message can use a maximum of 32 dwords and we don't expect to
* have more than 1 in flight at any time, so we have enough space.
* Some logic further ahead will rely on the fact that there is only 1
* page and that it is always mapped, so if the size is changed the
* other code will need updating as well.
*/

err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
if (unlikely(err)) {
CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err);
CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
blob_size, ERR_PTR(err));
return err;
}

CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma));
CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);

/* store pointers to desc and cmds */
for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
}
/* store pointers to desc and cmds for send ctb */
desc = blob;
cmds = blob + 2 * CTB_DESC_SIZE;
cmds_size = CTB_H2G_BUFFER_SIZE;
CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send",
ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);

guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size);

/* store pointers to desc and cmds for recv ctb */
desc = blob + CTB_DESC_SIZE;
cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
cmds_size = CTB_G2H_BUFFER_SIZE;
CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv",
ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);

guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size);

return 0;
}

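intel_guc_ct_enable() further below recovers the GPU-visible address of each region by adding the pointer difference inside the CPU mapping to the GGTT base of the vma, mirroring the ptrdiff() calls above. A minimal userspace sketch (not kernel code) of the same idea, using malloc() in place of the vma mapping and an arbitrary example value in place of intel_guc_ggtt_offset():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const size_t desc_size = 2048, h2g_size = 4096, g2h_size = 4096;
	const uint32_t base = 0x100000;	/* stand-in for the GGTT offset of the vma */
	char *blob = malloc(2 * desc_size + h2g_size + g2h_size);
	char *send_cmds, *recv_cmds;

	if (!blob)
		return 1;

	send_cmds = blob + 2 * desc_size;
	recv_cmds = blob + 2 * desc_size + h2g_size;

	/* base + (offset within the blob) is the address handed to the GPU */
	printf("send cmds at 0x%x\n", base + (unsigned int)(send_cmds - blob));
	printf("recv cmds at 0x%x\n", base + (unsigned int)(recv_cmds - blob));

	free(blob);
	return 0;
}
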
@@ -209,6 +247,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
GEM_BUG_ON(ct->enabled);

tasklet_kill(&ct->receive_tasklet);
i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
memset(ct, 0, sizeof(*ct));
}

@@ -222,37 +261,38 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
u32 base, cmds, size;
u32 base, cmds;
void *blob;
int err;
int i;

GEM_BUG_ON(ct->enabled);

/* vma should be already allocated and map'ed */
GEM_BUG_ON(!ct->vma);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
base = intel_guc_ggtt_offset(guc, ct->vma);

/* (re)initialize descriptors
* cmds buffers are in the second half of the blob page
*/
for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
size = PAGE_SIZE / 4;
CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
}
/* blob should start with send descriptor */
blob = __px_vaddr(ct->vma->obj);
GEM_BUG_ON(blob != ct->ctbs.send.desc);

/* (re)initialize descriptors */
cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
guc_ct_buffer_reset(&ct->ctbs.send, cmds);

cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
guc_ct_buffer_reset(&ct->ctbs.recv, cmds);

/*
* Register both CT buffers starting with RECV buffer.
* Descriptors are in first half of the blob.
*/
err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.recv.desc, blob),
INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
goto err_out;

err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.send.desc, blob),
INTEL_GUC_CT_BUFFER_TYPE_SEND);
if (unlikely(err))
goto err_deregister;

@@ -264,7 +304,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
err_deregister:
ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
CT_ERROR(ct, "Failed to open open CT channel (err=%d)\n", err);
CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
return err;
}

@@ -292,6 +332,28 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
return ++ct->requests.last_fence;
}

static void write_barrier(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
struct intel_gt *gt = guc_to_gt(guc);

if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
GEM_BUG_ON(guc->send_regs.fw_domains);
/*
* This register is used by the i915 and GuC for MMIO based
* communication. Once we are in this code CTBs are the only
* method the i915 uses to communicate with the GuC so it is
* safe to write to this register (a value of 0 is NOP for MMIO
* communication). If we ever start mixing CTBs and MMIOs a new
* register will have to be chosen.
*/
intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
} else {
/* wmb() sufficient for a barrier if in smem */
wmb();
}
}

/**
* DOC: CTB Host to GuC request
*

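write_barrier() only has to guarantee that the command buffer contents are visible before the descriptor tail is updated: an uncached MMIO write when the CTB lives in device local memory, a plain wmb() otherwise. As a rough analogy only (C11 release ordering in shared memory, not the lmem mechanism above), the same publish-after-barrier pattern looks like this standalone sketch:

#include <stdatomic.h>
#include <stdint.h>

static uint32_t cmds[1024];		/* stand-in for the H2G command buffer */
static _Atomic uint32_t tail;		/* stand-in for desc->tail, in dwords */

void publish(const uint32_t *msg, uint32_t len)
{
	uint32_t t = atomic_load_explicit(&tail, memory_order_relaxed);
	uint32_t i;

	for (i = 0; i < len; i++)
		cmds[(t + i) % 1024] = msg[i];

	/* release store: payload writes cannot be reordered past the tail update */
	atomic_store_explicit(&tail, (t + len) % 1024, memory_order_release);
}
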
@@ -313,14 +375,13 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
static int ct_write(struct intel_guc_ct *ct,
const u32 *action,
u32 len /* in dwords */,
u32 fence,
bool want_response)
u32 fence)
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
struct guc_ct_buffer_desc *desc = ctb->desc;
u32 head = desc->head;
u32 tail = desc->tail;
u32 size = desc->size;
u32 size = ctb->size;
u32 used;
u32 header;
u32 *cmds = ctb->cmds;

@@ -329,7 +390,7 @@ static int ct_write(struct intel_guc_ct *ct,
if (unlikely(desc->is_in_error))
return -EPIPE;

if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
if (unlikely(!IS_ALIGNED(head | tail, 4) ||
(tail | head) >= size))
goto corrupted;

@@ -358,8 +419,7 @@ static int ct_write(struct intel_guc_ct *ct,
* DW2+: action data
*/
header = (len << GUC_CT_MSG_LEN_SHIFT) |
(GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
(want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
GUC_CT_MSG_SEND_STATUS |
(action[0] << GUC_CT_MSG_ACTION_SHIFT);

CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",

@@ -377,6 +437,12 @@ static int ct_write(struct intel_guc_ct *ct,
}
GEM_BUG_ON(tail > size);

/*
* make sure H2G buffer update and LRC tail update (if this triggering a
* submission) are visible before updating the descriptor tail
*/
write_barrier(ct);

/* now update desc tail (back in bytes) */
desc->tail = tail * 4;
return 0;

@@ -388,56 +454,6 @@ corrupted:
return -EPIPE;
}
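The header dword built by ct_write() packs the payload length, the SEND_STATUS flag and the action code into a single u32, using the GUC_CT_MSG_* shifts and masks defined in the fwif header later in this diff. A small standalone sketch (not kernel code) that packs and unpacks such a header, reusing those bit positions and the 0x4505 register-buffer action code as an example value:

#include <assert.h>
#include <stdint.h>

#define CT_MSG_LEN_SHIFT	0		/* GUC_CT_MSG_LEN_SHIFT */
#define CT_MSG_LEN_MASK		0x1F		/* GUC_CT_MSG_LEN_MASK */
#define CT_MSG_SEND_STATUS	(1 << 10)	/* GUC_CT_MSG_SEND_STATUS */
#define CT_MSG_ACTION_SHIFT	16		/* GUC_CT_MSG_ACTION_SHIFT */
#define CT_MSG_ACTION_MASK	0xFFFF		/* GUC_CT_MSG_ACTION_MASK */

static uint32_t pack_header(uint32_t action, uint32_t len)
{
	return (len << CT_MSG_LEN_SHIFT) | CT_MSG_SEND_STATUS |
	       (action << CT_MSG_ACTION_SHIFT);
}

int main(void)
{
	/* 0x4505 = REGISTER_COMMAND_TRANSPORT_BUFFER, 4 payload dwords */
	uint32_t h = pack_header(0x4505, 4);

	assert(((h >> CT_MSG_ACTION_SHIFT) & CT_MSG_ACTION_MASK) == 0x4505);
	assert(((h >> CT_MSG_LEN_SHIFT) & CT_MSG_LEN_MASK) == 4);
	assert(h & CT_MSG_SEND_STATUS);
	return 0;
}
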
|
||||
/**
|
||||
* wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
|
||||
* @desc: buffer descriptor
|
||||
* @fence: response fence
|
||||
* @status: placeholder for status
|
||||
*
|
||||
* Guc will update CT buffer descriptor with new fence and status
|
||||
* after processing the command identified by the fence. Wait for
|
||||
* specified fence and then read from the descriptor status of the
|
||||
* command.
|
||||
*
|
||||
* Return:
|
||||
* * 0 response received (status is valid)
|
||||
* * -ETIMEDOUT no response within hardcoded timeout
|
||||
* * -EPROTO no response, CT buffer is in error
|
||||
*/
|
||||
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
|
||||
u32 fence,
|
||||
u32 *status)
|
||||
{
|
||||
int err;
|
||||
|
||||
/*
|
||||
* Fast commands should complete in less than 10us, so sample quickly
|
||||
* up to that length of time, then switch to a slower sleep-wait loop.
|
||||
* No GuC command should ever take longer than 10ms.
|
||||
*/
|
||||
#define done (READ_ONCE(desc->fence) == fence)
|
||||
err = wait_for_us(done, 10);
|
||||
if (err)
|
||||
err = wait_for(done, 10);
|
||||
#undef done
|
||||
|
||||
if (unlikely(err)) {
|
||||
DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
|
||||
fence, desc->fence);
|
||||
|
||||
if (WARN_ON(desc->is_in_error)) {
|
||||
/* Something went wrong with the messaging, try to reset
|
||||
* the buffer and hope for the best
|
||||
*/
|
||||
guc_ct_buffer_desc_reset(desc);
|
||||
err = -EPROTO;
|
||||
}
|
||||
}
|
||||
|
||||
*status = desc->status;
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* wait_for_ct_request_update - Wait for CT request state update.
|
||||
* @req: pointer to pending request
|
||||
|
@ -481,8 +497,6 @@ static int ct_send(struct intel_guc_ct *ct,
|
|||
u32 response_buf_size,
|
||||
u32 *status)
|
||||
{
|
||||
struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
|
||||
struct guc_ct_buffer_desc *desc = ctb->desc;
|
||||
struct ct_request request;
|
||||
unsigned long flags;
|
||||
u32 fence;
|
||||
|
@ -493,26 +507,28 @@ static int ct_send(struct intel_guc_ct *ct,
|
|||
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
|
||||
GEM_BUG_ON(!response_buf && response_buf_size);
|
||||
|
||||
spin_lock_irqsave(&ct->ctbs.send.lock, flags);
|
||||
|
||||
fence = ct_get_next_fence(ct);
|
||||
request.fence = fence;
|
||||
request.status = 0;
|
||||
request.response_len = response_buf_size;
|
||||
request.response_buf = response_buf;
|
||||
|
||||
spin_lock_irqsave(&ct->requests.lock, flags);
|
||||
spin_lock(&ct->requests.lock);
|
||||
list_add_tail(&request.link, &ct->requests.pending);
|
||||
spin_unlock_irqrestore(&ct->requests.lock, flags);
|
||||
spin_unlock(&ct->requests.lock);
|
||||
|
||||
err = ct_write(ct, action, len, fence);
|
||||
|
||||
spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);
|
||||
|
||||
err = ct_write(ct, action, len, fence, !!response_buf);
|
||||
if (unlikely(err))
|
||||
goto unlink;
|
||||
|
||||
intel_guc_notify(ct_to_guc(ct));
|
||||
|
||||
if (response_buf)
|
||||
err = wait_for_ct_request_update(&request, status);
|
||||
else
|
||||
err = wait_for_ctb_desc_update(desc, fence, status);
|
||||
err = wait_for_ct_request_update(&request, status);
|
||||
if (unlikely(err))
|
||||
goto unlink;
|
||||
|
||||
|
@ -547,7 +563,6 @@ unlink:
|
|||
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
|
||||
u32 *response_buf, u32 response_buf_size)
|
||||
{
|
||||
struct intel_guc *guc = ct_to_guc(ct);
|
||||
u32 status = ~0; /* undefined */
|
||||
int ret;
|
||||
|
||||
|
@ -556,8 +571,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
mutex_lock(&guc->send_mutex);
|
||||
|
||||
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
|
||||
if (unlikely(ret < 0)) {
|
||||
CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
|
||||
|
@ -567,7 +580,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
|
|||
action[0], ret, ret);
|
||||
}
|
||||
|
||||
mutex_unlock(&guc->send_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -586,22 +598,42 @@ static inline bool ct_header_is_response(u32 header)
|
|||
return !!(header & GUC_CT_MSG_IS_RESPONSE);
|
||||
}
|
||||
|
||||
static int ct_read(struct intel_guc_ct *ct, u32 *data)
|
||||
static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
|
||||
{
|
||||
struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
|
||||
struct ct_incoming_msg *msg;
|
||||
|
||||
msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
|
||||
if (msg)
|
||||
msg->size = num_dwords;
|
||||
return msg;
|
||||
}
|
||||
|
||||
static void ct_free_msg(struct ct_incoming_msg *msg)
|
||||
{
|
||||
kfree(msg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return: number available remaining dwords to read (0 if empty)
|
||||
* or a negative error code on failure
|
||||
*/
|
||||
static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
|
||||
{
|
||||
struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
|
||||
struct guc_ct_buffer_desc *desc = ctb->desc;
|
||||
u32 head = desc->head;
|
||||
u32 tail = desc->tail;
|
||||
u32 size = desc->size;
|
||||
u32 size = ctb->size;
|
||||
u32 *cmds = ctb->cmds;
|
||||
s32 available;
|
||||
unsigned int len;
|
||||
unsigned int i;
|
||||
u32 header;
|
||||
|
||||
if (unlikely(desc->is_in_error))
|
||||
return -EPIPE;
|
||||
|
||||
if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
|
||||
if (unlikely(!IS_ALIGNED(head | tail, 4) ||
|
||||
(tail | head) >= size))
|
||||
goto corrupted;
|
||||
|
||||
|
@ -612,8 +644,10 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
|
|||
|
||||
/* tail == head condition indicates empty */
|
||||
available = tail - head;
|
||||
if (unlikely(available == 0))
|
||||
return -ENODATA;
|
||||
if (unlikely(available == 0)) {
|
||||
*msg = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* beware of buffer wrap case */
|
||||
if (unlikely(available < 0))
|
||||
|
@ -621,14 +655,14 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
|
|||
CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
|
||||
GEM_BUG_ON(available < 0);
|
||||
|
||||
data[0] = cmds[head];
|
||||
header = cmds[head];
|
||||
head = (head + 1) % size;
|
||||
|
||||
/* message len with header */
|
||||
len = ct_header_get_len(data[0]) + 1;
|
||||
len = ct_header_get_len(header) + 1;
|
||||
if (unlikely(len > (u32)available)) {
|
||||
CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
|
||||
4, data,
|
||||
4, &header,
|
||||
4 * (head + available - 1 > size ?
|
||||
size - head : available - 1), &cmds[head],
|
||||
4 * (head + available - 1 > size ?
|
||||
|
@ -636,14 +670,27 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
|
|||
goto corrupted;
|
||||
}
|
||||
|
||||
*msg = ct_alloc_msg(len);
|
||||
if (!*msg) {
|
||||
CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
|
||||
4, &header,
|
||||
4 * (head + available - 1 > size ?
|
||||
size - head : available - 1), &cmds[head],
|
||||
4 * (head + available - 1 > size ?
|
||||
available - 1 - size + head : 0), &cmds[0]);
|
||||
return available;
|
||||
}
|
||||
|
||||
(*msg)->msg[0] = header;
|
||||
|
||||
for (i = 1; i < len; i++) {
|
||||
data[i] = cmds[head];
|
||||
(*msg)->msg[i] = cmds[head];
|
||||
head = (head + 1) % size;
|
||||
}
|
||||
CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
|
||||
CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
|
||||
|
||||
desc->head = head * 4;
|
||||
return 0;
|
||||
return available - len;
|
||||
|
||||
corrupted:
|
||||
CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
|
||||
|
@ -670,39 +717,39 @@ corrupted:
|
|||
* ^-----------------------len-----------------------^
|
||||
*/
|
||||
|
||||
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
|
||||
static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
|
||||
{
|
||||
u32 header = msg[0];
|
||||
u32 header = response->msg[0];
|
||||
u32 len = ct_header_get_len(header);
|
||||
u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
|
||||
u32 fence;
|
||||
u32 status;
|
||||
u32 datalen;
|
||||
struct ct_request *req;
|
||||
unsigned long flags;
|
||||
bool found = false;
|
||||
int err = 0;
|
||||
|
||||
GEM_BUG_ON(!ct_header_is_response(header));
|
||||
GEM_BUG_ON(!in_irq());
|
||||
|
||||
/* Response payload shall at least include fence and status */
|
||||
if (unlikely(len < 2)) {
|
||||
CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
|
||||
CT_ERROR(ct, "Corrupted response (len %u)\n", len);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
fence = msg[1];
|
||||
status = msg[2];
|
||||
fence = response->msg[1];
|
||||
status = response->msg[2];
|
||||
datalen = len - 2;
|
||||
|
||||
/* Format of the status follows RESPONSE message */
|
||||
if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
|
||||
CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
|
||||
CT_ERROR(ct, "Corrupted response (status %#x)\n", status);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
|
||||
|
||||
spin_lock(&ct->requests.lock);
|
||||
spin_lock_irqsave(&ct->requests.lock, flags);
|
||||
list_for_each_entry(req, &ct->requests.pending, link) {
|
||||
if (unlikely(fence != req->fence)) {
|
||||
CT_DEBUG(ct, "request %u awaits response\n",
|
||||
|
@ -710,58 +757,75 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
|
|||
continue;
|
||||
}
|
||||
if (unlikely(datalen > req->response_len)) {
|
||||
CT_ERROR(ct, "Response for %u is too long %*ph\n",
|
||||
req->fence, msgsize, msg);
|
||||
datalen = 0;
|
||||
CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
|
||||
req->fence, datalen, req->response_len);
|
||||
datalen = min(datalen, req->response_len);
|
||||
err = -EMSGSIZE;
|
||||
}
|
||||
if (datalen)
|
||||
memcpy(req->response_buf, msg + 3, 4 * datalen);
|
||||
memcpy(req->response_buf, response->msg + 3, 4 * datalen);
|
||||
req->response_len = datalen;
|
||||
WRITE_ONCE(req->status, status);
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
spin_unlock(&ct->requests.lock);
|
||||
spin_unlock_irqrestore(&ct->requests.lock, flags);
|
||||
|
||||
if (!found)
|
||||
CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
|
||||
if (!found) {
|
||||
CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
|
||||
return -ENOKEY;
|
||||
}
|
||||
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
|
||||
ct_free_msg(response);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ct_process_request(struct intel_guc_ct *ct,
|
||||
u32 action, u32 len, const u32 *payload)
|
||||
static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
|
||||
{
|
||||
struct intel_guc *guc = ct_to_guc(ct);
|
||||
u32 header, action, len;
|
||||
const u32 *payload;
|
||||
int ret;
|
||||
|
||||
header = request->msg[0];
|
||||
payload = &request->msg[1];
|
||||
action = ct_header_get_action(header);
|
||||
len = ct_header_get_len(header);
|
||||
|
||||
CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
|
||||
|
||||
switch (action) {
|
||||
case INTEL_GUC_ACTION_DEFAULT:
|
||||
ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
|
||||
if (unlikely(ret))
|
||||
goto fail_unexpected;
|
||||
break;
|
||||
|
||||
default:
|
||||
fail_unexpected:
|
||||
CT_ERROR(ct, "Unexpected request %x %*ph\n",
|
||||
action, 4 * len, payload);
|
||||
ret = -EOPNOTSUPP;
|
||||
break;
|
||||
}
|
||||
|
||||
if (unlikely(ret)) {
|
||||
CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
|
||||
action, ERR_PTR(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
ct_free_msg(request);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct ct_incoming_request *request;
|
||||
u32 header;
|
||||
u32 *payload;
|
||||
struct ct_incoming_msg *request;
|
||||
bool done;
|
||||
int err;
|
||||
|
||||
spin_lock_irqsave(&ct->requests.lock, flags);
|
||||
request = list_first_entry_or_null(&ct->requests.incoming,
|
||||
struct ct_incoming_request, link);
|
||||
struct ct_incoming_msg, link);
|
||||
if (request)
|
||||
list_del(&request->link);
|
||||
done = !!list_empty(&ct->requests.incoming);
|
||||
|
@ -770,14 +834,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
|
|||
if (!request)
|
||||
return true;
|
||||
|
||||
header = request->msg[0];
|
||||
payload = &request->msg[1];
|
||||
ct_process_request(ct,
|
||||
ct_header_get_action(header),
|
||||
ct_header_get_len(header),
|
||||
payload);
|
||||
err = ct_process_request(ct, request);
|
||||
if (unlikely(err)) {
|
||||
CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
|
||||
ERR_PTR(err), 4 * request->size, request->msg);
|
||||
ct_free_msg(request);
|
||||
}
|
||||
|
||||
kfree(request);
|
||||
return done;
|
||||
}
|
||||
|
||||
|
@ -810,22 +873,11 @@ static void ct_incoming_request_worker_func(struct work_struct *w)
|
|||
* ^-----------------------len-----------------------^
|
||||
*/
|
||||
|
||||
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
|
||||
static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
|
||||
{
|
||||
u32 header = msg[0];
|
||||
u32 len = ct_header_get_len(header);
|
||||
u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
|
||||
struct ct_incoming_request *request;
|
||||
unsigned long flags;
|
||||
|
||||
GEM_BUG_ON(ct_header_is_response(header));
|
||||
|
||||
request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
|
||||
if (unlikely(!request)) {
|
||||
CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
|
||||
return 0; /* XXX: -ENOMEM ? */
|
||||
}
|
||||
memcpy(request->msg, msg, msgsize);
|
||||
GEM_BUG_ON(ct_header_is_response(request->msg[0]));
|
||||
|
||||
spin_lock_irqsave(&ct->requests.lock, flags);
|
||||
list_add_tail(&request->link, &ct->requests.incoming);
|
||||
|
@ -835,28 +887,74 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
|
||||
{
|
||||
u32 header = msg->msg[0];
|
||||
int err;
|
||||
|
||||
if (ct_header_is_response(header))
|
||||
err = ct_handle_response(ct, msg);
|
||||
else
|
||||
err = ct_handle_request(ct, msg);
|
||||
|
||||
if (unlikely(err)) {
|
||||
CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
|
||||
ERR_PTR(err), 4 * msg->size, msg->msg);
|
||||
ct_free_msg(msg);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Return: number available remaining dwords to read (0 if empty)
|
||||
* or a negative error code on failure
|
||||
*/
|
||||
static int ct_receive(struct intel_guc_ct *ct)
|
||||
{
|
||||
struct ct_incoming_msg *msg = NULL;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
|
||||
ret = ct_read(ct, &msg);
|
||||
spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (msg)
|
||||
ct_handle_msg(ct, msg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ct_try_receive_message(struct intel_guc_ct *ct)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (GEM_WARN_ON(!ct->enabled))
|
||||
return;
|
||||
|
||||
ret = ct_receive(ct);
|
||||
if (ret > 0)
|
||||
tasklet_hi_schedule(&ct->receive_tasklet);
|
||||
}
|
||||
|
||||
static void ct_receive_tasklet_func(struct tasklet_struct *t)
|
||||
{
|
||||
struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
|
||||
|
||||
ct_try_receive_message(ct);
|
||||
}
|
||||
|
||||
/*
|
||||
* When we're communicating with the GuC over CT, GuC uses events
|
||||
* to notify us about new messages being posted on the RECV buffer.
|
||||
*/
|
||||
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
|
||||
{
|
||||
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
|
||||
int err = 0;
|
||||
|
||||
if (unlikely(!ct->enabled)) {
|
||||
WARN(1, "Unexpected GuC event received while CT disabled!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
do {
|
||||
err = ct_read(ct, msg);
|
||||
if (err)
|
||||
break;
|
||||
|
||||
if (ct_header_is_response(msg[0]))
|
||||
err = ct_handle_response(ct, msg);
|
||||
else
|
||||
err = ct_handle_request(ct, msg);
|
||||
} while (!err);
|
||||
ct_try_receive_message(ct);
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#ifndef _INTEL_GUC_CT_H_
|
||||
#define _INTEL_GUC_CT_H_
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
|
@ -27,12 +28,16 @@ struct intel_guc;
|
|||
* record (command transport buffer descriptor) and the actual buffer which
|
||||
* holds the commands.
|
||||
*
|
||||
* @lock: protects access to the commands buffer and buffer descriptor
|
||||
* @desc: pointer to the buffer descriptor
|
||||
* @cmds: pointer to the commands buffer
|
||||
* @size: size of the commands buffer
|
||||
*/
|
||||
struct intel_guc_ct_buffer {
|
||||
spinlock_t lock;
|
||||
struct guc_ct_buffer_desc *desc;
|
||||
u32 *cmds;
|
||||
u32 size;
|
||||
};
|
||||
|
||||
|
||||
|
@ -45,8 +50,13 @@ struct intel_guc_ct {
|
|||
struct i915_vma *vma;
|
||||
bool enabled;
|
||||
|
||||
/* buffers for sending(0) and receiving(1) commands */
|
||||
struct intel_guc_ct_buffer ctbs[2];
|
||||
/* buffers for sending and receiving commands */
|
||||
struct {
|
||||
struct intel_guc_ct_buffer send;
|
||||
struct intel_guc_ct_buffer recv;
|
||||
} ctbs;
|
||||
|
||||
struct tasklet_struct receive_tasklet;
|
||||
|
||||
struct {
|
||||
u32 last_fence; /* last fence used to send request */
|
||||
|
|
|
@ -30,7 +30,7 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
|
|||
else
|
||||
intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
|
||||
|
||||
if (IS_GEN(uncore->i915, 9)) {
|
||||
if (GRAPHICS_VER(uncore->i915) == 9) {
|
||||
/* DOP Clock Gating Enable for GuC clocks */
|
||||
intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
|
||||
0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
|
||||
|
|
|
@@ -9,6 +9,13 @@
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include "gt/intel_engine_types.h"

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_communication_ctb_abi.h"
#include "abi/guc_messages_abi.h"

#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
#define GUC_CLIENT_PRIORITY_HIGH 1

@@ -26,6 +33,12 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)

#define GUC_RENDER_CLASS 0
#define GUC_VIDEO_CLASS 1
#define GUC_VIDEOENHANCE_CLASS 2
#define GUC_BLITTER_CLASS 3
#define GUC_RESERVED_CLASS 4
#define GUC_LAST_ENGINE_CLASS GUC_RESERVED_CLASS
#define GUC_MAX_ENGINE_CLASSES 16
#define GUC_MAX_INSTANCES_PER_CLASS 32

@@ -123,6 +136,25 @@
#define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
(((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)

static inline u8 engine_class_to_guc_class(u8 class)
{
BUILD_BUG_ON(GUC_RENDER_CLASS != RENDER_CLASS);
BUILD_BUG_ON(GUC_BLITTER_CLASS != COPY_ENGINE_CLASS);
BUILD_BUG_ON(GUC_VIDEO_CLASS != VIDEO_DECODE_CLASS);
BUILD_BUG_ON(GUC_VIDEOENHANCE_CLASS != VIDEO_ENHANCEMENT_CLASS);
GEM_BUG_ON(class > MAX_ENGINE_CLASS || class == OTHER_CLASS);

return class;
}

static inline u8 guc_class_to_engine_class(u8 guc_class)
{
GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS);
GEM_BUG_ON(guc_class == GUC_RESERVED_CLASS);

return guc_class;
}

/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
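engine_class_to_guc_class() and guc_class_to_engine_class() can simply return their argument because the BUILD_BUG_ON()s above pin the GuC class values to the matching i915 engine class values. A standalone sketch (not kernel code) of that identity round trip, assuming the four class values 0 through 3 from the defines above:

#include <assert.h>
#include <stdint.h>

enum { CLS_RENDER = 0, CLS_VIDEO = 1, CLS_VIDEO_ENHANCE = 2, CLS_COPY = 3 };

static uint8_t to_guc_class(uint8_t class) { return class; }			/* identity */
static uint8_t to_engine_class(uint8_t guc_class) { return guc_class; }	/* identity */

int main(void)
{
	uint8_t c;

	for (c = CLS_RENDER; c <= CLS_COPY; c++)
		assert(to_engine_class(to_guc_class(c)) == c);
	return 0;
}
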
@ -207,104 +239,6 @@ struct guc_stage_desc {
|
|||
u64 desc_private;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* DOC: CTB based communication
|
||||
*
|
||||
* The CTB (command transport buffer) communication between Host and GuC
|
||||
* is based on u32 data stream written to the shared buffer. One buffer can
|
||||
* be used to transmit data only in one direction (one-directional channel).
|
||||
*
|
||||
* Current status of the each buffer is stored in the buffer descriptor.
|
||||
* Buffer descriptor holds tail and head fields that represents active data
|
||||
* stream. The tail field is updated by the data producer (sender), and head
|
||||
* field is updated by the data consumer (receiver)::
|
||||
*
|
||||
* +------------+
|
||||
* | DESCRIPTOR | +=================+============+========+
|
||||
* +============+ | | MESSAGE(s) | |
|
||||
* | address |--------->+=================+============+========+
|
||||
* +------------+
|
||||
* | head | ^-----head--------^
|
||||
* +------------+
|
||||
* | tail | ^---------tail-----------------^
|
||||
* +------------+
|
||||
* | size | ^---------------size--------------------^
|
||||
* +------------+
|
||||
*
|
||||
* Each message in data stream starts with the single u32 treated as a header,
|
||||
* followed by optional set of u32 data that makes message specific payload::
|
||||
*
|
||||
* +------------+---------+---------+---------+
|
||||
* | MESSAGE |
|
||||
* +------------+---------+---------+---------+
|
||||
* | msg[0] | [1] | ... | [n-1] |
|
||||
* +------------+---------+---------+---------+
|
||||
* | MESSAGE | MESSAGE PAYLOAD |
|
||||
* + HEADER +---------+---------+---------+
|
||||
* | | 0 | ... | n |
|
||||
* +======+=====+=========+=========+=========+
|
||||
* | 31:16| code| | | |
|
||||
* +------+-----+ | | |
|
||||
* | 15:5|flags| | | |
|
||||
* +------+-----+ | | |
|
||||
* | 4:0| len| | | |
|
||||
* +------+-----+---------+---------+---------+
|
||||
*
|
||||
* ^-------------len-------------^
|
||||
*
|
||||
* The message header consists of:
|
||||
*
|
||||
* - **len**, indicates length of the message payload (in u32)
|
||||
* - **code**, indicates message code
|
||||
* - **flags**, holds various bits to control message handling
|
||||
*/
|
||||
|
||||
/*
|
||||
* Describes single command transport buffer.
|
||||
* Used by both guc-master and clients.
|
||||
*/
|
||||
struct guc_ct_buffer_desc {
|
||||
u32 addr; /* gfx address */
|
||||
u64 host_private; /* host private data */
|
||||
u32 size; /* size in bytes */
|
||||
u32 head; /* offset updated by GuC*/
|
||||
u32 tail; /* offset updated by owner */
|
||||
u32 is_in_error; /* error indicator */
|
||||
u32 fence; /* fence updated by GuC */
|
||||
u32 status; /* status updated by GuC */
|
||||
u32 owner; /* id of the channel owner */
|
||||
u32 owner_sub_id; /* owner-defined field for extra tracking */
|
||||
u32 reserved[5];
|
||||
} __packed;
|
||||
|
||||
/* Type of command transport buffer */
|
||||
#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u
|
||||
#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u
|
||||
|
||||
/*
|
||||
* Definition of the command transport message header (DW0)
|
||||
*
|
||||
* bit[4..0] message len (in dwords)
|
||||
* bit[7..5] reserved
|
||||
* bit[8] response (G2H only)
|
||||
* bit[8] write fence to desc (H2G only)
|
||||
* bit[9] write status to H2G buff (H2G only)
|
||||
* bit[10] send status back via G2H (H2G only)
|
||||
* bit[15..11] reserved
|
||||
* bit[31..16] action code
|
||||
*/
|
||||
#define GUC_CT_MSG_LEN_SHIFT 0
|
||||
#define GUC_CT_MSG_LEN_MASK 0x1F
|
||||
#define GUC_CT_MSG_IS_RESPONSE (1 << 8)
|
||||
#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8)
|
||||
#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9)
|
||||
#define GUC_CT_MSG_SEND_STATUS (1 << 10)
|
||||
#define GUC_CT_MSG_ACTION_SHIFT 16
|
||||
#define GUC_CT_MSG_ACTION_MASK 0xFFFF
|
||||
|
||||
#define GUC_FORCEWAKE_RENDER (1 << 0)
|
||||
#define GUC_FORCEWAKE_MEDIA (1 << 1)
|
||||
|
||||
#define GUC_POWER_UNSPECIFIED 0
|
||||
#define GUC_POWER_D0 1
|
||||
#define GUC_POWER_D1 2
|
||||
|
@ -480,120 +414,17 @@ struct guc_shared_ctx_data {
|
|||
struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* DOC: MMIO based communication
|
||||
*
|
||||
* The MMIO based communication between Host and GuC uses software scratch
|
||||
* registers, where first register holds data treated as message header,
|
||||
* and other registers are used to hold message payload.
|
||||
*
|
||||
* For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
|
||||
* but no H2G command takes more than 8 parameters and the GuC FW
|
||||
* itself uses an 8-element array to store the H2G message.
|
||||
*
|
||||
* +-----------+---------+---------+---------+
|
||||
* | MMIO[0] | MMIO[1] | ... | MMIO[n] |
|
||||
* +-----------+---------+---------+---------+
|
||||
* | header | optional payload |
|
||||
* +======+====+=========+=========+=========+
|
||||
* | 31:28|type| | | |
|
||||
* +------+----+ | | |
|
||||
* | 27:16|data| | | |
|
||||
* +------+----+ | | |
|
||||
* | 15:0|code| | | |
|
||||
* +------+----+---------+---------+---------+
|
||||
*
|
||||
* The message header consists of:
|
||||
*
|
||||
* - **type**, indicates message type
|
||||
* - **code**, indicates message code, is specific for **type**
|
||||
* - **data**, indicates message data, optional, depends on **code**
|
||||
*
|
||||
* The following message **types** are supported:
|
||||
*
|
||||
* - **REQUEST**, indicates Host-to-GuC request, requested GuC action code
|
||||
* must be priovided in **code** field. Optional action specific parameters
|
||||
* can be provided in remaining payload registers or **data** field.
|
||||
*
|
||||
* - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request,
|
||||
* action response status will be provided in **code** field. Optional
|
||||
* response data can be returned in remaining payload registers or **data**
|
||||
* field.
|
||||
*/
|
||||
|
||||
#define GUC_MAX_MMIO_MSG_LEN 8
|
||||
|
||||
#define INTEL_GUC_MSG_TYPE_SHIFT 28
|
||||
#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
|
||||
#define INTEL_GUC_MSG_DATA_SHIFT 16
|
||||
#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
|
||||
#define INTEL_GUC_MSG_CODE_SHIFT 0
|
||||
#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
|
||||
|
||||
#define __INTEL_GUC_MSG_GET(T, m) \
|
||||
(((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT)
|
||||
#define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m)
|
||||
#define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m)
|
||||
#define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m)
|
||||
|
||||
enum intel_guc_msg_type {
|
||||
INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
|
||||
INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
|
||||
};
|
||||
|
||||
#define __INTEL_GUC_MSG_TYPE_IS(T, m) \
|
||||
(INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T)
|
||||
#define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m)
|
||||
#define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m)
|
||||
|
||||
enum intel_guc_action {
|
||||
INTEL_GUC_ACTION_DEFAULT = 0x0,
|
||||
INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
|
||||
INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
|
||||
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
|
||||
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
|
||||
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
|
||||
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
|
||||
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
|
||||
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
|
||||
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
|
||||
INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
|
||||
INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005,
|
||||
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
|
||||
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
|
||||
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
|
||||
INTEL_GUC_ACTION_LIMIT
|
||||
};
|
||||
|
||||
enum intel_guc_preempt_options {
|
||||
INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
|
||||
INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
|
||||
};
|
||||
|
||||
enum intel_guc_report_status {
|
||||
INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
|
||||
INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
|
||||
INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
|
||||
INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
|
||||
};
|
||||
|
||||
enum intel_guc_sleep_state_status {
|
||||
INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
|
||||
INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
|
||||
INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
|
||||
#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
|
||||
};
|
||||
|
||||
#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
|
||||
#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
|
||||
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
|
||||
#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
|
||||
|
||||
enum intel_guc_response_status {
|
||||
INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
|
||||
INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
|
||||
};
|
||||
|
||||
#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \
|
||||
(typecheck(u32, (m)) && \
|
||||
((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \
|
||||
|
|
|
@ -432,32 +432,6 @@ void intel_guc_submission_fini(struct intel_guc *guc)
|
|||
}
|
||||
}
|
||||
|
||||
static void guc_interrupts_capture(struct intel_gt *gt)
|
||||
{
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
|
||||
u32 dmask = irqs << 16 | irqs;
|
||||
|
||||
GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
|
||||
|
||||
/* Don't handle the ctx switch interrupt in GuC submission mode */
|
||||
intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
|
||||
intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
|
||||
}
|
||||
|
||||
static void guc_interrupts_release(struct intel_gt *gt)
|
||||
{
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
|
||||
u32 dmask = irqs << 16 | irqs;
|
||||
|
||||
GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
|
||||
|
||||
/* Handle ctx switch interrupts again */
|
||||
intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
|
||||
intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
|
||||
}
|
||||
|
||||
static int guc_context_alloc(struct intel_context *ce)
|
||||
{
|
||||
return lrc_alloc(ce, ce->engine);
|
||||
|
@ -648,7 +622,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
|
|||
engine->emit_flush = gen8_emit_flush_xcs;
|
||||
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
|
||||
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
|
||||
if (INTEL_GEN(engine->i915) >= 12) {
|
||||
if (GRAPHICS_VER(engine->i915) >= 12) {
|
||||
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
|
||||
engine->emit_flush = gen12_emit_flush_xcs;
|
||||
}
|
||||
|
@ -670,7 +644,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
|
|||
|
||||
static void rcs_submission_override(struct intel_engine_cs *engine)
|
||||
{
|
||||
switch (INTEL_GEN(engine->i915)) {
|
||||
switch (GRAPHICS_VER(engine->i915)) {
|
||||
case 12:
|
||||
engine->emit_flush = gen12_emit_flush_rcs;
|
||||
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
|
||||
|
@ -700,7 +674,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
|
|||
* The setup relies on several assumptions (e.g. irqs always enabled)
|
||||
* that are only valid on gen11+
|
||||
*/
|
||||
GEM_BUG_ON(INTEL_GEN(i915) < 11);
|
||||
GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
|
||||
|
||||
tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
|
||||
|
||||
|
@ -722,9 +696,6 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
|
|||
void intel_guc_submission_enable(struct intel_guc *guc)
|
||||
{
|
||||
guc_stage_desc_init(guc);
|
||||
|
||||
/* Take over from manual control of ELSP (execlists) */
|
||||
guc_interrupts_capture(guc_to_gt(guc));
|
||||
}
|
||||
|
||||
void intel_guc_submission_disable(struct intel_guc *guc)
|
||||
|
@ -735,8 +706,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
|
|||
|
||||
/* Note: By the time we're here, GuC may have already been reset */
|
||||
|
||||
guc_interrupts_release(gt);
|
||||
|
||||
guc_stage_desc_fini(guc);
|
||||
}
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@ void intel_huc_init_early(struct intel_huc *huc)
|
|||
|
||||
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
|
||||
|
||||
if (INTEL_GEN(i915) >= 11) {
|
||||
if (GRAPHICS_VER(i915) >= 11) {
|
||||
huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
|
||||
huc->status.mask = HUC_LOAD_SUCCESSFUL;
|
||||
huc->status.value = HUC_LOAD_SUCCESSFUL;
|
||||
|
|
|
@ -23,7 +23,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
|
|||
return;
|
||||
|
||||
/* Don't enable GuC/HuC on pre-Gen12 */
|
||||
if (INTEL_GEN(i915) < 12) {
|
||||
if (GRAPHICS_VER(i915) < 12) {
|
||||
i915->params.enable_guc = 0;
|
||||
return;
|
||||
}
|
||||
|
@ -467,7 +467,7 @@ static int __uc_init_hw(struct intel_uc *uc)
|
|||
|
||||
/* WaEnableuKernelHeaderValidFix:skl */
|
||||
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
|
||||
if (IS_GEN(i915, 9))
|
||||
if (GRAPHICS_VER(i915) == 9)
|
||||
attempts = 3;
|
||||
else
|
||||
attempts = 1;
|
||||
|
@ -502,10 +502,6 @@ static int __uc_init_hw(struct intel_uc *uc)
|
|||
|
||||
intel_huc_auth(huc);
|
||||
|
||||
ret = intel_guc_sample_forcewake(guc);
|
||||
if (ret)
|
||||
goto err_communication;
|
||||
|
||||
if (intel_uc_uses_guc_submission(uc))
|
||||
intel_guc_submission_enable(guc);
|
||||
|
||||
|
@ -529,8 +525,6 @@ static int __uc_init_hw(struct intel_uc *uc)
|
|||
/*
|
||||
* We've failed to load the firmware :(
|
||||
*/
|
||||
err_communication:
|
||||
guc_disable_communication(guc);
|
||||
err_log_capture:
|
||||
__uc_capture_load_err_log(uc);
|
||||
err_out:
|
||||
|
@ -558,9 +552,6 @@ static void __uc_fini_hw(struct intel_uc *uc)
|
|||
if (intel_uc_uses_guc_submission(uc))
|
||||
intel_guc_submission_disable(guc);
|
||||
|
||||
if (guc_communication_enabled(guc))
|
||||
guc_disable_communication(guc);
|
||||
|
||||
__uc_sanitize(uc);
|
||||
}
|
||||
|
||||
|
@ -577,7 +568,6 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
|
|||
if (!intel_guc_is_ready(guc))
|
||||
return;
|
||||
|
||||
guc_disable_communication(guc);
|
||||
__uc_sanitize(uc);
|
||||
}
|
||||
|
||||
|
|
|
@ -1,435 +0,0 @@
|
|||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2019 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <linux/kmemleak.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "i915_buddy.h"
|
||||
|
||||
#include "i915_gem.h"
|
||||
#include "i915_globals.h"
|
||||
#include "i915_utils.h"
|
||||
|
||||
static struct i915_global_block {
|
||||
struct i915_global base;
|
||||
struct kmem_cache *slab_blocks;
|
||||
} global;
|
||||
|
||||
static void i915_global_buddy_shrink(void)
|
||||
{
|
||||
kmem_cache_shrink(global.slab_blocks);
|
||||
}
|
||||
|
||||
static void i915_global_buddy_exit(void)
|
||||
{
|
||||
kmem_cache_destroy(global.slab_blocks);
|
||||
}
|
||||
|
||||
static struct i915_global_block global = { {
|
||||
.shrink = i915_global_buddy_shrink,
|
||||
.exit = i915_global_buddy_exit,
|
||||
} };
|
||||
|
||||
int __init i915_global_buddy_init(void)
|
||||
{
|
||||
global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
|
||||
if (!global.slab_blocks)
|
||||
return -ENOMEM;
|
||||
|
||||
i915_global_register(&global.base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
|
||||
unsigned int order,
|
||||
u64 offset)
|
||||
{
|
||||
struct i915_buddy_block *block;
|
||||
|
||||
GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
|
||||
|
||||
block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
|
||||
if (!block)
|
||||
return NULL;
|
||||
|
||||
block->header = offset;
|
||||
block->header |= order;
|
||||
block->parent = parent;
|
||||
|
||||
GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
|
||||
return block;
|
||||
}
|
||||
|
||||
static void i915_block_free(struct i915_buddy_block *block)
|
||||
{
|
||||
kmem_cache_free(global.slab_blocks, block);
|
||||
}
|
||||
|
||||
static void mark_allocated(struct i915_buddy_block *block)
|
||||
{
|
||||
block->header &= ~I915_BUDDY_HEADER_STATE;
|
||||
block->header |= I915_BUDDY_ALLOCATED;
|
||||
|
||||
list_del(&block->link);
|
||||
}
|
||||
|
||||
static void mark_free(struct i915_buddy_mm *mm,
|
||||
struct i915_buddy_block *block)
|
||||
{
|
||||
block->header &= ~I915_BUDDY_HEADER_STATE;
|
||||
block->header |= I915_BUDDY_FREE;
|
||||
|
||||
list_add(&block->link,
|
||||
&mm->free_list[i915_buddy_block_order(block)]);
|
||||
}
|
||||
|
||||
static void mark_split(struct i915_buddy_block *block)
|
||||
{
|
||||
block->header &= ~I915_BUDDY_HEADER_STATE;
|
||||
block->header |= I915_BUDDY_SPLIT;
|
||||
|
||||
list_del(&block->link);
|
||||
}
|
||||
|
||||
int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
|
||||
{
|
||||
unsigned int i;
|
||||
u64 offset;
|
||||
|
||||
if (size < chunk_size)
|
||||
return -EINVAL;
|
||||
|
||||
if (chunk_size < PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (!is_power_of_2(chunk_size))
|
||||
return -EINVAL;
|
||||
|
||||
size = round_down(size, chunk_size);
|
||||
|
||||
mm->size = size;
|
||||
mm->chunk_size = chunk_size;
|
||||
mm->max_order = ilog2(size) - ilog2(chunk_size);
|
||||
|
||||
GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
|
||||
|
||||
mm->free_list = kmalloc_array(mm->max_order + 1,
|
||||
sizeof(struct list_head),
|
||||
GFP_KERNEL);
|
||||
if (!mm->free_list)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i <= mm->max_order; ++i)
|
||||
INIT_LIST_HEAD(&mm->free_list[i]);
|
||||
|
||||
mm->n_roots = hweight64(size);
|
||||
|
||||
mm->roots = kmalloc_array(mm->n_roots,
|
||||
sizeof(struct i915_buddy_block *),
|
||||
GFP_KERNEL);
|
||||
if (!mm->roots)
|
||||
goto out_free_list;
|
||||
|
||||
offset = 0;
|
||||
i = 0;
|
||||
|
||||
/*
|
||||
* Split into power-of-two blocks, in case we are given a size that is
|
||||
* not itself a power-of-two.
|
||||
*/
|
||||
do {
|
||||
struct i915_buddy_block *root;
|
||||
unsigned int order;
|
||||
u64 root_size;
|
||||
|
||||
root_size = rounddown_pow_of_two(size);
|
||||
order = ilog2(root_size) - ilog2(chunk_size);
|
||||
|
||||
root = i915_block_alloc(NULL, order, offset);
|
||||
if (!root)
|
||||
goto out_free_roots;
|
||||
|
||||
mark_free(mm, root);
|
||||
|
||||
GEM_BUG_ON(i > mm->max_order);
|
||||
GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
|
||||
|
||||
mm->roots[i] = root;
|
||||
|
||||
offset += root_size;
|
||||
size -= root_size;
|
||||
i++;
|
||||
} while (size);
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_roots:
|
||||
while (i--)
|
||||
i915_block_free(mm->roots[i]);
|
||||
kfree(mm->roots);
|
||||
out_free_list:
|
||||
kfree(mm->free_list);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void i915_buddy_fini(struct i915_buddy_mm *mm)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < mm->n_roots; ++i) {
|
||||
GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
|
||||
i915_block_free(mm->roots[i]);
|
||||
}
|
||||
|
||||
kfree(mm->roots);
|
||||
kfree(mm->free_list);
|
||||
}

static int split_block(struct i915_buddy_mm *mm,
                       struct i915_buddy_block *block)
{
        unsigned int block_order = i915_buddy_block_order(block) - 1;
        u64 offset = i915_buddy_block_offset(block);

        GEM_BUG_ON(!i915_buddy_block_is_free(block));
        GEM_BUG_ON(!i915_buddy_block_order(block));

        block->left = i915_block_alloc(block, block_order, offset);
        if (!block->left)
                return -ENOMEM;

        block->right = i915_block_alloc(block, block_order,
                                        offset + (mm->chunk_size << block_order));
        if (!block->right) {
                i915_block_free(block->left);
                return -ENOMEM;
        }

        mark_free(mm, block->left);
        mark_free(mm, block->right);

        mark_split(block);

        return 0;
}

static struct i915_buddy_block *
get_buddy(struct i915_buddy_block *block)
{
        struct i915_buddy_block *parent;

        parent = block->parent;
        if (!parent)
                return NULL;

        if (parent->left == block)
                return parent->right;

        return parent->left;
}
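
/*
 * Free a block and merge it back into the tree: as long as the buddy of the
 * block being freed is itself free, both halves are released and the walk
 * continues with their parent, so the largest possible block ends up back on
 * the free list via mark_free().
 */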
static void __i915_buddy_free(struct i915_buddy_mm *mm,
                              struct i915_buddy_block *block)
{
        struct i915_buddy_block *parent;

        while ((parent = block->parent)) {
                struct i915_buddy_block *buddy;

                buddy = get_buddy(block);

                if (!i915_buddy_block_is_free(buddy))
                        break;

                list_del(&buddy->link);

                i915_block_free(block);
                i915_block_free(buddy);

                block = parent;
        }

        mark_free(mm, block);
}

void i915_buddy_free(struct i915_buddy_mm *mm,
                     struct i915_buddy_block *block)
{
        GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
        __i915_buddy_free(mm, block);
}

void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
        struct i915_buddy_block *block, *on;

        list_for_each_entry_safe(block, on, objects, link) {
                i915_buddy_free(mm, block);
                cond_resched();
        }
        INIT_LIST_HEAD(objects);
}

/*
 * Allocate power-of-two block. The order value here translates to:
 *
 * 0 = 2^0 * mm->chunk_size
 * 1 = 2^1 * mm->chunk_size
 * 2 = 2^2 * mm->chunk_size
 * ...
 */
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
{
        struct i915_buddy_block *block = NULL;
        unsigned int i;
        int err;

        for (i = order; i <= mm->max_order; ++i) {
                block = list_first_entry_or_null(&mm->free_list[i],
                                                 struct i915_buddy_block,
                                                 link);
                if (block)
                        break;
        }

        if (!block)
                return ERR_PTR(-ENOSPC);

        GEM_BUG_ON(!i915_buddy_block_is_free(block));

        while (i != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto out_free;

                /* Go low */
                block = block->left;
                i--;
        }

        mark_allocated(block);
        kmemleak_update_trace(block);
        return block;

out_free:
        if (i != order)
                __i915_buddy_free(mm, block);
        return ERR_PTR(err);
}
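
/*
 * Illustrative sketch, not part of the original file: how a caller might map
 * a byte size onto the order argument of i915_buddy_alloc(), using the
 * size = chunk_size << order relation from the comment above. The helper name
 * example_buddy_alloc_size() is hypothetical.
 */
static struct i915_buddy_block * __maybe_unused
example_buddy_alloc_size(struct i915_buddy_mm *mm, u64 size)
{
        unsigned int order;

        /* Round up to a power-of-two multiple of the minimum chunk. */
        if (size < mm->chunk_size)
                size = mm->chunk_size;
        size = roundup_pow_of_two(size);
        order = ilog2(size) - ilog2(mm->chunk_size);

        /* ERR_PTR(-ENOSPC) comes back if no block of this order is available. */
        return i915_buddy_alloc(mm, order);
}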

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
        return s1 <= s2 && e1 >= e2;
}

/*
 * Allocate range. Note that it's safe to chain together multiple alloc_ranges
 * with the same blocks list.
 *
 * Intended for pre-allocating portions of the address space, for example to
 * reserve a block for the initial framebuffer or similar, hence the expectation
 * here is that i915_buddy_alloc() is still the main vehicle for
 * allocations, so if that's not the case then the drm_mm range allocator is
 * probably a much better fit, and so you should probably go use that instead.
 */
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
                           struct list_head *blocks,
                           u64 start, u64 size)
{
        struct i915_buddy_block *block;
        struct i915_buddy_block *buddy;
        LIST_HEAD(allocated);
        LIST_HEAD(dfs);
        u64 end;
        int err;
        int i;

        if (size < mm->chunk_size)
                return -EINVAL;

        if (!IS_ALIGNED(size | start, mm->chunk_size))
                return -EINVAL;

        if (range_overflows(start, size, mm->size))
                return -EINVAL;

        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);

        end = start + size - 1;

        do {
                u64 block_start;
                u64 block_end;

                block = list_first_entry_or_null(&dfs,
                                                 struct i915_buddy_block,
                                                 tmp_link);
                if (!block)
                        break;

                list_del(&block->tmp_link);

                block_start = i915_buddy_block_offset(block);
                block_end = block_start + i915_buddy_block_size(mm, block) - 1;

                if (!overlaps(start, end, block_start, block_end))
                        continue;

                if (i915_buddy_block_is_allocated(block)) {
                        err = -ENOSPC;
                        goto err_free;
                }

                if (contains(start, end, block_start, block_end)) {
                        if (!i915_buddy_block_is_free(block)) {
                                err = -ENOSPC;
                                goto err_free;
                        }

                        mark_allocated(block);
                        list_add_tail(&block->link, &allocated);
                        continue;
                }

                if (!i915_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
                        if (unlikely(err))
                                goto err_undo;
                }

                list_add(&block->right->tmp_link, &dfs);
                list_add(&block->left->tmp_link, &dfs);
        } while (1);

        list_splice_tail(&allocated, blocks);
        return 0;

err_undo:
        /*
         * We really don't want to leave around a bunch of split blocks, since
         * bigger is better, so make sure we merge everything back before we
         * free the allocated blocks.
         */
        buddy = get_buddy(block);
        if (buddy &&
            (i915_buddy_block_is_free(block) &&
             i915_buddy_block_is_free(buddy)))
                __i915_buddy_free(mm, block);

err_free:
        i915_buddy_free_list(mm, &allocated);
        return err;
}
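
/*
 * Illustrative sketch, not part of the original file: reserving a fixed,
 * chunk-aligned range up front, as the comment above suggests for something
 * like an already-scanned-out framebuffer. The reserved blocks end up on the
 * caller's list and are later released with i915_buddy_free_list(). The name
 * example_reserve_range() and the 8 MiB size (SZ_8M) are hypothetical.
 */
static int __maybe_unused
example_reserve_range(struct i915_buddy_mm *mm, struct list_head *reserved)
{
        /* Carve out [0, 8 MiB); both start and size must be chunk-aligned. */
        return i915_buddy_alloc_range(mm, reserved, 0, SZ_8M);
}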

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif

@ -1,131 +0,0 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __I915_BUDDY_H__
#define __I915_BUDDY_H__

#include <linux/bitops.h>
#include <linux/list.h>

struct i915_buddy_block {
#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
#define I915_BUDDY_HEADER_STATE  GENMASK_ULL(11, 10)
#define   I915_BUDDY_ALLOCATED   (1 << 10)
#define   I915_BUDDY_FREE        (2 << 10)
#define   I915_BUDDY_SPLIT       (3 << 10)
/* Free to be used, if needed in the future */
#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
#define I915_BUDDY_HEADER_ORDER  GENMASK_ULL(5, 0)
        u64 header;

        struct i915_buddy_block *left;
        struct i915_buddy_block *right;
        struct i915_buddy_block *parent;

        void *private; /* owned by creator */

        /*
         * While the block is allocated by the user through i915_buddy_alloc*,
         * the user has ownership of the link, for example to maintain within
         * a list, if so desired. As soon as the block is freed with
         * i915_buddy_free* ownership is given back to the mm.
         */
        struct list_head link;
        struct list_head tmp_link;
};

/* Order-zero must be at least PAGE_SIZE */
#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)

/*
 * Binary Buddy System.
 *
 * Locking should be handled by the user, a simple mutex around
 * i915_buddy_alloc* and i915_buddy_free* should suffice.
 */
struct i915_buddy_mm {
        /* Maintain a free list for each order. */
        struct list_head *free_list;

        /*
         * Maintain explicit binary tree(s) to track the allocation of the
         * address space. This gives us a simple way of finding a buddy block
         * and performing the potentially recursive merge step when freeing a
         * block. Nodes are either allocated or free, in which case they will
         * also exist on the respective free list.
         */
        struct i915_buddy_block **roots;

        /*
         * Anything from here is public, and remains static for the lifetime of
         * the mm. Everything above is considered do-not-touch.
         */
        unsigned int n_roots;
        unsigned int max_order;

        /* Must be at least PAGE_SIZE */
        u64 chunk_size;
        u64 size;
};
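
/*
 * Illustrative sketch, not part of the original header: per the locking note
 * above, callers serialize with their own mutex. The wrapper type and helper
 * below are hypothetical, assume <linux/mutex.h> is available, and expect
 * lb->lock to be set up with mutex_init() alongside i915_buddy_init().
 */
struct example_locked_buddy {
        struct mutex lock;
        struct i915_buddy_mm mm;
};

static inline struct i915_buddy_block *
example_locked_alloc(struct example_locked_buddy *lb, unsigned int order)
{
        struct i915_buddy_block *block;

        mutex_lock(&lb->lock);
        block = i915_buddy_alloc(&lb->mm, order);
        mutex_unlock(&lb->lock);

        return block;
}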

static inline u64
i915_buddy_block_offset(struct i915_buddy_block *block)
{
        return block->header & I915_BUDDY_HEADER_OFFSET;
}

static inline unsigned int
i915_buddy_block_order(struct i915_buddy_block *block)
{
        return block->header & I915_BUDDY_HEADER_ORDER;
}

static inline unsigned int
i915_buddy_block_state(struct i915_buddy_block *block)
{
        return block->header & I915_BUDDY_HEADER_STATE;
}

static inline bool
i915_buddy_block_is_allocated(struct i915_buddy_block *block)
{
        return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
}

static inline bool
i915_buddy_block_is_free(struct i915_buddy_block *block)
{
        return i915_buddy_block_state(block) == I915_BUDDY_FREE;
}

static inline bool
i915_buddy_block_is_split(struct i915_buddy_block *block)
{
        return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
}

static inline u64
i915_buddy_block_size(struct i915_buddy_mm *mm,
                      struct i915_buddy_block *block)
{
        return mm->chunk_size << i915_buddy_block_order(block);
}
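
/*
 * Worked example, added for illustration: with a 4 KiB chunk_size, an order-0
 * block is 4 KiB, order-1 is 8 KiB and order-4 is 4 KiB << 4 = 64 KiB, since
 * i915_buddy_block_size() simply shifts chunk_size by the block's order.
 */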

int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);

void i915_buddy_fini(struct i915_buddy_mm *mm);

struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);

int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
                           struct list_head *blocks,
                           u64 start, u64 size);

void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);

void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);

#endif