Merge tag 'drm-intel-next-2018-09-21' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Driver Changes:

- Bugzilla 107600: Fix stuttering video playback on MythTV on old hardware (Chris)
- Avoid black screen when using CSC coefficient matrix (Raviraj)
- Hammer PDs on Baytrail to make sure they reload (Chris)
- Capture some objects if unable to capture all, on error (Chris)
- Add W/A for 16 GB DIMMs on SKL+ (Mahesh)
- Only enable IPC for symmetric memory configurations on KBL+ (Mahesh)
- Assume pipe A to have maximum stride limits (Ville)
- Always update OA contexts via context image (Tvrtko)
- Icelake enabling patches (Madhav, Dhinakaran)
- Add Icelake DMC firmware (Anusha)
- Fixes for CI found corner cases (Chris)
- Limit the backpressure for request allocation (Chris)
- Park GPU on module load so usage starts from known state (Chris)
- Flush tasklet when checking for idle (Chris)
- Use coherent write into the context image on BSW+ (Chris)
- Fix possible integer overflow for framebuffers that get aligned past 4GiB (Ville)
- Downgrade fence timeout from warn to notice and add debug hint (Chris)

- Fixes to multi-function encoder code (Ville)
- Fix sprite plane check logic (Dan, Ville)
- PAGE_SIZE vs. I915_GTT_PAGE_SIZE fixes (Ville)
- Decode memory bandwidth and parameters for BXT and SKL+ (Mahesh)
- Overwrite BIOS set IPC value from KMS (Mahesh)
- Multiple pipe handling code cleanups/restructurings/optimizations (Ville)
- Spare low 4G address for non-48bit objects (Chris)
- Free context_setparam of struct_mutex (Chris)
- Delay updating ring register state on resume (Chris)
- Avoid unnecessarily copying overlay IOCTL parameters (Chris)
- Update GuC power domain states even without submission (Michal)
- Restore GuC preempt-context across S3/S4 (Chris)
- Add kernel selftest for rapid context switching (Chris)
- Keep runtime power management ref for live selftests (Chris)
- GEM code cleanups (Matt)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180927095933.GA11458@jlahtine-desk.ger.corp.intel.com
Dave Airlie 2018-09-28 09:37:51 +10:00
Parent: 156e60bc71 448626103d
Commit: db9825c954
39 changed files: 1695 additions and 959 deletions

View file

@@ -4117,6 +4117,17 @@ i915_ring_test_irq_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
/* GuC keeps the user interrupt permanently enabled for submission */
if (USES_GUC_SUBMISSION(i915))
return -ENODEV;
/*
* From icl, we can no longer individually mask interrupt generation
* from each engine.
*/
if (INTEL_GEN(i915) >= 11)
return -ENODEV;
val &= INTEL_INFO(i915)->ring_mask;
DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

View file

@@ -1063,6 +1063,300 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
{
if (size == 0)
return I915_DRAM_RANK_INVALID;
if (rank == SKL_DRAM_RANK_SINGLE)
return I915_DRAM_RANK_SINGLE;
else if (rank == SKL_DRAM_RANK_DUAL)
return I915_DRAM_RANK_DUAL;
return I915_DRAM_RANK_INVALID;
}
static bool
skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
{
if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
return true;
else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
return true;
else if (rank == SKL_DRAM_RANK_SINGLE && width == 16 && size == 8)
return true;
else if (rank == SKL_DRAM_RANK_DUAL && width == 16 && size == 16)
return true;
return false;
}
static int
skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
{
u32 tmp_l, tmp_s;
u32 s_val = val >> SKL_DRAM_S_SHIFT;
if (!val)
return -EINVAL;
tmp_l = val & SKL_DRAM_SIZE_MASK;
tmp_s = s_val & SKL_DRAM_SIZE_MASK;
if (tmp_l == 0 && tmp_s == 0)
return -EINVAL;
ch->l_info.size = tmp_l;
ch->s_info.size = tmp_s;
tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
ch->l_info.width = (1 << tmp_l) * 8;
ch->s_info.width = (1 << tmp_s) * 8;
tmp_l = val & SKL_DRAM_RANK_MASK;
tmp_s = s_val & SKL_DRAM_RANK_MASK;
ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);
if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
ch->s_info.rank == I915_DRAM_RANK_DUAL)
ch->rank = I915_DRAM_RANK_DUAL;
else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
ch->s_info.rank == I915_DRAM_RANK_SINGLE)
ch->rank = I915_DRAM_RANK_DUAL;
else
ch->rank = I915_DRAM_RANK_SINGLE;
ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
ch->l_info.width) ||
skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
ch->s_info.width);
DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
ch->l_info.size, ch->l_info.width,
ch->l_info.rank ? "dual" : "single",
ch->s_info.size, ch->s_info.width,
ch->s_info.rank ? "dual" : "single");
return 0;
}
static bool
intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
struct dram_channel_info *ch0)
{
return (val_ch0 == val_ch1 &&
(ch0->s_info.size == 0 ||
(ch0->l_info.size == ch0->s_info.size &&
ch0->l_info.width == ch0->s_info.width &&
ch0->l_info.rank == ch0->s_info.rank)));
}
static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
struct dram_channel_info ch0, ch1;
u32 val_ch0, val_ch1;
int ret;
val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(&ch0, val_ch0);
if (ret == 0)
dram_info->num_channels++;
val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(&ch1, val_ch1);
if (ret == 0)
dram_info->num_channels++;
if (dram_info->num_channels == 0) {
DRM_INFO("Number of memory channels is zero\n");
return -EINVAL;
}
dram_info->valid_dimm = true;
/*
* If any of the channel is single rank channel, worst case output
* will be same as if single rank memory, so consider single rank
* memory.
*/
if (ch0.rank == I915_DRAM_RANK_SINGLE ||
ch1.rank == I915_DRAM_RANK_SINGLE)
dram_info->rank = I915_DRAM_RANK_SINGLE;
else
dram_info->rank = max(ch0.rank, ch1.rank);
if (dram_info->rank == I915_DRAM_RANK_INVALID) {
DRM_INFO("couldn't get memory rank information\n");
return -EINVAL;
}
if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
dram_info->is_16gb_dimm = true;
dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
val_ch1,
&ch0);
DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
dev_priv->dram_info.symmetric_memory ? "" : "not ");
return 0;
}
static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
u32 mem_freq_khz, val;
int ret;
ret = skl_dram_get_channels_info(dev_priv);
if (ret)
return ret;
val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
dram_info->bandwidth_kbps = dram_info->num_channels *
mem_freq_khz * 8;
if (dram_info->bandwidth_kbps == 0) {
DRM_INFO("Couldn't get system memory bandwidth\n");
return -EINVAL;
}
dram_info->valid = true;
return 0;
}
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
u32 dram_channels;
u32 mem_freq_khz, val;
u8 num_active_channels;
int i;
val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
num_active_channels = hweight32(dram_channels);
/* Each active bit represents 4-byte channel */
dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
if (dram_info->bandwidth_kbps == 0) {
DRM_INFO("Couldn't get system memory bandwidth\n");
return -EINVAL;
}
/*
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
u8 size, width;
enum dram_rank rank;
u32 tmp;
val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
if (val == 0xFFFFFFFF)
continue;
dram_info->num_channels++;
tmp = val & BXT_DRAM_RANK_MASK;
if (tmp == BXT_DRAM_RANK_SINGLE)
rank = I915_DRAM_RANK_SINGLE;
else if (tmp == BXT_DRAM_RANK_DUAL)
rank = I915_DRAM_RANK_DUAL;
else
rank = I915_DRAM_RANK_INVALID;
tmp = val & BXT_DRAM_SIZE_MASK;
if (tmp == BXT_DRAM_SIZE_4GB)
size = 4;
else if (tmp == BXT_DRAM_SIZE_6GB)
size = 6;
else if (tmp == BXT_DRAM_SIZE_8GB)
size = 8;
else if (tmp == BXT_DRAM_SIZE_12GB)
size = 12;
else if (tmp == BXT_DRAM_SIZE_16GB)
size = 16;
else
size = 0;
tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
width = (1 << tmp) * 8;
DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
width, rank == I915_DRAM_RANK_SINGLE ? "single" :
rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");
/*
* If any of the channel is single rank channel,
* worst case output will be same as if single rank
* memory, so consider single rank memory.
*/
if (dram_info->rank == I915_DRAM_RANK_INVALID)
dram_info->rank = rank;
else if (rank == I915_DRAM_RANK_SINGLE)
dram_info->rank = I915_DRAM_RANK_SINGLE;
}
if (dram_info->rank == I915_DRAM_RANK_INVALID) {
DRM_INFO("couldn't get memory rank information\n");
return -EINVAL;
}
dram_info->valid_dimm = true;
dram_info->valid = true;
return 0;
}
static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
char bandwidth_str[32];
int ret;
dram_info->valid = false;
dram_info->valid_dimm = false;
dram_info->is_16gb_dimm = false;
dram_info->rank = I915_DRAM_RANK_INVALID;
dram_info->bandwidth_kbps = 0;
dram_info->num_channels = 0;
if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
return;
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
ret = bxt_get_dram_info(dev_priv);
else if (INTEL_GEN(dev_priv) == 9)
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
if (ret)
return;
if (dram_info->bandwidth_kbps)
sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
else
sprintf(bandwidth_str, "unknown");
DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
bandwidth_str, dram_info->num_channels);
DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
(dram_info->rank == I915_DRAM_RANK_DUAL) ?
"dual" : "single", yesno(dram_info->is_16gb_dimm));
}
/**
* i915_driver_init_hw - setup state requiring device access
* @dev_priv: device private
@@ -1180,6 +1474,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
goto err_msi;
intel_opregion_setup(dev_priv);
/*
* Fill the dram structure to get the system raw bandwidth and
* dram info. This will be used for memory latency calculation.
*/
intel_get_dram_info(dev_priv);
return 0;
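For reference, the raw-bandwidth figure that skl_get_dram_info() computes above reduces to num_channels * DDR clock (kHz) * 8 bytes per clock. A minimal userspace sketch of the same arithmetic; the ratio value 0x8 and the channel count are assumed example inputs, not values read from hardware:

#include <stdio.h>
#include <stdint.h>

#define SKL_REQ_DATA_MASK		0xF
#define SKL_MEMORY_FREQ_MULTIPLIER_HZ	266666666ULL
#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t val = 0x8;		/* assumed ratio: 8 * 266.67 MHz = DDR4-2133 */
	unsigned int num_channels = 2;	/* assumed dual-channel system */
	uint64_t mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
					     SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
	/* 8 bytes per clock per 64-bit channel */
	uint64_t bandwidth_kbps = num_channels * mem_freq_khz * 8;

	printf("%llu KBps\n", (unsigned long long)bandwidth_kbps);
	return 0;
}

With these inputs it prints 34133344 KBps (about 34 GB/s for dual-channel DDR4-2133), the raw number the watermark code then consumes.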

View file

@@ -87,8 +87,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20180906"
#define DRIVER_TIMESTAMP 1536242083
#define DRIVER_DATE "20180921"
#define DRIVER_TIMESTAMP 1537521997
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -1946,6 +1946,20 @@ struct drm_i915_private {
bool distrust_bios_wm;
} wm;
struct dram_info {
bool valid;
bool valid_dimm;
bool is_16gb_dimm;
u8 num_channels;
enum dram_rank {
I915_DRAM_RANK_INVALID = 0,
I915_DRAM_RANK_SINGLE,
I915_DRAM_RANK_DUAL
} rank;
u32 bandwidth_kbps;
bool symmetric_memory;
} dram_info;
struct i915_runtime_pm runtime_pm;
struct {
@@ -2159,6 +2173,15 @@ struct drm_i915_private {
*/
};
struct dram_channel_info {
struct info {
u8 size, width;
enum dram_rank rank;
} l_info, s_info;
enum dram_rank rank;
bool is_16gb_dimm;
};
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return container_of(dev, struct drm_i915_private, drm);
@@ -2284,7 +2307,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#define for_each_sgt_dma(__dmap, __iter, __sgt) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
((__dmap) = (__iter).dma + (__iter).curr); \
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
(((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
/**
@@ -3074,6 +3097,12 @@ enum i915_map_type {
I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
@@ -3311,7 +3340,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);

View file

@@ -2506,7 +2506,9 @@ static bool i915_sg_trim(struct sg_table *orig_st)
new_sg = new_st.sgl;
for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
sg_set_page(new_sg, sg_page(sg), sg->length, 0);
/* called before being DMA mapped, no need to copy sg->dma_* */
sg_dma_address(new_sg) = sg_dma_address(sg);
sg_dma_len(new_sg) = sg_dma_len(sg);
new_sg = sg_next(new_sg);
}
GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
@@ -3438,6 +3440,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
i915_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests);
if (!intel_gpu_reset(i915, ALL_ENGINES))
intel_engines_sanitize(i915);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
* being queued (by disallowing execbuf whilst wedged) so having
@@ -5414,8 +5419,19 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
assert_kernel_context_is_current(i915);
/*
* Immediately park the GPU so that we enable powersaving and
* treat it as idle. The next time we issue a request, we will
* unpark and start using the engine->pinned_default_state, otherwise
* it is in limbo and an early reset may fail.
*/
__i915_gem_park(i915);
for_each_engine(engine, i915, id) {
struct i915_vma *state;
void *vaddr;
GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
state = to_intel_context(ctx, engine)->state;
if (!state)
@@ -5438,6 +5454,16 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
engine->default_state = i915_gem_object_get(state->obj);
/* Check we can acquire the image of the context state */
vaddr = i915_gem_object_pin_map(engine->default_state,
I915_MAP_FORCE_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_active;
}
i915_gem_object_unpin_map(engine->default_state);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {

View file

@@ -862,7 +862,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
@@ -896,27 +896,23 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct i915_gem_context *ctx;
int ret;
int ret = 0;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (!ctx)
return -ENOENT;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto out;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size) {
if (args->size)
ret = -EINVAL;
} else {
ctx->flags &= ~CONTEXT_NO_ZEROMAP;
ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
}
else if (args->value)
set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
else
clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
if (args->size)
@@ -960,9 +956,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
break;
}
mutex_unlock(&dev->struct_mutex);
out:
i915_gem_context_put(ctx);
return ret;
}
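The setparam path above can drop struct_mutex because the user-controlled booleans now live in ctx->user_flags and are flipped with atomic bitops, so a single bit update can race with nothing it could corrupt. A hedged userspace analogue of the pattern, with set_bit/test_bit written out via C11 atomics as stand-ins for the kernel helpers:

#include <stdatomic.h>
#include <stdio.h>

#define UCONTEXT_NO_ZEROMAP		0
#define UCONTEXT_NO_ERROR_CAPTURE	1
#define UCONTEXT_BANNABLE		2

static atomic_ulong user_flags;	/* stands in for ctx->user_flags */

static void set_bit(int nr, atomic_ulong *addr)
{
	atomic_fetch_or(addr, 1UL << nr);	/* one indivisible RMW, no lock */
}

static void clear_bit(int nr, atomic_ulong *addr)
{
	atomic_fetch_and(addr, ~(1UL << nr));
}

static int test_bit(int nr, atomic_ulong *addr)
{
	return (atomic_load(addr) >> nr) & 1;
}

int main(void)
{
	set_bit(UCONTEXT_NO_ZEROMAP, &user_flags);
	clear_bit(UCONTEXT_BANNABLE, &user_flags);
	printf("no_zeromap=%d\n", test_bit(UCONTEXT_NO_ZEROMAP, &user_flags));
	return 0;
}

The kernel-only bits (CONTEXT_BANNED, CONTEXT_CLOSED, ...) stay in the separate ctx->flags word, which is why the patch also switches them from the non-atomic __set_bit to set_bit.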

View file

@@ -116,16 +116,21 @@ struct i915_gem_context {
*/
struct rcu_head rcu;
/**
* @user_flags: small set of booleans controlled by the user
*/
unsigned long user_flags;
#define UCONTEXT_NO_ZEROMAP 0
#define UCONTEXT_NO_ERROR_CAPTURE 1
#define UCONTEXT_BANNABLE 2
/**
* @flags: small set of booleans
*/
unsigned long flags;
#define CONTEXT_NO_ZEROMAP BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE 1
#define CONTEXT_CLOSED 2
#define CONTEXT_BANNABLE 3
#define CONTEXT_BANNED 4
#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
#define CONTEXT_BANNED 0
#define CONTEXT_CLOSED 1
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
/**
* @hw_id: - unique identifier for the context
@@ -209,37 +214,37 @@ static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx
static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
GEM_BUG_ON(i915_gem_context_is_closed(ctx));
__set_bit(CONTEXT_CLOSED, &ctx->flags);
set_bit(CONTEXT_CLOSED, &ctx->flags);
}
static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}
static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNABLE, &ctx->flags);
return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
__set_bit(CONTEXT_BANNABLE, &ctx->flags);
set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
@@ -249,7 +254,7 @@ static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx
static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
__set_bit(CONTEXT_BANNED, &ctx->flags);
set_bit(CONTEXT_BANNED, &ctx->flags);
}
static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)

View file

@@ -693,9 +693,14 @@ static int eb_reserve(struct i915_execbuffer *eb)
eb_unreserve_vma(vma, &eb->flags[i]);
if (flags & EXEC_OBJECT_PINNED)
/* Pinned must have their slot */
list_add(&vma->exec_link, &eb->unbound);
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
/* Map require the lowest 256MiB (aperture) */
list_add_tail(&vma->exec_link, &eb->unbound);
else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
/* Prioritise 4GiB region for restricted bo */
list_add(&vma->exec_link, &last);
else
list_add_tail(&vma->exec_link, &last);
}
@@ -743,7 +748,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
}
eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP)
if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
return 0;
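The reordering in eb_reserve() above is what implements "spare low 4G addresses": on an eviction pass the most constrained buffers are bound first. A rough standalone sketch of the resulting priority, with made-up flag values and a plain sort standing in for the driver's list_add/list_add_tail splicing:

#include <stdio.h>
#include <stdlib.h>

#define PINNED		0x1	/* must land in its exact slot */
#define NEEDS_MAP	0x2	/* must fit in the low mappable aperture */
#define SUPPORTS_48B	0x4	/* may live anywhere in a 48b VM */

struct bo { const char *name; unsigned int flags; };

/* Lower tier binds earlier; non-48b objects go ahead of unrestricted
 * ones so the scarce low 4GiB is not eaten by buffers that could have
 * lived anywhere. */
static int tier(const struct bo *b)
{
	if (b->flags & PINNED)
		return 0;
	if (b->flags & NEEDS_MAP)
		return 1;
	if (!(b->flags & SUPPORTS_48B))
		return 2;
	return 3;
}

static int cmp(const void *a, const void *b)
{
	return tier(a) - tier(b);
}

int main(void)
{
	struct bo eb[] = {
		{ "batch", SUPPORTS_48B },
		{ "scanout", NEEDS_MAP },
		{ "legacy", 0 },
		{ "ringbuffer", PINNED },
	};
	qsort(eb, 4, sizeof(*eb), cmp);
	for (int i = 0; i < 4; i++)
		printf("%d: %s\n", i, eb[i].name);
	return 0;
}

qsort() is not stable, unlike the driver's list splicing, so treat this purely as an illustration of the tier order.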

View file

@@ -1050,7 +1050,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
do {
vaddr[idx->pte] = pte_encode | iter->dma;
iter->dma += PAGE_SIZE;
iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
iter->sg = __sg_next(iter->sg);
if (!iter->sg) {
@@ -1144,7 +1144,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
rem >= (max - index) << PAGE_SHIFT))
rem >= (max - index) * I915_GTT_PAGE_SIZE))
maybe_64K = true;
vaddr = kmap_atomic_px(pt);
@@ -1169,7 +1169,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
if (maybe_64K && index < max &&
!(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
rem >= (max - index) << PAGE_SHIFT)))
rem >= (max - index) * I915_GTT_PAGE_SIZE)))
maybe_64K = false;
if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
@@ -1759,7 +1759,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
pde, pte,
(pde * GEN6_PTES + pte) * PAGE_SIZE);
(pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
for (i = 0; i < 4; i++) {
if (vaddr[pte + i] != scratch_pte)
seq_printf(m, " %08x", vaddr[pte + i]);
@@ -1842,10 +1842,10 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
unsigned int first_entry = start >> PAGE_SHIFT;
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length >> PAGE_SHIFT;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
while (num_entries) {
@@ -1886,7 +1886,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned first_entry = vma->node.start >> PAGE_SHIFT;
unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
@@ -1899,7 +1899,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
iter.dma += PAGE_SIZE;
iter.dma += I915_GTT_PAGE_SIZE;
if (iter.dma == iter.max) {
iter.sg = __sg_next(iter.sg);
if (!iter.sg)
@@ -2037,7 +2037,7 @@ static int pd_vma_bind(struct i915_vma *vma,
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct gen6_hw_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
struct i915_page_table *pt;
unsigned int pde;
@@ -2163,7 +2163,7 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.i915 = i915;
ppgtt->base.vm.dma = &i915->drm.pdev->dev;
ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
i915_address_space_init(&ppgtt->base.vm, i915);
@@ -2456,7 +2456,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
@@ -2480,7 +2480,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start >> PAGE_SHIFT;
gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
for_each_sgt_dma(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr);
@@ -2499,7 +2499,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
iowrite32(vm->pte_encode(addr, level, flags), pte);
@@ -2519,7 +2519,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
unsigned int i = vma->node.start >> PAGE_SHIFT;
unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
struct sgt_iter iter;
dma_addr_t addr;
for_each_sgt_dma(addr, iter, vma->pages)
@@ -2541,8 +2541,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
unsigned first_entry = start / I915_GTT_PAGE_SIZE;
unsigned num_entries = length / I915_GTT_PAGE_SIZE;
const gen8_pte_t scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t __iomem *gtt_base =
@@ -2657,8 +2657,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
unsigned first_entry = start / I915_GTT_PAGE_SIZE;
unsigned num_entries = length / I915_GTT_PAGE_SIZE;
gen6_pte_t scratch_pte, __iomem *gtt_base =
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -3005,7 +3005,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
arch_phys_wc_del(ggtt->mtrr);
io_mapping_fini(&ggtt->iomap);
i915_gem_cleanup_stolen(&dev_priv->drm);
i915_gem_cleanup_stolen(dev_priv);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3023,7 +3023,7 @@ static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
#ifdef CONFIG_X86_32
/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
if (bdw_gmch_ctl > 4)
bdw_gmch_ctl = 4;
#endif
@@ -3398,7 +3398,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
@@ -3456,7 +3456,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.clear_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
@@ -3727,9 +3727,9 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
* the entries so the sg list can be happily traversed.
* The only thing we need are DMA addresses.
*/
sg_set_page(sg, NULL, PAGE_SIZE, 0);
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) = in[offset + src_idx];
sg_dma_len(sg) = PAGE_SIZE;
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
src_idx -= stride;
}
@@ -3742,7 +3742,7 @@ static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
const unsigned long n_pages = obj->base.size / PAGE_SIZE;
const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
unsigned int size = intel_rotation_info_size(rot_info);
struct sgt_iter sgt_iter;
dma_addr_t dma_addr;

View file

@@ -167,10 +167,8 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
return 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;

View file

@@ -1365,15 +1365,20 @@ static void request_record_user_bo(struct i915_request *request,
{
struct i915_capture_list *c;
struct drm_i915_error_object **bo;
long count;
long count, max;
count = 0;
max = 0;
for (c = request->capture_list; c; c = c->next)
count++;
max++;
if (!max)
return;
bo = NULL;
if (count)
bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
if (!bo) {
/* If we can't capture everything, try to capture something. */
max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
}
if (!bo)
return;
@@ -1382,7 +1387,8 @@ static void request_record_user_bo(struct i915_request *request,
bo[count] = i915_error_object_create(request->i915, c->vma);
if (!bo[count])
break;
count++;
if (++count == max)
break;
}
ee->user_bo = bo;

View file

@@ -1679,107 +1679,6 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
}
}
/*
* Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
* is only used by the kernel context.
*/
static int gen8_emit_oa_config(struct i915_request *rq,
const struct i915_oa_config *oa_config)
{
struct drm_i915_private *dev_priv = rq->i915;
/* The MMIO offsets for Flex EU registers aren't contiguous */
u32 flex_mmio[] = {
i915_mmio_reg_offset(EU_PERF_CNTL0),
i915_mmio_reg_offset(EU_PERF_CNTL1),
i915_mmio_reg_offset(EU_PERF_CNTL2),
i915_mmio_reg_offset(EU_PERF_CNTL3),
i915_mmio_reg_offset(EU_PERF_CNTL4),
i915_mmio_reg_offset(EU_PERF_CNTL5),
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
u32 *cs;
int i;
cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME;
for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
u32 mmio = flex_mmio[i];
/*
* This arbitrary default will select the 'EU FPU0 Pipeline
* Active' event. In the future it's anticipated that there
* will be an explicit 'No Event' we can select, but not
* yet...
*/
u32 value = 0;
if (oa_config) {
u32 j;
for (j = 0; j < oa_config->flex_regs_len; j++) {
if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
value = oa_config->flex_regs[j].value;
break;
}
}
}
*cs++ = mmio;
*cs++ = value;
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct i915_timeline *timeline;
struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
i915_retire_requests(dev_priv);
rq = i915_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
ret = gen8_emit_oa_config(rq, oa_config);
if (ret) {
i915_request_add(rq);
return ret;
}
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
struct i915_request *prev;
prev = i915_gem_active_raw(&timeline->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
i915_request_await_dma_fence(rq, &prev->fence);
}
i915_request_add(rq);
return 0;
}
/*
* Manages updating the per-context aspects of the OA stream
* configuration across all contexts.
@@ -1808,17 +1707,13 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
struct i915_request *rq;
int ret;
unsigned int wait_flags = I915_WAIT_LOCKED;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Switch away from any user context. */
ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
if (ret)
return ret;
/*
* The OA register config is setup through the context image. This image
* might be written to by the GPU on context switch (in particular on
@@ -1833,7 +1728,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* the GPU from any submitted work.
*/
ret = i915_gem_wait_for_idle(dev_priv,
wait_flags,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -1847,7 +1742,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
if (!ce->state)
continue;
regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
regs = i915_gem_object_pin_map(ce->state->obj, map_type);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -1859,7 +1754,17 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
i915_gem_object_unpin_map(ce->state->obj);
}
return ret;
/*
* Apply the configuration by doing one context restore of the edited
* context image.
*/
rq = i915_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
i915_request_add(rq);
return 0;
}
static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,

View file

@@ -9583,6 +9583,54 @@ enum skl_power_gate {
#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
#define BXT_P_CR_MC_BIOS_REQ_0_0_0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114)
#define BXT_REQ_DATA_MASK 0x3F
#define BXT_DRAM_CHANNEL_ACTIVE_SHIFT 12
#define BXT_DRAM_CHANNEL_ACTIVE_MASK (0xF << 12)
#define BXT_MEMORY_FREQ_MULTIPLIER_HZ 133333333
#define BXT_D_CR_DRP0_DUNIT8 0x1000
#define BXT_D_CR_DRP0_DUNIT9 0x1200
#define BXT_D_CR_DRP0_DUNIT_START 8
#define BXT_D_CR_DRP0_DUNIT_END 11
#define BXT_D_CR_DRP0_DUNIT(x) _MMIO(MCHBAR_MIRROR_BASE_SNB + \
_PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\
BXT_D_CR_DRP0_DUNIT9))
#define BXT_DRAM_RANK_MASK 0x3
#define BXT_DRAM_RANK_SINGLE 0x1
#define BXT_DRAM_RANK_DUAL 0x3
#define BXT_DRAM_WIDTH_MASK (0x3 << 4)
#define BXT_DRAM_WIDTH_SHIFT 4
#define BXT_DRAM_WIDTH_X8 (0x0 << 4)
#define BXT_DRAM_WIDTH_X16 (0x1 << 4)
#define BXT_DRAM_WIDTH_X32 (0x2 << 4)
#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
#define BXT_DRAM_SIZE_MASK (0x7 << 6)
#define BXT_DRAM_SIZE_SHIFT 6
#define BXT_DRAM_SIZE_4GB (0x0 << 6)
#define BXT_DRAM_SIZE_6GB (0x1 << 6)
#define BXT_DRAM_SIZE_8GB (0x2 << 6)
#define BXT_DRAM_SIZE_12GB (0x3 << 6)
#define BXT_DRAM_SIZE_16GB (0x4 << 6)
#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
#define SKL_REQ_DATA_MASK (0xF << 0)
#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
#define SKL_DRAM_S_SHIFT 16
#define SKL_DRAM_SIZE_MASK 0x3F
#define SKL_DRAM_WIDTH_MASK (0x3 << 8)
#define SKL_DRAM_WIDTH_SHIFT 8
#define SKL_DRAM_WIDTH_X8 (0x0 << 8)
#define SKL_DRAM_WIDTH_X16 (0x1 << 8)
#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
#define SKL_DRAM_RANK_MASK (0x1 << 10)
#define SKL_DRAM_RANK_SHIFT 10
#define SKL_DRAM_RANK_SINGLE (0x0 << 10)
#define SKL_DRAM_RANK_DUAL (0x1 << 10)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
@@ -10231,6 +10279,12 @@ enum skl_power_gate {
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
#define _ICL_DSI_T_INIT_MASTER_0 0x6b088
#define _ICL_DSI_T_INIT_MASTER_1 0x6b888
#define ICL_DSI_T_INIT_MASTER(port) _MMIO_PORT(port, \
_ICL_DSI_T_INIT_MASTER_0,\
_ICL_DSI_T_INIT_MASTER_1)
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
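The SKL_MAD_DIMM field layout added above can be exercised on its own: size sits in the low 6 bits, width in bits 9:8 (decoded as 8 << n), rank in bit 10, with the S-DIMM copy shifted up 16 bits. A decode sketch over a hypothetical register value, not one read from real hardware:

#include <stdio.h>
#include <stdint.h>

#define SKL_DRAM_S_SHIFT	16
#define SKL_DRAM_SIZE_MASK	0x3F
#define SKL_DRAM_WIDTH_MASK	(0x3 << 8)
#define SKL_DRAM_WIDTH_SHIFT	8
#define SKL_DRAM_RANK_MASK	(0x1 << 10)

static void decode(const char *dimm, uint32_t v)
{
	printf("%s: %uGB x%u %s-rank\n", dimm,
	       v & SKL_DRAM_SIZE_MASK,
	       8u << ((v & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT),
	       (v & SKL_DRAM_RANK_MASK) ? "dual" : "single");
}

int main(void)
{
	/* Hypothetical: 16GB x8 single-rank L DIMM, 8GB x8 single-rank S DIMM */
	uint32_t val = 16 | (8u << SKL_DRAM_S_SHIFT);

	decode("L", val);
	decode("S", val >> SKL_DRAM_S_SHIFT);
	return 0;
}

A 16GB x8 single-rank DIMM is one of the combinations skl_is_16gb_dimm() flags for the 16GB-DIMM watermark workaround.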

View file

@@ -732,13 +732,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq = kmem_cache_alloc(i915->requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
i915_retire_requests(i915);
/* Ratelimit ourselves to prevent oom from malicious clients */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err_unreserve;
rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
&i915->drm.struct_mutex);
if (rq)
cond_synchronize_rcu(rq->rcustate);
/*
* We've forced the client to stall and catch up with whatever
@@ -758,6 +758,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
}
}
rq->rcustate = get_state_synchronize_rcu();
INIT_LIST_HEAD(&rq->active_list);
rq->i915 = i915;
rq->engine = engine;

View file

@@ -100,6 +100,14 @@ struct i915_request {
struct i915_timeline *timeline;
struct intel_signal_node signaling;
/*
* The rcu epoch of when this request was allocated. Used to judiciously
* apply backpressure on future allocations to ensure that under
* mempressure there is sufficient RCU ticks for us to reclaim our
* RCU protected slabs.
*/
unsigned long rcustate;
/*
* Fences for the various phases in the request's lifetime.
*

View file

@@ -24,13 +24,13 @@ enum {
DEBUG_FENCE_NOTIFY,
};
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
static void *i915_sw_fence_debug_hint(void *addr)
{
return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
}
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
static struct debug_obj_descr i915_sw_fence_debug_descr = {
.name = "i915_sw_fence",
.debug_hint = i915_sw_fence_debug_hint,
@@ -393,10 +393,11 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
if (!fence)
return;
pr_warn("asynchronous wait on fence %s:%s:%x timed out\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno);
pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno,
i915_sw_fence_debug_hint(fence));
i915_sw_fence_complete(fence);
}

View file

@@ -113,71 +113,18 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane_state *intel_state)
{
struct drm_plane *plane = intel_state->base.plane;
struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_plane_state *state = &intel_state->base;
struct intel_plane *intel_plane = to_intel_plane(plane);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
int ret;
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
if (state->fb && drm_rotation_90_or_270(state->rotation)) {
struct drm_format_name_buf format_name;
if (state->fb->modifier != I915_FORMAT_MOD_Y_TILED &&
state->fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
/*
* 90/270 is not allowed with RGB64 16:16:16:16,
* RGB 16-bit 5:6:5, and Indexed 8-bit.
* TBD: Add RGB64 case once its added in supported format list.
*/
switch (state->fb->format->format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(state->fb->format->format,
&format_name));
return -EINVAL;
default:
break;
}
}
/* CHV ignores the mirror bit when the rotate bit is set :( */
if (IS_CHERRYVIEW(dev_priv) &&
state->rotation & DRM_MODE_ROTATE_180 &&
state->rotation & DRM_MODE_REFLECT_X) {
DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
return -EINVAL;
}
intel_state->base.visible = false;
ret = intel_plane->check_plane(crtc_state, intel_state);
if (ret)
return ret;
/*
* Y-tiling is not supported in IF-ID Interlace mode in
* GEN9 and above.
*/
if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
state->fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
state->fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
}
}
/* FIXME pre-g4x don't work like this */
if (state->visible)
crtc_state->active_planes |= BIT(intel_plane->id);

View file

@@ -34,6 +34,9 @@
* low-power state and comes back to normal.
*/
#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
@@ -304,6 +307,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
if (csr->fw_path == i915_modparams.dmc_firmware_path) {
/* Bypass version check for firmware override. */
required_version = csr->version;
} else if (IS_ICELAKE(dev_priv)) {
required_version = ICL_CSR_VERSION_REQUIRED;
} else if (IS_CANNONLAKE(dev_priv)) {
required_version = CNL_CSR_VERSION_REQUIRED;
} else if (IS_GEMINILAKE(dev_priv)) {
@@ -471,6 +476,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (i915_modparams.dmc_firmware_path)
csr->fw_path = i915_modparams.dmc_firmware_path;
else if (IS_ICELAKE(dev_priv))
csr->fw_path = I915_CSR_ICL;
else if (IS_CANNONLAKE(dev_priv))
csr->fw_path = I915_CSR_CNL;
else if (IS_GEMINILAKE(dev_priv))

View file

@@ -2077,7 +2077,7 @@ out:
static inline enum intel_display_power_domain
intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
{
/* CNL HW requires corresponding AUX IOs to be powered up for PSR with
/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
* DC states enabled at the same time, while for driver initiated AUX
* transfers we need the same AUX IOs to be powered but with DC states
* disabled. Accordingly use the AUX power domain here which leaves DC

File diff not shown because of its large size

View file

@@ -497,18 +497,21 @@ struct intel_atomic_state {
struct intel_plane_state {
struct drm_plane_state base;
struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long flags;
#define PLANE_HAS_FENCE BIT(0)
struct {
u32 offset;
/*
* Plane stride in:
* bytes for 0/180 degree rotation
* pixels for 90/270 degree rotation
*/
u32 stride;
int x, y;
} main;
struct {
u32 offset;
int x, y;
} aux;
} color_plane[2];
/* plane control register */
u32 ctl;
@@ -950,10 +953,8 @@ struct intel_plane {
enum i9xx_plane_id i9xx_plane;
enum plane_id id;
enum pipe pipe;
bool can_scale;
bool has_fbc;
bool has_ccs;
int max_downscale;
uint32_t frontbuffer_bit;
struct {
@@ -966,6 +967,9 @@ struct intel_plane {
* the intel_plane_state structure and accessed via plane_state.
*/
unsigned int (*max_stride)(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
@@ -1442,7 +1446,7 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
struct drm_atomic_state *old_state);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int plane, unsigned int height);
int color_plane, unsigned int height);
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
@@ -1565,7 +1569,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int rotation,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags);
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
@@ -1614,8 +1618,6 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
u32 intel_compute_tile_offset(int *x, int *y,
const struct intel_plane_state *state, int plane);
void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1645,8 +1647,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
uint32_t pixel_format);
int skl_max_scale(const struct intel_crtc_state *crtc_state,
u32 pixel_format);
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
@@ -1658,12 +1660,14 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_color_ctl(const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
unsigned int rotation);
int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
@@ -2131,6 +2135,13 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
unsigned int skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);

View file

@@ -990,6 +990,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
}
local_bh_enable();
/* Otherwise flush the tasklet if it was on another cpu */
tasklet_unlock_wait(t);
if (READ_ONCE(engine->execlists.active))
return false;
}

View file

@@ -670,8 +670,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
cache->plane.visible = plane_state->base.visible;
cache->plane.adjusted_x = plane_state->main.x;
cache->plane.adjusted_y = plane_state->main.y;
cache->plane.adjusted_x = plane_state->color_plane[0].x;
cache->plane.adjusted_y = plane_state->color_plane[0].y;
cache->plane.y = plane_state->base.src.y1 >> 16;
if (!cache->plane.visible)

View file

@@ -175,6 +175,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
struct fb_info *info;
struct drm_framebuffer *fb;
struct i915_vma *vma;
@@ -214,8 +217,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* BIOS is suitable for own access.
*/
vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
DRM_MODE_ROTATE_0,
false, &flags);
&view, false, &flags);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_unlock;

View file

@@ -557,16 +557,36 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
engine)->lrc_desc);
struct intel_context *ce = to_intel_context(client->owner, engine);
u32 data[7];
/*
* The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
* See guc_fill_preempt_context().
*/
if (!ce->ring->emit) { /* recreate upon load/resume */
u32 addr = intel_hws_preempt_done_address(engine);
u32 *cs;
cs = ce->ring->vaddr;
if (engine->id == RCS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
addr);
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
addr);
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
flush_ggtt_writes(ce->ring->vma);
}
spin_lock_irq(&client->wq_lock);
guc_wq_item_append(client, engine->guc_id, ctx_desc,
guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
spin_unlock_irq(&client->wq_lock);
@@ -1044,50 +1064,6 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce)
#undef SR_DISABLED
}
static void guc_fill_preempt_context(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_guc_client *client = guc->preempt_client;
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce =
to_intel_context(client->owner, engine);
u32 addr = intel_hws_preempt_done_address(engine);
u32 *cs;
GEM_BUG_ON(!ce->pin_count);
/*
* We rely on this context image *not* being saved after
* preemption. This ensures that the RING_HEAD / RING_TAIL
* remain pointing at initial values forever.
*/
GEM_BUG_ON(!ctx_save_restore_disabled(ce));
cs = ce->ring->vaddr;
if (id == RCS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
addr);
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
addr);
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
GUC_PREEMPT_BREADCRUMB_BYTES);
flush_ggtt_writes(ce->ring->vma);
}
}
static int guc_clients_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1118,8 +1094,6 @@ static int guc_clients_create(struct intel_guc *guc)
return PTR_ERR(client);
}
guc->preempt_client = client;
guc_fill_preempt_context(guc);
}
return 0;

View file

@@ -1294,7 +1294,7 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
err = i915_gem_object_set_to_wc_domain(vma->obj, true);
if (err)
return err;
}
@@ -1322,7 +1322,9 @@ __execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto err;
vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
vaddr = i915_gem_object_pin_map(ce->state->obj,
i915_coherent_map_type(ctx->i915) |
I915_MAP_OVERRIDE);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto unpin_vma;
@@ -1338,11 +1340,13 @@ __execlists_context_pin(struct intel_engine_cs *engine,
intel_lr_context_descriptor_update(ctx, engine, ce);
GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring->head;
ce->lrc_reg_state[CTX_RING_TAIL + 1] = ce->ring->tail;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
@@ -2392,7 +2396,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
ret = intel_engine_init_common(engine);
if (ret)
goto error;
return ret;
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = i915->regs +
@@ -2434,10 +2438,6 @@ static int logical_ring_init(struct intel_engine_cs *engine)
reset_csb_pointers(execlists);
return 0;
error:
intel_logical_ring_cleanup(engine);
return ret;
}
int logical_render_ring_init(struct intel_engine_cs *engine)
@@ -2460,10 +2460,14 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
ret = logical_ring_init(engine);
if (ret)
return ret;
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
goto err_cleanup_common;
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2475,7 +2479,11 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
return logical_ring_init(engine);
return 0;
err_cleanup_common:
intel_engine_cleanup_common(engine);
return ret;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
@@ -2841,13 +2849,14 @@ error_deref_obj:
return ret;
}
void intel_lr_context_resume(struct drm_i915_private *dev_priv)
void intel_lr_context_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
/* Because we emit WA_TAIL_DWORDS there may be a disparity
/*
* Because we emit WA_TAIL_DWORDS there may be a disparity
* between our bookkeeping in ce->ring->head and ce->ring->tail and
* that stored in context. As we only write new commands from
* ce->ring->tail onwards, everything before that is junk. If the GPU
@@ -2857,28 +2866,22 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
for_each_engine(engine, dev_priv, id) {
list_for_each_entry(ctx, &i915->contexts.list, link) {
for_each_engine(engine, i915, id) {
struct intel_context *ce =
to_intel_context(ctx, engine);
u32 *reg;
if (!ce->state)
continue;
reg = i915_gem_object_pin_map(ce->state->obj,
I915_MAP_WB);
if (WARN_ON(IS_ERR(reg)))
continue;
reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
reg[CTX_RING_HEAD+1] = 0;
reg[CTX_RING_TAIL+1] = 0;
ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
intel_ring_reset(ce->ring, 0);
if (ce->pin_count) { /* otherwise done in context_pin */
u32 *regs = ce->lrc_reg_state;
regs[CTX_RING_HEAD + 1] = ce->ring->head;
regs[CTX_RING_TAIL + 1] = ce->ring->tail;
}
}
}
}

View file

@@ -487,23 +487,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
overlay->active = false;
}
struct put_image_params {
int format;
short dst_x;
short dst_y;
short dst_w;
short dst_h;
short src_w;
short src_scan_h;
short src_scan_w;
short src_h;
short stride_Y;
short stride_UV;
int offset_Y;
int offset_U;
int offset_V;
};
static int packed_depth_bytes(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
@@ -618,25 +601,25 @@ static void update_polyphase_filter(struct overlay_registers __iomem *regs)
static bool update_scaling_factors(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs,
struct put_image_params *params)
struct drm_intel_overlay_put_image *params)
{
/* fixed point with a 12 bit shift */
u32 xscale, yscale, xscale_UV, yscale_UV;
#define FP_SHIFT 12
#define FRACT_MASK 0xfff
bool scale_changed = false;
int uv_hscale = uv_hsubsampling(params->format);
int uv_vscale = uv_vsubsampling(params->format);
int uv_hscale = uv_hsubsampling(params->flags);
int uv_vscale = uv_vsubsampling(params->flags);
if (params->dst_w > 1)
xscale = ((params->src_scan_w - 1) << FP_SHIFT)
/(params->dst_w);
if (params->dst_width > 1)
xscale = ((params->src_scan_width - 1) << FP_SHIFT) /
params->dst_width;
else
xscale = 1 << FP_SHIFT;
if (params->dst_h > 1)
yscale = ((params->src_scan_h - 1) << FP_SHIFT)
/(params->dst_h);
if (params->dst_height > 1)
yscale = ((params->src_scan_height - 1) << FP_SHIFT) /
params->dst_height;
else
yscale = 1 << FP_SHIFT;
@@ -713,12 +696,12 @@ static void update_colorkey(struct intel_overlay *overlay,
iowrite32(flags, &regs->DCLRKM);
}
static u32 overlay_cmd_reg(struct put_image_params *params)
static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
{
u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
if (params->flags & I915_OVERLAY_YUV_PLANAR) {
switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
case I915_OVERLAY_YUV422:
cmd |= OCMD_YUV_422_PLANAR;
break;
@@ -731,7 +714,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
break;
}
} else { /* YUV packed */
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
case I915_OVERLAY_YUV422:
cmd |= OCMD_YUV_422_PACKED;
break;
@@ -740,7 +723,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
break;
}
switch (params->format & I915_OVERLAY_SWAP_MASK) {
switch (params->flags & I915_OVERLAY_SWAP_MASK) {
case I915_OVERLAY_NO_SWAP:
break;
case I915_OVERLAY_UV_SWAP:
@@ -760,7 +743,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
struct drm_intel_overlay_put_image *params)
{
struct overlay_registers __iomem *regs = overlay->regs;
struct drm_i915_private *dev_priv = overlay->i915;
@ -806,35 +789,40 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
iowrite32(params->dst_y << 16 | params->dst_x, &regs->DWINPOS);
iowrite32(params->dst_height << 16 | params->dst_width, &regs->DWINSZ);
if (params->format & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->format, params->src_w);
if (params->flags & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->flags,
params->src_width);
else
tmp_width = params->src_w;
tmp_width = params->src_width;
swidth = params->src_w;
swidth = params->src_width;
swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
sheight = params->src_h;
sheight = params->src_height;
iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
int uv_hscale = uv_hsubsampling(params->format);
int uv_vscale = uv_vsubsampling(params->format);
if (params->flags & I915_OVERLAY_YUV_PLANAR) {
int uv_hscale = uv_hsubsampling(params->flags);
int uv_vscale = uv_vsubsampling(params->flags);
u32 tmp_U, tmp_V;
swidth |= (params->src_w/uv_hscale) << 16;
swidth |= (params->src_width / uv_hscale) << 16;
sheight |= (params->src_height / uv_vscale) << 16;
tmp_U = calc_swidthsw(dev_priv, params->offset_U,
params->src_w/uv_hscale);
params->src_width / uv_hscale);
tmp_V = calc_swidthsw(dev_priv, params->offset_V,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
params->src_width / uv_hscale);
swidthsw |= max(tmp_U, tmp_V) << 16;
iowrite32(i915_ggtt_offset(vma) + params->offset_U,
&regs->OBUF_0U);
iowrite32(i915_ggtt_offset(vma) + params->offset_V,
&regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
@ -938,15 +926,16 @@ static int check_overlay_dst(struct intel_overlay *overlay,
return -EINVAL;
}
static int check_overlay_scaling(struct put_image_params *rec)
static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
{
u32 tmp;
/* downscaling limit is 8.0 */
tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
tmp = ((rec->src_scan_height << 16) / rec->dst_height) >> 16;
if (tmp > 7)
return -EINVAL;
tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
tmp = ((rec->src_scan_width << 16) / rec->dst_width) >> 16;
if (tmp > 7)
return -EINVAL;
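check_overlay_scaling() takes the integer part of a 16.16 downscale ratio and rejects anything above 7, so factors strictly below 8.0 pass. Worked numbers (hypothetical sizes):

#include <assert.h>

int main(void)
{
        /* 1599/200 = 7.995 downscale: integer part 7, accepted */
        assert(((1599u << 16) / 200) >> 16 == 7);
        /* 1600/200 = 8.0 downscale: integer part 8, rejected */
        assert(((1600u << 16) / 200) >> 16 == 8);
        return 0;
}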
@ -1067,13 +1056,12 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_overlay_put_image *put_image_rec = data;
struct drm_intel_overlay_put_image *params = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
int ret;
overlay = dev_priv->overlay;
@ -1082,7 +1070,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
if (!(params->flags & I915_OVERLAY_ENABLE)) {
drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
@ -1094,22 +1082,14 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return ret;
}
params = kmalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
drmmode_crtc = drm_crtc_find(dev, file_priv, put_image_rec->crtc_id);
if (!drmmode_crtc) {
ret = -ENOENT;
goto out_free;
}
drmmode_crtc = drm_crtc_find(dev, file_priv, params->crtc_id);
if (!drmmode_crtc)
return -ENOENT;
crtc = to_intel_crtc(drmmode_crtc);
new_bo = i915_gem_object_lookup(file_priv, put_image_rec->bo_handle);
if (!new_bo) {
ret = -ENOENT;
goto out_free;
}
new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
if (!new_bo)
return -ENOENT;
drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
@ -1145,42 +1125,27 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
overlay->pfit_active = false;
}
ret = check_overlay_dst(overlay, put_image_rec);
ret = check_overlay_dst(overlay, params);
if (ret != 0)
goto out_unlock;
if (overlay->pfit_active) {
params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
params->dst_y = (((u32)params->dst_y << 12) /
overlay->pfit_vscale_ratio);
/* shifting right rounds downwards, so add 1 */
params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
params->dst_height = (((u32)params->dst_height << 12) /
overlay->pfit_vscale_ratio) + 1;
} else {
params->dst_y = put_image_rec->dst_y;
params->dst_h = put_image_rec->dst_height;
}
params->dst_x = put_image_rec->dst_x;
params->dst_w = put_image_rec->dst_width;
params->src_w = put_image_rec->src_width;
params->src_h = put_image_rec->src_height;
params->src_scan_w = put_image_rec->src_scan_width;
params->src_scan_h = put_image_rec->src_scan_height;
if (params->src_scan_h > params->src_h ||
params->src_scan_w > params->src_w) {
if (params->src_scan_height > params->src_height ||
params->src_scan_width > params->src_width) {
ret = -EINVAL;
goto out_unlock;
}
ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
ret = check_overlay_src(dev_priv, params, new_bo);
if (ret != 0)
goto out_unlock;
params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
params->stride_Y = put_image_rec->stride_Y;
params->stride_UV = put_image_rec->stride_UV;
params->offset_Y = put_image_rec->offset_Y;
params->offset_U = put_image_rec->offset_U;
params->offset_V = put_image_rec->offset_V;
/* Check scaling after src size to prevent a divide-by-zero. */
ret = check_overlay_scaling(params);
@ -1195,16 +1160,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
kfree(params);
return 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
out_free:
kfree(params);
return ret;
}
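On the panel-fitter path above, dst_y and dst_height are divided by a 12.12 fixed-point vertical scale ratio, and since integer division truncates downwards, the height gets a compensating + 1. A standalone check with a hypothetical ratio:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t pfit_vscale_ratio = 0x1800;    /* 1.5 in 12.12 fixed point */
        uint32_t dst_height = 100;
        /* 100 / 1.5 = 66.67: truncation gives 66, the + 1 keeps the last line */
        uint32_t scaled = ((dst_height << 12) / pfit_vscale_ratio) + 1;

        assert(scaled == 67);
        return 0;
}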

View file

@ -2875,6 +2875,16 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
}
}
/*
* WA Level-0 adjustment for 16GB DIMMs: SKL+
* If we could not get the DIMM info, assume a 16GB DIMM is
* present and enable this WA to prevent any underruns.
*/
if (!dev_priv->dram_info.valid_dimm ||
dev_priv->dram_info.is_16gb_dimm)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
@ -6108,10 +6118,13 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
u32 val;
/* Display WA #0477 WaDisableIPC: skl */
if (IS_SKYLAKE(dev_priv)) {
if (IS_SKYLAKE(dev_priv))
dev_priv->ipc_enabled = false;
/* Display WA #1141: SKL:all KBL:all CFL */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
!dev_priv->dram_info.symmetric_memory)
dev_priv->ipc_enabled = false;
return;
}
val = I915_READ(DISP_ARB_CTL2);
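The hunk above amounts to a small policy: IPC is forced off on SKL (WA #0477) and on KBL/CFL whenever memory is asymmetric (WA #1141). A sketch of that decision with stand-in booleans (not the driver's function):

#include <assert.h>
#include <stdbool.h>

static bool ipc_allowed(bool is_skl, bool is_kbl_or_cfl, bool symmetric_memory)
{
        if (is_skl)
                return false;   /* Display WA #0477 WaDisableIPC */
        if (is_kbl_or_cfl && !symmetric_memory)
                return false;   /* Display WA #1141 */
        return true;
}

int main(void)
{
        assert(!ipc_allowed(true, false, true));        /* SKL: always off */
        assert(!ipc_allowed(false, true, false));       /* asymmetric: off */
        assert(ipc_allowed(false, true, true));         /* symmetric: allowed */
        return 0;
}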

View file

@ -1677,9 +1677,26 @@ static int switch_context(struct i915_request *rq)
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
if (ppgtt) {
ret = load_pd_dir(rq, ppgtt);
if (ret)
goto err;
int loops;
/*
* Baytrail takes a little more convincing that it really needs
* to reload the PD between contexts. It is not simply a matter
* of waiting a little longer, as adding more stalls after the
* load_pd_dir (i.e. adding a long loop around flush_pd_dir) is
* not as effective as reloading the PD umpteen times. 32 is derived from
* experimentation (gem_exec_parallel/fds) and has no good
* explanation.
*/
loops = 1;
if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
loops = 32;
do {
ret = load_pd_dir(rq, ppgtt);
if (ret)
goto err;
} while (--loops);
if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
unwind_mm = intel_engine_flag(engine);

View file

@ -1996,6 +1996,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define ICL_AUX_A_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B))
@ -3563,6 +3564,9 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 7. Setup MBUS. */
icl_mbus_init(dev_priv);
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)

View file

@ -99,31 +99,12 @@ struct intel_sdvo {
*/
uint16_t hotplug_active;
/**
* This is set if we're going to treat the device as TV-out.
*
* While we have these nice friendly flags for output types that ought
* to decide this for us, the S-Video output on our HDMI+S-Video card
* shows up as RGB1 (VGA).
*/
bool is_tv;
enum port port;
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
bool has_hdmi_monitor;
bool has_hdmi_audio;
bool rgb_quant_range_selectable;
/**
* This is set if we detect output of sdvo device as LVDS and
* have a valid fixed mode to use with the panel.
*/
bool is_lvds;
/**
* This is the SDVO fixed panel mode pointer.
*/
@ -172,6 +153,11 @@ struct intel_sdvo_connector {
/* this is to get the range of the margins. */
u32 max_hscan, max_vscan;
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
};
struct intel_sdvo_connector_state {
@ -766,6 +752,7 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
uint16_t clock,
uint16_t width,
uint16_t height)
@ -778,7 +765,7 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
args.height = height;
args.interlace = 0;
if (intel_sdvo->is_lvds &&
if (IS_LVDS(intel_sdvo_connector) &&
(intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
@ -1067,6 +1054,7 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
*/
static bool
intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@ -1077,6 +1065,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
intel_sdvo_connector,
mode->clock / 10,
mode->hdisplay,
mode->vdisplay))
@ -1127,6 +1116,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector_state *intel_sdvo_state =
to_intel_sdvo_connector_state(conn_state);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->base.mode;
@ -1142,20 +1133,22 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
* timings, even though this isn't really the right place in
* the sequence to do it. Oh well.
*/
if (intel_sdvo->is_tv) {
if (IS_TV(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
intel_sdvo_connector,
mode,
adjusted_mode);
pipe_config->sdvo_tv_clock = true;
} else if (intel_sdvo->is_lvds) {
} else if (IS_LVDS(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
return false;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
intel_sdvo_connector,
mode,
adjusted_mode);
}
@ -1194,11 +1187,11 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
}
/* Clock computation needs to happen after pixel multiplier. */
if (intel_sdvo->is_tv)
if (IS_TV(intel_sdvo_connector))
i9xx_adjust_sdvo_tv_clock(pipe_config);
/* Set user selected PAR to incoming mode's member */
if (intel_sdvo->is_hdmi)
if (intel_sdvo_connector->is_hdmi)
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
return true;
@ -1275,6 +1268,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
const struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
const struct drm_display_mode *mode = &crtc_state->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
@ -1304,7 +1299,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
return;
/* lvds has a special fixed output timing. */
if (intel_sdvo->is_lvds)
if (IS_LVDS(intel_sdvo_connector))
intel_sdvo_get_dtd_from_mode(&output_dtd,
intel_sdvo->sdvo_lvds_fixed_mode);
else
@ -1325,13 +1320,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
if (intel_sdvo->is_tv &&
if (IS_TV(intel_sdvo_connector) &&
!intel_sdvo_set_tv_format(intel_sdvo, conn_state))
return;
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
@ -1630,6 +1625,8 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@ -1644,7 +1641,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
if (intel_sdvo->is_lvds) {
if (IS_LVDS(intel_sdvo_connector)) {
if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
return MODE_PANEL;
@ -1759,6 +1756,8 @@ static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
enum drm_connector_status status;
struct edid *edid;
@ -1797,7 +1796,7 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
/* DDC bus is shared, match EDID to connector type */
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
if (intel_sdvo->is_hdmi) {
if (intel_sdvo_connector->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
intel_sdvo->rgb_quant_range_selectable =
@ -1875,17 +1874,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
}
/* May update encoder flag for like clock for SDVO TV, etc.*/
if (ret == connector_status_connected) {
intel_sdvo->is_tv = false;
intel_sdvo->is_lvds = false;
if (response & SDVO_TV_MASK)
intel_sdvo->is_tv = true;
if (response & SDVO_LVDS_MASK)
intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
}
return ret;
}
@ -2054,16 +2042,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* arranged in priority order.
*/
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
intel_sdvo->is_lvds = true;
break;
}
}
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
@ -2555,7 +2533,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
if (INTEL_GEN(dev_priv) >= 4 &&
intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
intel_sdvo_connector->is_hdmi = true;
}
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
@ -2563,7 +2541,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
return false;
}
if (intel_sdvo->is_hdmi)
if (intel_sdvo_connector->is_hdmi)
intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
return true;
@ -2591,8 +2569,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->controlled_output |= type;
intel_sdvo_connector->output_flag = type;
intel_sdvo->is_tv = true;
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
return false;
@ -2654,6 +2630,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
struct drm_display_mode *mode;
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
@ -2682,6 +2659,19 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
intel_sdvo_get_lvds_modes(connector);
list_for_each_entry(mode, &connector->probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, mode);
break;
}
}
if (!intel_sdvo->sdvo_lvds_fixed_mode)
goto err;
return true;
err:
@ -2692,9 +2682,6 @@ err:
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
intel_sdvo->is_tv = false;
intel_sdvo->is_lvds = false;
/* SDVO requires that the XXX1 function not exist unless the XXX0 function does. */
if (flags & SDVO_OUTPUT_TMDS0)

View file

@ -230,6 +230,56 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
#endif
}
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_rect *src = &plane_state->base.src;
u32 src_x, src_y, src_w, src_h;
/*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
src_x = src->x1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
src->x1 = src_x << 16;
src->x2 = (src_x + src_w) << 16;
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
if (fb->format->is_yuv &&
fb->format->format != DRM_FORMAT_NV12 &&
(src_x & 1 || src_w & 1)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
src_x, src_w);
return -EINVAL;
}
return 0;
}
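intel_plane_check_src_coordinates() truncates the 16.16 fixed-point source coordinates to whole pixels, and it truncates the width (rather than rounding the edges outwards) so that the viewport never grows and the downscale factor cannot be pushed out of bounds. Worked numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int32_t x1 = (2 << 16) + 0xC000;        /* 2.75 in 16.16 */
        int32_t x2 = (7 << 16) + 0x4000;        /* 7.25 */
        uint32_t src_x = x1 >> 16;              /* 2 */
        uint32_t src_w = (x2 - x1) >> 16;       /* floor(4.5) = 4 */

        /* the clamped width (4) is smaller than the fractional width (4.5) */
        assert(src_x == 2 && src_w == 4);
        return 0;
}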
unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
int cpp = drm_format_plane_cpp(pixel_format, 0);
/*
* "The stride in bytes must not exceed the
* of the size of 8K pixels and 32K bytes."
*/
if (drm_rotation_90_or_270(rotation))
return min(8192, 32768 / cpp);
else
return min(8192 * cpp, 32768);
}
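For the limits in skl_plane_max_stride(), a 4-byte-per-pixel format makes the two caps coincide; a quick check with hypothetical cpp values:

#include <assert.h>

int main(void)
{
        /* cpp = 4: the 8K-pixel and 32K-byte caps describe the same limit */
        assert(32768 / 4 == 8192);      /* rotated branch, in pixels */
        assert(8192 * 4 == 32768);      /* unrotated branch, in bytes */
        /* cpp = 1: the 8K-pixel cap dominates in both orientations */
        assert(32768 / 1 > 8192 && 8192 * 1 < 32768);
        return 0;
}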
void
skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@ -241,16 +291,15 @@ skl_update_plane(struct intel_plane *plane,
enum pipe pipe = plane->pipe;
u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
u32 aux_stride = skl_plane_stride(fb, 1, rotation);
u32 surf_addr = plane_state->color_plane[0].offset;
u32 stride = skl_plane_stride(plane_state, 0);
u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
uint32_t x = plane_state->main.x;
uint32_t y = plane_state->main.y;
uint32_t x = plane_state->color_plane[0].x;
uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
@ -277,9 +326,10 @@ skl_update_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->aux.offset - surf_addr) | aux_stride);
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
(plane_state->aux.y << 16) | plane_state->aux.x);
(plane_state->color_plane[1].y << 16) |
plane_state->color_plane[1].x);
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
@ -545,15 +595,15 @@ vlv_update_plane(struct intel_plane *plane,
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u32 sprctl = plane_state->ctl;
u32 sprsurf_offset = plane_state->main.offset;
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
uint32_t x = plane_state->main.x;
uint32_t y = plane_state->main.y;
uint32_t x = plane_state->color_plane[0].x;
uint32_t y = plane_state->color_plane[0].y;
unsigned long irqflags;
/* Sizes are 0 based */
@ -574,7 +624,8 @@ vlv_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
}
I915_WRITE_FW(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
plane_state->color_plane[0].stride);
I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@ -704,15 +755,15 @@ ivb_update_plane(struct intel_plane *plane,
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 sprctl = plane_state->ctl, sprscale = 0;
u32 sprsurf_offset = plane_state->main.offset;
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
uint32_t x = plane_state->main.x;
uint32_t y = plane_state->main.y;
uint32_t x = plane_state->color_plane[0].x;
uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
@ -736,7 +787,7 @@ ivb_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
}
I915_WRITE_FW(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@ -749,7 +800,7 @@ ivb_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (plane->can_scale)
if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), sprscale);
I915_WRITE_FW(SPRCTL(pipe), sprctl);
I915_WRITE_FW(SPRSURF(pipe),
@ -770,7 +821,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
I915_WRITE_FW(SPRCTL(pipe), 0);
/* Can't leave the scaler enabled... */
if (plane->can_scale)
if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), 0);
I915_WRITE_FW(SPRSURF(pipe), 0);
@ -800,6 +851,14 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
static unsigned int
g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
return 16384;
}
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@ -868,15 +927,15 @@ g4x_update_plane(struct intel_plane *plane,
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 dvscntr = plane_state->ctl, dvsscale = 0;
u32 dvssurf_offset = plane_state->main.offset;
u32 dvssurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
uint32_t x = plane_state->main.x;
uint32_t y = plane_state->main.y;
uint32_t x = plane_state->color_plane[0].x;
uint32_t y = plane_state->color_plane[0].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
@ -900,7 +959,7 @@ g4x_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
}
I915_WRITE_FW(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@ -959,144 +1018,309 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
}
static int
intel_check_sprite_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = state->base.fb;
int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384;
int max_scale, min_scale;
bool can_scale;
int ret;
uint32_t pixel_format = 0;
const struct drm_framebuffer *fb = plane_state->base.fb;
const struct drm_rect *src = &plane_state->base.src;
const struct drm_rect *dst = &plane_state->base.dst;
int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
int min_width, min_height;
if (!fb) {
state->base.visible = false;
crtc_w = drm_rect_width(dst);
crtc_h = drm_rect_height(dst);
src_x = src->x1 >> 16;
src_y = src->y1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_h = drm_rect_height(src) >> 16;
if (src_w == crtc_w && src_h == crtc_h)
return 0;
}
/* Don't modify another pipe's plane */
if (plane->pipe != crtc->pipe) {
DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
return -EINVAL;
}
min_width = 3;
/* FIXME check all gen limits */
if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > max_stride) {
DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
return -EINVAL;
}
/* setup can_scale, min_scale, max_scale */
if (INTEL_GEN(dev_priv) >= 9) {
if (state->base.fb)
pixel_format = state->base.fb->format->format;
/* use scaler when colorkey is not required */
if (!state->ckey.flags) {
can_scale = 1;
min_scale = 1;
max_scale =
skl_max_scale(crtc, crtc_state, pixel_format);
} else {
can_scale = 0;
min_scale = DRM_PLANE_HELPER_NO_SCALING;
max_scale = DRM_PLANE_HELPER_NO_SCALING;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (src_h & 1) {
DRM_DEBUG_KMS("Source height must be even with interlaced modes\n");
return -EINVAL;
}
min_height = 6;
} else {
can_scale = plane->can_scale;
max_scale = plane->max_downscale << 16;
min_scale = plane->can_scale ? 1 : (1 << 16);
min_height = 3;
}
ret = drm_atomic_helper_check_plane_state(&state->base,
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
if (src_w < min_width || src_h < min_height ||
src_w > 2048 || src_h > 2048) {
DRM_DEBUG_KMS("Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
src_w, src_h, min_width, min_height, 2048, 2048);
return -EINVAL;
}
if (width_bytes > 4096) {
DRM_DEBUG_KMS("Fetch width (%d) exceeds hardware max with scaling (%u)\n",
width_bytes, 4096);
return -EINVAL;
}
if (width_bytes > 4096 || fb->pitches[0] > 4096) {
DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
fb->pitches[0], 4096);
return -EINVAL;
}
return 0;
}
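The width_bytes term above appears to account for cacheline alignment of the scaler's fetch: ((src_x * cpp) & 63) is how far into a 64-byte line the first pixel lands, and that lead-in counts against the 4096-byte budget. With hypothetical numbers:

#include <assert.h>

int main(void)
{
        int cpp = 4, src_x = 5, src_w = 1000;
        int width_bytes = ((src_x * cpp) & 63) + src_w * cpp;

        /* 20 bytes of cacheline lead-in + 4000 bytes of pixels */
        assert(width_bytes == 4020);
        return 0;
}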
static int
g4x_sprite_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int max_scale, min_scale;
int ret;
if (INTEL_GEN(dev_priv) < 7) {
min_scale = 1;
max_scale = 16 << 16;
} else if (IS_IVYBRIDGE(dev_priv)) {
min_scale = 1;
max_scale = 2 << 16;
} else {
min_scale = DRM_PLANE_HELPER_NO_SCALING;
max_scale = DRM_PLANE_HELPER_NO_SCALING;
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
&crtc_state->base,
min_scale, max_scale,
true, true);
if (ret)
return ret;
if (state->base.visible) {
struct drm_rect *src = &state->base.src;
struct drm_rect *dst = &state->base.dst;
unsigned int crtc_w = drm_rect_width(dst);
unsigned int crtc_h = drm_rect_height(dst);
uint32_t src_x, src_y, src_w, src_h;
if (!plane_state->base.visible)
return 0;
/*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
src_x = src->x1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
ret = intel_plane_check_src_coordinates(plane_state);
if (ret)
return ret;
src->x1 = src_x << 16;
src->x2 = (src_x + src_w) << 16;
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
ret = g4x_sprite_check_scaling(crtc_state, plane_state);
if (ret)
return ret;
if (fb->format->is_yuv &&
fb->format->format != DRM_FORMAT_NV12 &&
(src_x % 2 || src_w % 2)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
src_x, src_w);
ret = i9xx_check_plane_surface(plane_state);
if (ret)
return ret;
if (INTEL_GEN(dev_priv) >= 7)
plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
else
plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
return 0;
}
int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
unsigned int rotation = plane_state->base.rotation;
/* CHV ignores the mirror bit when the rotate bit is set :( */
if (IS_CHERRYVIEW(dev_priv) &&
rotation & DRM_MODE_ROTATE_180 &&
rotation & DRM_MODE_REFLECT_X) {
DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
return -EINVAL;
}
return 0;
}
static int
vlv_sprite_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
int ret;
ret = chv_plane_check_rotation(plane_state);
if (ret)
return ret;
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
&crtc_state->base,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
if (ret)
return ret;
if (!plane_state->base.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
if (ret)
return ret;
ret = i9xx_check_plane_surface(plane_state);
if (ret)
return ret;
plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
return 0;
}
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
struct drm_format_name_buf format_name;
if (!fb)
return 0;
if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
is_ccs_modifier(fb->modifier)) {
DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
rotation);
return -EINVAL;
}
if (rotation & DRM_MODE_REFLECT_X &&
fb->modifier == DRM_FORMAT_MOD_LINEAR) {
DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
return -EINVAL;
}
if (drm_rotation_90_or_270(rotation)) {
if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
/* Check size restrictions when scaling */
if (src_w != crtc_w || src_h != crtc_h) {
unsigned int width_bytes;
int cpp = fb->format->cpp[0];
WARN_ON(!can_scale);
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
/* FIXME interlacing min height is 6 */
if (INTEL_GEN(dev_priv) < 9 && (
src_w < 3 || src_h < 3 ||
src_w > 2048 || src_h > 2048 ||
crtc_w < 3 || crtc_h < 3 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
}
/*
* 90/270 is not allowed with RGB64 16:16:16:16,
* RGB 16-bit 5:6:5, and Indexed 8-bit.
* TBD: Add the RGB64 case once it's added to the supported format list.
*/
switch (fb->format->format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(fb->format->format,
&format_name));
return -EINVAL;
default:
break;
}
}
if (INTEL_GEN(dev_priv) >= 9) {
ret = skl_check_plane_surface(crtc_state, state);
if (ret)
return ret;
state->ctl = skl_plane_ctl(crtc_state, state);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = i9xx_check_plane_surface(state);
if (ret)
return ret;
state->ctl = vlv_sprite_ctl(crtc_state, state);
} else if (INTEL_GEN(dev_priv) >= 7) {
ret = i9xx_check_plane_surface(state);
if (ret)
return ret;
state->ctl = ivb_sprite_ctl(crtc_state, state);
} else {
ret = i9xx_check_plane_surface(state);
if (ret)
return ret;
state->ctl = g4x_sprite_ctl(crtc_state, state);
/* Y-tiling is not supported in IF-ID Interlace mode */
if (crtc_state->base.enable &&
crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
(fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
}
return 0;
}
static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
int crtc_x = plane_state->base.dst.x1;
int crtc_w = drm_rect_width(&plane_state->base.dst);
int pipe_src_w = crtc_state->pipe_src_w;
/*
* Display WA #1175: cnl,glk
* Planes other than the cursor may cause FIFO underflow and display
* corruption if starting less than 4 pixels from the right edge of
* the screen.
* Besides the above WA, also fix the similar problem where planes
* other than the cursor that end less than 4 pixels from the left
* edge of the screen may cause FIFO underflow and display corruption.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
(crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
crtc_x + crtc_w < 4 ? "end" : "start",
crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
4, pipe_src_w - 4);
return -ERANGE;
}
return 0;
}
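To make the WA #1175 bounds concrete, hypothetical geometry: on a 1920-pixel-wide pipe, a 100-pixel-wide plane is rejected both when it ends left of x = 4 and when it starts right of x = 1916:

#include <assert.h>

int main(void)
{
        int pipe_src_w = 1920, crtc_w = 100, crtc_x;

        crtc_x = -97;                           /* plane ends at x = 3 */
        assert(crtc_x + crtc_w < 4);            /* rejected: "end" case */

        crtc_x = 1917;                          /* plane starts at x = 1917 */
        assert(crtc_x > pipe_src_w - 4);        /* rejected: "start" case */
        return 0;
}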
int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int max_scale, min_scale;
int ret;
ret = skl_plane_check_fb(crtc_state, plane_state);
if (ret)
return ret;
/* use scaler when colorkey is not required */
if (!plane_state->ckey.flags) {
const struct drm_framebuffer *fb = plane_state->base.fb;
min_scale = 1;
max_scale = skl_max_scale(crtc_state,
fb ? fb->format->format : 0);
} else {
min_scale = DRM_PLANE_HELPER_NO_SCALING;
max_scale = DRM_PLANE_HELPER_NO_SCALING;
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
&crtc_state->base,
min_scale, max_scale,
true, true);
if (ret)
return ret;
if (!plane_state->base.visible)
return 0;
ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
if (ret)
return ret;
ret = intel_plane_check_src_coordinates(plane_state);
if (ret)
return ret;
ret = skl_check_plane_surface(plane_state);
if (ret)
return ret;
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
state->color_ctl = glk_plane_color_ctl(crtc_state, state);
plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
plane_state);
return 0;
}
@ -1523,15 +1747,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->base.state = &state->base;
if (INTEL_GEN(dev_priv) >= 9) {
intel_plane->can_scale = true;
state->scaler_id = -1;
intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
PLANE_SPRITE0 + plane);
intel_plane->max_stride = skl_plane_max_stride;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->get_hw_state = skl_plane_get_hw_state;
intel_plane->check_plane = skl_plane_check;
if (skl_plane_has_planar(dev_priv, pipe,
PLANE_SPRITE0 + plane)) {
@ -1549,12 +1774,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &skl_plane_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
intel_plane->max_stride = i9xx_plane_max_stride;
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
intel_plane->get_hw_state = vlv_plane_get_hw_state;
intel_plane->check_plane = vlv_sprite_check;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@ -1562,17 +1786,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
}
intel_plane->max_stride = g4x_sprite_max_stride;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->get_hw_state = ivb_plane_get_hw_state;
intel_plane->check_plane = g4x_sprite_check;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@ -1580,12 +1798,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &snb_sprite_funcs;
} else {
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->max_stride = g4x_sprite_max_stride;
intel_plane->update_plane = g4x_update_plane;
intel_plane->disable_plane = g4x_disable_plane;
intel_plane->get_hw_state = g4x_plane_get_hw_state;
intel_plane->check_plane = g4x_sprite_check;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
@ -1618,7 +1835,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->i9xx_plane = plane;
intel_plane->id = PLANE_SPRITE0 + plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
intel_plane->check_plane = intel_check_sprite_plane;
possible_crtcs = (1 << pipe);

View file

@ -401,6 +401,10 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
} else if (INTEL_GEN(i915) < 11) {
ret = intel_guc_sample_forcewake(guc);
if (ret)
goto err_communication;
}
dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",

View file

@ -235,6 +235,8 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
sg = sg_next(sg);
} while (1);
i915_sg_trim(st);
obj->mm.madv = I915_MADV_DONTNEED;
__i915_gem_object_set_pages(obj, st, sg_page_sizes);

View file

@ -298,6 +298,7 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@ -375,6 +376,7 @@ static int igt_gem_coherency(void *arg)
}
}
unlock:
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;

View file

@ -22,6 +22,8 @@
*
*/
#include <linux/prime_numbers.h>
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
@ -32,6 +34,200 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
struct live_test {
struct drm_i915_private *i915;
const char *func;
const char *name;
unsigned int reset_count;
};
static int begin_live_test(struct live_test *t,
struct drm_i915_private *i915,
const char *func,
const char *name)
{
int err;
t->i915 = i915;
t->func = func;
t->name = name;
err = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err);
return err;
}
i915->gpu_error.missed_irq_rings = 0;
t->reset_count = i915_reset_count(&i915->gpu_error);
return 0;
}
static int end_live_test(struct live_test *t)
{
struct drm_i915_private *i915 = t->i915;
if (igt_flush_test(i915, I915_WAIT_LOCKED))
return -EIO;
if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
pr_err("%s(%s): GPU was reset %d times!\n",
t->func, t->name,
i915_reset_count(&i915->gpu_error) - t->reset_count);
return -EIO;
}
if (i915->gpu_error.missed_irq_rings) {
pr_err("%s(%s): Missed interrupts on engines %lx\n",
t->func, t->name, i915->gpu_error.missed_irq_rings);
return -EIO;
}
return 0;
}
static int live_nop_switch(void *arg)
{
const unsigned int nctx = 1024;
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
enum intel_engine_id id;
struct drm_file *file;
struct live_test t;
unsigned long n;
int err = -ENODEV;
/*
* Create as many contexts as we can feasibly get away with
* and check we can switch between them rapidly.
*
* Serves as very simple stress test for submission and HW switching
* between contexts.
*/
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
err = -ENOMEM;
goto out_unlock;
}
for (n = 0; n < nctx; n++) {
ctx[n] = i915_gem_create_context(i915, file->driver_priv);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
goto out_unlock;
}
}
for_each_engine(engine, i915, id) {
struct i915_request *rq;
unsigned long end_time, prime;
ktime_t times[2] = {};
times[0] = ktime_get_raw();
for (n = 0; n < nctx; n++) {
rq = i915_request_alloc(engine, ctx[n]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unlock;
}
i915_request_add(rq);
}
if (i915_request_wait(rq,
I915_WAIT_LOCKED,
HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
i915_gem_set_wedged(i915);
err = -EIO;
goto out_unlock;
}
times[1] = ktime_get_raw();
pr_info("Populated %d contexts on %s in %lluns\n",
nctx, engine->name, ktime_to_ns(times[1] - times[0]));
err = begin_live_test(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
end_time = jiffies + i915_selftest.timeout_jiffies;
for_each_prime_number_from(prime, 2, 8192) {
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
rq = i915_request_alloc(engine, ctx[n % nctx]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unlock;
}
/*
* This space is left intentionally blank.
*
* We do not actually want to perform any
* action with this request, we just want
* to measure the latency in allocation
* and submission of our breadcrumbs -
* ensuring that the bare request is sufficient
* for the system to work (i.e. proper HEAD
* tracking of the rings, interrupt handling,
* etc). It also gives us the lowest bounds
* for latency.
*/
i915_request_add(rq);
}
if (i915_request_wait(rq,
I915_WAIT_LOCKED,
HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
i915_gem_set_wedged(i915);
break;
}
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 2)
times[0] = times[1];
if (__igt_timeout(end_time, NULL))
break;
}
err = end_live_test(&t);
if (err)
goto out_unlock;
pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
ktime_to_ns(times[0]),
prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
}
out_unlock:
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
@ -195,6 +391,7 @@ err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
err_vma:
i915_vma_unpin(vma);
return err;
@ -636,6 +833,8 @@ static int igt_switch_to_kernel_context(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
mutex_unlock(&i915->drm.struct_mutex);
@ -658,6 +857,8 @@ out_unlock:
GEM_TRACE_DUMP_ON(err);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
kernel_context_close(ctx);
@ -713,6 +914,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_switch_to_kernel_context),
SUBTEST(live_nop_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
};

View file

@ -342,6 +342,7 @@ static int live_nop_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
@ -402,6 +403,7 @@ static int live_nop_request(void *arg)
}
out_unlock:
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -487,6 +489,7 @@ static int live_empty_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
@ -550,6 +553,7 @@ out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -644,6 +648,7 @@ static int live_all_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
err = begin_live_test(&t, i915, __func__, "");
if (err)
@ -726,6 +731,7 @@ out_request:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -747,6 +753,7 @@ static int live_sequential_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
err = begin_live_test(&t, i915, __func__, "");
if (err)
@ -853,6 +860,7 @@ out_request:
i915_request_put(request[id]);
}
out_unlock:
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}

View file

@ -142,6 +142,7 @@ static int igt_guc_clients(void *args)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@ -269,6 +270,7 @@ out:
guc_clients_create(guc);
guc_clients_doorbell_init(guc);
unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@ -287,6 +289,7 @@ static int igt_guc_doorbells(void *arg)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@ -379,6 +382,7 @@ out:
guc_client_free(clients[i]);
}
unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}

View file

@ -221,6 +221,7 @@ static int live_sanitycheck(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
if (spinner_init(&spin, i915))
goto err_unlock;
@ -261,6 +262,7 @@ err_spin:
spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -278,6 +280,7 @@ static int live_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
if (spinner_init(&spin_hi, i915))
goto err_unlock;
@ -350,6 +353,7 @@ err_spin_hi:
spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -368,6 +372,7 @@ static int live_late_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
if (spinner_init(&spin_hi, i915))
goto err_unlock;
@ -440,6 +445,7 @@ err_spin_hi:
spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@ -467,6 +473,7 @@ static int live_preempt_hang(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
if (spinner_init(&spin_hi, i915))
goto err_unlock;
@ -561,6 +568,7 @@ err_spin_hi:
spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
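Every selftest touched above gains the same bracket: take struct_mutex, then a runtime-PM wakeref, and release both in reverse order on the way out. A hypothetical helper condensing the pattern (a kernel-style sketch; the patches themselves open-code it in each test):

static int with_lock_and_rpm(struct drm_i915_private *i915,
                             int (*body)(struct drm_i915_private *i915))
{
        int err;

        mutex_lock(&i915->drm.struct_mutex);
        intel_runtime_pm_get(i915);

        err = body(i915);

        intel_runtime_pm_put(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}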

View file

@ -44,7 +44,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
intel_runtime_pm_get(engine->i915);
rq = i915_request_alloc(engine, ctx);
intel_runtime_pm_put(engine->i915);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@ -175,7 +177,10 @@ static int switch_to_scratch_context(struct intel_engine_cs *engine)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
intel_runtime_pm_get(engine->i915);
rq = i915_request_alloc(engine, ctx);
intel_runtime_pm_put(engine->i915);
kernel_context_close(ctx);
if (IS_ERR(rq))
return PTR_ERR(rq);