Merge tag 'gvt-next-2018-06-19' of https://github.com/intel/gvt-linux into drm-intel-next-queued
gvt-next-2018-06-19

- fine-grained per vgpu locking (Colin)
- fine-grained vgpu scheduler locking (Colin)
- deliver windows guest cursor hotspot info (Tina)
- GVT-g BXT support (Colin)
- other misc and checker fixes (Chris, Xinyun)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180619090043.ly6gquafbmxuus6h@zhen-hp.sh.intel.com
This commit is contained in:
Commit
ac2bf28ad1
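
Note on the locking rework: the series below splits the old coarse gvt->lock into a hierarchy: vgpu->vgpu_lock for per-vGPU state, gvt->sched_lock for scheduler state, and gvt->lock for the remaining global state (vgpu_idr, vGPU types). A minimal sketch of the resulting nesting order, inferred from the complete_current_workload() and intel_gvt_destroy_vgpu() hunks below; the function name here is illustrative and not part of the patch:

	/* Illustrative only: lock nesting after this series.
	 * vgpu_lock is taken first, sched_lock nests inside it;
	 * gvt->lock no longer wraps per-vGPU work.
	 */
	static void example_touch_vgpu_and_scheduler(struct intel_vgpu *vgpu)
	{
		struct intel_gvt *gvt = vgpu->gvt;

		mutex_lock(&vgpu->vgpu_lock);	/* this vGPU's state */
		mutex_lock(&gvt->sched_lock);	/* scheduler data */

		/* ... operate on vgpu->sched_data, scheduler queues ... */

		mutex_unlock(&gvt->sched_lock);
		mutex_unlock(&vgpu->vgpu_lock);
	}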
@@ -172,6 +172,7 @@ struct decode_info {
 #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
 #define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
 #define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5)
 
 #define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
 #define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
@@ -1256,7 +1257,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 	if (!info->async_flip)
 		return 0;
 
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv)) {
 		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
 		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
@@ -1284,7 +1287,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 
 	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
 		      info->surf_val << 12);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv)) {
 		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
 			      info->stride_val);
 		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1308,7 +1313,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 
 	if (IS_BROADWELL(dev_priv))
 		return gen8_decode_mi_display_flip(s, info);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv))
 		return skl_decode_mi_display_flip(s, info);
 
 	return -ENODEV;
@@ -1317,26 +1324,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 static int check_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
-	if (IS_BROADWELL(dev_priv)
-		|| IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv))
-		return gen8_check_mi_display_flip(s, info);
-	return -ENODEV;
+	return gen8_check_mi_display_flip(s, info);
 }
 
 static int update_plane_mmio_from_mi_display_flip(
 		struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
-	if (IS_BROADWELL(dev_priv)
-		|| IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv))
-		return gen8_update_plane_mmio_from_mi_display_flip(s, info);
-	return -ENODEV;
+	return gen8_update_plane_mmio_from_mi_display_flip(s, info);
 }
 
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
@@ -1615,15 +1610,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
  */
 static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
-	struct intel_gvt *gvt = s->vgpu->gvt;
-
-	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-		|| IS_KABYLAKE(gvt->dev_priv)) {
-		/* BDW decides privilege based on address space */
-		if (cmd_val(s, 0) & (1 << 8) &&
-			!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
-			return 0;
-	}
+	/* Decide privilege based on address space */
+	if (cmd_val(s, 0) & (1 << 8) &&
+			!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+		return 0;
 
 	return 1;
 }
@@ -2349,6 +2339,9 @@ static struct cmd_info cmd_info[] = {
 	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
 		0, 16, NULL},
 
+	{"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
+		0, 16, NULL},
+
 	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
 
 	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,

@@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int pipe;
 
+	if (IS_BROXTON(dev_priv)) {
+		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
+			BXT_DE_PORT_HP_DDIB |
+			BXT_DE_PORT_HP_DDIC);
+
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+				BXT_DE_PORT_HP_DDIA;
+		}
+
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+				BXT_DE_PORT_HP_DDIB;
+		}
+
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+				BXT_DE_PORT_HP_DDIC;
+		}
+
+		return;
+	}
+
 	vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
 			SDE_PORTC_HOTPLUG_CPT |
 			SDE_PORTD_HOTPLUG_CPT);
@@ -337,26 +360,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
 	struct intel_gvt_irq *irq = &gvt->irq;
 	struct intel_vgpu *vgpu;
 	int pipe, id;
+	int found = false;
 
-	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
-		return;
-
+	mutex_lock(&gvt->lock);
 	for_each_active_vgpu(gvt, vgpu, id) {
 		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
-			if (pipe_is_enabled(vgpu, pipe))
-				goto out;
+			if (pipe_is_enabled(vgpu, pipe)) {
+				found = true;
+				break;
+			}
 		}
+		if (found)
+			break;
 	}
 
 	/* all the pipes are disabled */
-	hrtimer_cancel(&irq->vblank_timer.timer);
-	return;
-
-out:
-	hrtimer_start(&irq->vblank_timer.timer,
-		ktime_add_ns(ktime_get(), irq->vblank_timer.period),
-		HRTIMER_MODE_ABS);
+	if (!found)
+		hrtimer_cancel(&irq->vblank_timer.timer);
+	else
+		hrtimer_start(&irq->vblank_timer.timer,
+			ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+			HRTIMER_MODE_ABS);
+	mutex_unlock(&gvt->lock);
 }
 
 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -393,8 +418,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
 {
 	int pipe;
 
+	mutex_lock(&vgpu->vgpu_lock);
 	for_each_pipe(vgpu->gvt->dev_priv, pipe)
 		emulate_vblank_on_pipe(vgpu, pipe);
+	mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -409,11 +436,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 	struct intel_vgpu *vgpu;
 	int id;
 
-	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
-		return;
-
+	mutex_lock(&gvt->lock);
 	for_each_active_vgpu(gvt, vgpu, id)
 		emulate_vblank(vgpu);
+	mutex_unlock(&gvt->lock);
 }
 
 /**

@@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
 	obj->write_domain = 0;
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv)) {
 		unsigned int tiling_mode = 0;
 		unsigned int stride = 0;
 
@@ -192,6 +194,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 	return obj;
 }
 
+static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
+{
+	if (c && c->x_hot <= c->width && c->y_hot <= c->height)
+		return true;
+	else
+		return false;
+}
+
 static int vgpu_get_plane_info(struct drm_device *dev,
 		struct intel_vgpu *vgpu,
 		struct intel_vgpu_fb_info *info,
@@ -229,12 +239,14 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		info->x_pos = c.x_pos;
 		info->y_pos = c.y_pos;
 
-		/* The invalid cursor hotspot value is delivered to host
-		 * until we find a way to get the cursor hotspot info of
-		 * guest OS.
-		 */
-		info->x_hot = UINT_MAX;
-		info->y_hot = UINT_MAX;
+		if (validate_hotspot(&c)) {
+			info->x_hot = c.x_hot;
+			info->y_hot = c.y_hot;
+		} else {
+			info->x_hot = UINT_MAX;
+			info->y_hot = UINT_MAX;
+		}
+
 		info->size = (((info->stride * c.height * c.bpp) / 8)
 			+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else {

@@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
 	return chr;
 }
 
+static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
+{
+	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+	int port = -EINVAL;
+
+	if (port_select == 1)
+		port = PORT_B;
+	else if (port_select == 2)
+		port = PORT_C;
+	else if (port_select == 3)
+		port = PORT_D;
+	return port;
+}
+
 static inline int get_port_from_gmbus0(u32 gmbus0)
 {
 	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
@@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
 static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 			unsigned int offset, void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int port, pin_select;
 
 	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 	if (pin_select == 0)
 		return 0;
 
-	port = get_port_from_gmbus0(pin_select);
+	if (IS_BROXTON(dev_priv))
+		port = bxt_get_port_from_gmbus0(pin_select);
+	else
+		port = get_port_from_gmbus0(pin_select);
 	if (WARN_ON(port < 0))
 		return 0;
 

@@ -146,14 +146,11 @@ struct execlist_ring_context {
 	u32 nop4;
 	u32 lri_cmd_2;
 	struct execlist_mmio_pair ctx_timestamp;
-	struct execlist_mmio_pair pdp3_UDW;
-	struct execlist_mmio_pair pdp3_LDW;
-	struct execlist_mmio_pair pdp2_UDW;
-	struct execlist_mmio_pair pdp2_LDW;
-	struct execlist_mmio_pair pdp1_UDW;
-	struct execlist_mmio_pair pdp1_LDW;
-	struct execlist_mmio_pair pdp0_UDW;
-	struct execlist_mmio_pair pdp0_LDW;
+	/*
+	 * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW,
+	 * pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW}
+	 */
+	struct execlist_mmio_pair pdps[8];
 };
 
 struct intel_vgpu_elsp_dwords {

@@ -36,6 +36,7 @@
 #include <uapi/drm/drm_fourcc.h>
 #include "i915_drv.h"
 #include "gvt.h"
+#include "i915_pvinfo.h"
 
 #define PRIMARY_FORMAT_NUM 16
 struct pixel_format {
@@ -150,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
 	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
 	u32 stride = stride_reg;
 
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv)) {
 		switch (tiled) {
 		case PLANE_CTL_TILED_LINEAR:
 			stride = stride_reg * 64;
@@ -214,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	if (!plane->enabled)
 		return -ENODEV;
 
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv)) {
 		plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
 			_PLANE_CTL_TILED_SHIFT;
 		fmt = skl_format_to_drm(
@@ -256,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	}
 
 	plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
-		(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+		(IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv)) ?
 		(_PRI_PLANE_STRIDE_MASK >> 6) :
 		_PRI_PLANE_STRIDE_MASK, plane->bpp);
 
@@ -384,6 +391,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 	plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
 	plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
 
+	plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
+	plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
 	return 0;
 }
 

@@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt,
 
 	h = (struct gvt_firmware_header *)fw->data;
 
-	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+	crc32_start = offsetofend(struct gvt_firmware_header, crc32);
 	mem = fw->data + crc32_start;
 
 #define VERIFY(s, a, b) do { \

@@ -1972,7 +1972,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
	 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
 	 */
-	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+	if (type > GTT_TYPE_PPGTT_PTE_PT) {
 		struct intel_gvt_gtt_entry se;
 
 		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
@@ -2256,13 +2256,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 
 	gvt_dbg_core("init gtt\n");
 
-	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-		|| IS_KABYLAKE(gvt->dev_priv)) {
-		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
-		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
-	} else {
-		return -ENODEV;
-	}
+	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
 
 	page = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!page) {

@@ -238,18 +238,15 @@ static void init_device_info(struct intel_gvt *gvt)
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 
-	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-		|| IS_KABYLAKE(gvt->dev_priv)) {
-		info->max_support_vgpus = 8;
-		info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
-		info->mmio_size = 2 * 1024 * 1024;
-		info->mmio_bar = 0;
-		info->gtt_start_offset = 8 * 1024 * 1024;
-		info->gtt_entry_size = 8;
-		info->gtt_entry_size_shift = 3;
-		info->gmadr_bytes_in_cmd = 8;
-		info->max_surface_size = 36 * 1024 * 1024;
-	}
+	info->max_support_vgpus = 8;
+	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+	info->mmio_size = 2 * 1024 * 1024;
+	info->mmio_bar = 0;
+	info->gtt_start_offset = 8 * 1024 * 1024;
+	info->gtt_entry_size = 8;
+	info->gtt_entry_size_shift = 3;
+	info->gmadr_bytes_in_cmd = 8;
+	info->max_surface_size = 36 * 1024 * 1024;
 	info->msi_cap_offset = pdev->msi_cap;
 }
 
@@ -271,11 +268,8 @@ static int gvt_service_thread(void *data)
 			continue;
 
 		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
-				(void *)&gvt->service_request)) {
-			mutex_lock(&gvt->lock);
+				(void *)&gvt->service_request))
 			intel_gvt_emulate_vblank(gvt);
-			mutex_unlock(&gvt->lock);
-		}
 
 		if (test_bit(INTEL_GVT_REQUEST_SCHED,
 				(void *)&gvt->service_request) ||
@@ -379,6 +373,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	idr_init(&gvt->vgpu_idr);
 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
 	mutex_init(&gvt->lock);
+	mutex_init(&gvt->sched_lock);
 	gvt->dev_priv = dev_priv;
 
 	init_device_info(gvt);

@@ -170,12 +170,18 @@ struct intel_vgpu_submission {
 
 struct intel_vgpu {
 	struct intel_gvt *gvt;
+	struct mutex vgpu_lock;
 	int id;
 	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
 	bool active;
 	bool pv_notified;
 	bool failsafe;
 	unsigned int resetting_eng;
+
+	/* Both sched_data and sched_ctl can be seen a part of the global gvt
+	 * scheduler structure. So below 2 vgpu data are protected
+	 * by sched_lock, not vgpu_lock.
+	 */
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
 
@@ -294,7 +300,13 @@ struct intel_vgpu_type {
 };
 
 struct intel_gvt {
+	/* GVT scope lock, protect GVT itself, and all resource currently
+	 * not yet protected by special locks(vgpu and scheduler lock).
+	 */
 	struct mutex lock;
+	/* scheduler scope lock, protect gvt and vgpu schedule related data */
+	struct mutex sched_lock;
+
 	struct drm_i915_private *dev_priv;
 	struct idr vgpu_idr; /* vGPU IDR pool */
 
@@ -314,6 +326,10 @@ struct intel_gvt {
 
 	struct task_struct *service_thread;
 	wait_queue_head_t service_thread_wq;
+
+	/* service_request is always used in bit operation, we should always
+	 * use it with atomic bit ops so that no need to use gvt big lock.
+	 */
 	unsigned long service_request;
 
 	struct {

@@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 		return D_SKL;
 	else if (IS_KABYLAKE(gvt->dev_priv))
 		return D_KBL;
+	else if (IS_BROXTON(gvt->dev_priv))
+		return D_BXT;
 
 	return 0;
 }
@@ -255,7 +257,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
 
 	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-		|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
+		|| IS_BROXTON(vgpu->gvt->dev_priv)) {
 		switch (offset) {
 		case FORCEWAKE_RENDER_GEN9_REG:
 			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -316,6 +319,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		}
 	}
 
+	/* vgpu_lock already hold by emulate mmio r/w */
 	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
 
 	/* sw will wait for the device to ack the reset request */
@@ -420,7 +424,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
 	else
 		vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+	/* vgpu_lock already hold by emulate mmio r/w */
+	mutex_unlock(&vgpu->vgpu_lock);
 	intel_gvt_check_vblank_emulation(vgpu->gvt);
+	mutex_lock(&vgpu->vgpu_lock);
 	return 0;
 }
 
@@ -857,7 +864,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 	data = vgpu_vreg(vgpu, offset);
 
 	if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
-		|| IS_KABYLAKE(vgpu->gvt->dev_priv))
+		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
+		|| IS_BROXTON(vgpu->gvt->dev_priv))
 		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
 		/* SKL DPB/C/D aux ctl register changed */
 		return 0;
@@ -1204,8 +1212,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		ret = handle_g2v_notification(vgpu, data);
 		break;
 	/* add xhot and yhot to handled list to avoid error log */
-	case 0x78830:
-	case 0x78834:
+	case _vgtif_reg(cursor_x_hot):
+	case _vgtif_reg(cursor_y_hot):
 	case _vgtif_reg(pdp[0].lo):
 	case _vgtif_reg(pdp[0].hi):
 	case _vgtif_reg(pdp[1].lo):
@@ -1364,6 +1372,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 			*data0 = 0x1e1a1100;
 		else
 			*data0 = 0x61514b3d;
+	} else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+		/**
+		 * "Read memory latency" command on gen9.
+		 * Below memory latency values are read
+		 * from Broxton MRB.
+		 */
+		if (!*data0)
+			*data0 = 0x16080707;
+		else
+			*data0 = 0x16161616;
 		}
 		break;
 	case SKL_PCODE_CDCLK_CONTROL:
@@ -1421,8 +1439,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
 {
 	u32 v = *(u32 *)p_data;
 
-	v &= (1 << 31) | (1 << 29) | (1 << 9) |
-		(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+	if (IS_BROXTON(vgpu->gvt->dev_priv))
+		v &= (1 << 31) | (1 << 29);
+	else
+		v &= (1 << 31) | (1 << 29) | (1 << 9) |
+			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
 	v |= (v >> 1);
 
 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
@@ -1442,6 +1463,102 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 	return 0;
 }
 
+static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+
+	if (v & BXT_DE_PLL_PLL_ENABLE)
+		v |= BXT_DE_PLL_LOCK;
+
+	vgpu_vreg(vgpu, offset) = v;
+
+	return 0;
+}
+
+static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+
+	if (v & PORT_PLL_ENABLE)
+		v |= PORT_PLL_LOCK;
+
+	vgpu_vreg(vgpu, offset) = v;
+
+	return 0;
+}
+
+static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
+
+	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+
+	vgpu_vreg(vgpu, offset) = v;
+
+	return 0;
+}
+
+static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 v = vgpu_vreg(vgpu, offset);
+
+	v &= ~UNIQUE_TRANGE_EN_METHOD;
+
+	vgpu_vreg(vgpu, offset) = v;
+
+	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+
+	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
+		vgpu_vreg(vgpu, offset - 0x600) = v;
+		vgpu_vreg(vgpu, offset - 0x800) = v;
+	} else {
+		vgpu_vreg(vgpu, offset - 0x400) = v;
+		vgpu_vreg(vgpu, offset - 0x600) = v;
+	}
+
+	vgpu_vreg(vgpu, offset) = v;
+
+	return 0;
+}
+
+static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 v = *(u32 *)p_data;
+
+	if (v & BIT(0)) {
+		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+			~PHY_RESERVED;
+		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+			PHY_POWER_GOOD;
+	}
+
+	if (v & BIT(1)) {
+		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+			~PHY_RESERVED;
+		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
+			PHY_POWER_GOOD;
+	}
+
+
+	vgpu_vreg(vgpu, offset) = v;
+
+	return 0;
+}
+
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -2665,17 +2782,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
-	MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
-	MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
-	MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
-	MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
+	MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
+	MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
+	MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
+	MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
 
 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2800,53 +2917,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
+	MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
+	MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+	MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
 
-	MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
-	MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+	MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+		NULL, NULL);
+	MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+		NULL, NULL);
 
 	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
 	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
 	MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 
 	/* TRTT */
-	MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
-	MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+	MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+		NULL, gen9_trtte_write);
+	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-	MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
+	MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
+	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
+	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+	MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
 	MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
 
 	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
-	MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+	MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
@@ -2864,11 +2985,185 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
 	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
+	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
 
 	MMIO_D(_MMIO(0x4ab8), D_KBL);
-	MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
+	MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
 
 	return 0;
 }
 
+static int init_bxt_mmio_info(struct intel_gvt *gvt)
+{
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ret;
+
+	MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
+
+	MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
+	MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
+	MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
+	MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
+	MMIO_D(ERROR_GEN6, D_BXT);
+	MMIO_D(DONE_REG, D_BXT);
+	MMIO_D(EIR, D_BXT);
+	MMIO_D(PGTBL_ER, D_BXT);
+	MMIO_D(_MMIO(0x4194), D_BXT);
+	MMIO_D(_MMIO(0x4294), D_BXT);
+	MMIO_D(_MMIO(0x4494), D_BXT);
+
+	MMIO_RING_D(RING_PSMI_CTL, D_BXT);
+	MMIO_RING_D(RING_DMA_FADD, D_BXT);
+	MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
+	MMIO_RING_D(RING_IPEHR, D_BXT);
+	MMIO_RING_D(RING_INSTPS, D_BXT);
+	MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
+	MMIO_RING_D(RING_BBSTATE, D_BXT);
+	MMIO_RING_D(RING_IPEIR, D_BXT);
+
+	MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
+
+	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
+	MMIO_D(BXT_RP_STATE_CAP, D_BXT);
+	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
+		NULL, bxt_phy_ctl_family_write);
+	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
+		NULL, bxt_phy_ctl_family_write);
+	MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
+	MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
+	MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
+	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
+		NULL, bxt_port_pll_enable_write);
+	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
+		NULL, bxt_port_pll_enable_write);
+	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
+		bxt_port_pll_enable_write);
+
+	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
+	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
+	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
+	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
+	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
+	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
+	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
+	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
+	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
+
+	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
+	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
+	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
+	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
+	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
+	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
+	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
+	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
+	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
+
+	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
+		NULL, bxt_pcs_dw12_grp_write);
+	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+		bxt_port_tx_dw3_read, NULL);
+	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
+
+	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
+		NULL, bxt_pcs_dw12_grp_write);
+	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+		bxt_port_tx_dw3_read, NULL);
+	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
+
+	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
+		NULL, bxt_pcs_dw12_grp_write);
+	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+		bxt_port_tx_dw3_read, NULL);
+	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
+	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
+
+	MMIO_D(BXT_DE_PLL_CTL, D_BXT);
+	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
+	MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
+	MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
+
+	MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+
+	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
+	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
+	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
+
+	MMIO_D(RC6_CTX_BASE, D_BXT);
+
+	MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
+	MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
+	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
+	MMIO_D(GEN6_GFXPAUSE, D_BXT);
+	MMIO_D(GEN8_L3SQCREG1, D_BXT);
+
+	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+
+	return 0;
+}
+
@@ -2960,6 +3255,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		ret = init_skl_mmio_info(gvt);
 		if (ret)
 			goto err;
+	} else if (IS_BROXTON(dev_priv)) {
+		ret = init_broadwell_mmio_info(gvt);
+		if (ret)
+			goto err;
+		ret = init_skl_mmio_info(gvt);
+		if (ret)
+			goto err;
+		ret = init_bxt_mmio_info(gvt);
+		if (ret)
+			goto err;
 	}
 
 	gvt->mmio.mmio_block = mmio_blocks;

@@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
 			clear_bits |= (1 << bit);
 	}
 
-	WARN_ON(!up_irq_info);
+	if (WARN_ON(!up_irq_info))
+		return;
 
 	if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
 		u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
@@ -580,7 +581,9 @@ static void gen8_init_irq(
 
 		SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 		SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-	} else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+	} else if (IS_SKYLAKE(gvt->dev_priv)
+			|| IS_KABYLAKE(gvt->dev_priv)
+			|| IS_BROXTON(gvt->dev_priv)) {
 		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
 
 	gvt_dbg_core("init irq framework\n");
 
-	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-		|| IS_KABYLAKE(gvt->dev_priv)) {
-		irq->ops = &gen8_irq_ops;
-		irq->irq_map = gen8_irq_map;
-	} else {
-		WARN_ON(1);
-		return -ENODEV;
-	}
+	irq->ops = &gen8_irq_ops;
+	irq->irq_map = gen8_irq_map;
 
 	/* common event initialization */
 	init_events(irq);

@@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		return;
 
 	gvt = vgpu->gvt;
-	mutex_lock(&gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 	if (reg_is_mmio(gvt, offset)) {
 		if (read)
@@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 			memcpy(pt, p_data, bytes);
 
 	}
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
 		return 0;
 	}
-	mutex_lock(&gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
@@ -156,7 +156,7 @@ err:
 	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
 			offset, bytes);
 out:
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
 }
 
@@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		return 0;
 	}
 
-	mutex_lock(&gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
@@ -220,7 +220,7 @@ err:
 	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
 		bytes);
 out:
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
 }
 

@@ -42,15 +42,16 @@ struct intel_vgpu;
 #define D_BDW (1 << 0)
 #define D_SKL (1 << 1)
 #define D_KBL (1 << 2)
+#define D_BXT (1 << 3)
 
-#define D_GEN9PLUS (D_SKL | D_KBL)
-#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
 
-#define D_SKL_PLUS (D_SKL | D_KBL)
-#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
 
 #define D_PRE_SKL (D_BDW)
-#define D_ALL (D_BDW | D_SKL | D_KBL)
+#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT)
 
 typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
 		unsigned int);

@@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 */
 	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 			FW_REG_READ | FW_REG_WRITE);
-	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
+			IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
 		fw |= FORCEWAKE_RENDER;
 
 	intel_uncore_forcewake_get(dev_priv, fw);
@@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+	if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
 		return;
 
 	if (!pre && !gen9_render_mocs.initialized)
@@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre,
 	u32 old_v, new_v;
 
 	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv))
 		switch_mocs(pre, next, ring_id);
 
 	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 		 * state image on kabylake, it's initialized by lri command and
 		 * save or restore with context together.
 		 */
-		if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+		if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+			&& mmio->in_context)
 			continue;
 
 		// save
@@ -574,7 +578,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
 	struct engine_mmio *mmio;
 
-	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+	if (IS_SKYLAKE(gvt->dev_priv) ||
+		IS_KABYLAKE(gvt->dev_priv) ||
+		IS_BROXTON(gvt->dev_priv))
 		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
 	else
 		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;

@@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
 int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
 		void *data, unsigned int bytes)
 {
-	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_vgpu_page_track *page_track;
 	int ret = 0;
 
-	mutex_lock(&gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 
 	page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
 	if (!page_track) {
@@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
 	}
 
 out:
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
 }

@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
 	ktime_t cur_time;
 
-	mutex_lock(&gvt->lock);
+	mutex_lock(&gvt->sched_lock);
 	cur_time = ktime_get();
 
 	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 		vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
 	tbs_sched_func(sched_data);
 
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&gvt->sched_lock);
 }
 
 static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
 
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
 {
-	gvt->scheduler.sched_ops = &tbs_schedule_ops;
+	int ret;
 
-	return gvt->scheduler.sched_ops->init(gvt);
+	mutex_lock(&gvt->sched_lock);
+	gvt->scheduler.sched_ops = &tbs_schedule_ops;
+	ret = gvt->scheduler.sched_ops->init(gvt);
+	mutex_unlock(&gvt->sched_lock);
+
+	return ret;
 }
 
 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
 {
+	mutex_lock(&gvt->sched_lock);
 	gvt->scheduler.sched_ops->clean(gvt);
+	mutex_unlock(&gvt->sched_lock);
 }
 
+/* for per-vgpu scheduler policy, there are 2 per-vgpu data:
+ * sched_data, and sched_ctl. We see these 2 data as part of
+ * the global scheduler which are proteced by gvt->sched_lock.
+ * Caller should make their decision if the vgpu_lock should
+ * be hold outside.
+ */
+
 int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
 {
-	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+	int ret;
+
+	mutex_lock(&vgpu->gvt->sched_lock);
+	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+	mutex_unlock(&vgpu->gvt->sched_lock);
+
+	return ret;
 }
 
 void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 {
+	mutex_lock(&vgpu->gvt->sched_lock);
 	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
+	mutex_lock(&vgpu->gvt->sched_lock);
 	if (!vgpu_data->active) {
 		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
 		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
 	}
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
 {
+	mutex_lock(&gvt->sched_lock);
 	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+	mutex_unlock(&gvt->sched_lock);
 }
 
 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
+	mutex_lock(&vgpu->gvt->sched_lock);
 	scheduler->sched_ops->stop_schedule(vgpu);
 
 	if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }

@@ -45,11 +45,10 @@ static void set_context_pdp_root_pointer(
 		struct execlist_ring_context *ring_context,
 		u32 pdp[8])
 {
-	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
 	int i;
 
 	for (i = 0; i < 8; i++)
-		pdp_pair[i].val = pdp[7 - i];
+		ring_context->pdps[i].val = pdp[7 - i];
 }
 
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
@@ -298,7 +297,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	void *shadow_ring_buffer_va;
 	u32 *cs;
 
-	if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context))
+	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+		&& is_inhibit_context(req->hw_context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/* allocate shadow ring buffer */
@@ -634,6 +634,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
 		ring_id, workload);
 
+	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
 	ret = intel_gvt_scan_and_shadow_workload(workload);
@@ -654,6 +655,7 @@ out:
 	}
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
 }
 
@@ -663,7 +665,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
 
-	mutex_lock(&gvt->lock);
+	mutex_lock(&gvt->sched_lock);
 
 	/*
 	 * no current vgpu / will be scheduled out / no workload
@@ -709,7 +711,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 
 	atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&gvt->sched_lock);
 	return workload;
 }
 
@@ -807,7 +809,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	struct i915_request *rq = workload->req;
 	int event;
 
-	mutex_lock(&gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
+	mutex_lock(&gvt->sched_lock);
 
 	/* For the workload w/ request, needs to wait for the context
 	 * switch to make sure request is completed.
@@ -883,7 +886,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	if (gvt->scheduler.need_reschedule)
 		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
 
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&gvt->sched_lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 }
 
 struct workload_thread_param {
@@ -901,7 +905,8 @@ static int workload_thread(void *priv)
 	struct intel_vgpu *vgpu = NULL;
 	int ret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
-		|| IS_KABYLAKE(gvt->dev_priv);
+		|| IS_KABYLAKE(gvt->dev_priv)
+		|| IS_BROXTON(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	kfree(p);
@@ -935,9 +940,7 @@ static int workload_thread(void *priv)
 			intel_uncore_forcewake_get(gvt->dev_priv,
 					FORCEWAKE_ALL);
 
-		mutex_lock(&gvt->lock);
 		ret = dispatch_workload(workload);
-		mutex_unlock(&gvt->lock);
 
 		if (ret) {
 			vgpu = workload->vgpu;
@@ -1228,7 +1231,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
 	u64 gpa;
 	int i;
 
-	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
 
 	for (i = 0; i < 8; i++)
 		intel_gvt_hypervisor_read_gpa(vgpu,

@@ -58,6 +58,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 
 	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
 
+	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
+	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;
+
 	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
 	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
 		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
@@ -223,22 +226,20 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
  */
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
-	struct intel_gvt *gvt = vgpu->gvt;
-
-	mutex_lock(&gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 
 	vgpu->active = false;
 
 	if (atomic_read(&vgpu->submission.running_workload_num)) {
-		mutex_unlock(&gvt->lock);
+		mutex_unlock(&vgpu->vgpu_lock);
 		intel_gvt_wait_vgpu_idle(vgpu);
-		mutex_lock(&gvt->lock);
+		mutex_lock(&vgpu->vgpu_lock);
 	}
 
 	intel_vgpu_stop_schedule(vgpu);
 	intel_vgpu_dmabuf_cleanup(vgpu);
 
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -252,14 +253,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 
-	mutex_lock(&gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 
 	WARN(vgpu->active, "vGPU is still active!\n");
 
 	intel_gvt_debugfs_remove_vgpu(vgpu);
-	idr_remove(&gvt->vgpu_idr, vgpu->id);
-	if (idr_is_empty(&gvt->vgpu_idr))
-		intel_gvt_clean_irq(gvt);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_submission(vgpu);
 	intel_vgpu_clean_display(vgpu);
@@ -269,10 +267,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	intel_vgpu_free_resource(vgpu);
 	intel_vgpu_clean_mmio(vgpu);
 	intel_vgpu_dmabuf_cleanup(vgpu);
-	vfree(vgpu);
+	mutex_unlock(&vgpu->vgpu_lock);
 
+	mutex_lock(&gvt->lock);
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
+	if (idr_is_empty(&gvt->vgpu_idr))
+		intel_gvt_clean_irq(gvt);
 	intel_gvt_update_vgpu_types(gvt);
 	mutex_unlock(&gvt->lock);
 
+	vfree(vgpu);
 }
 
 #define IDLE_VGPU_IDR 0
@@ -298,6 +302,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
 
 	vgpu->id = IDLE_VGPU_IDR;
 	vgpu->gvt = gvt;
+	mutex_init(&vgpu->vgpu_lock);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++)
 		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
@@ -324,7 +329,10 @@ out_free_vgpu:
  */
 void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
 {
+	mutex_lock(&vgpu->vgpu_lock);
 	intel_vgpu_clean_sched_policy(vgpu);
+	mutex_unlock(&vgpu->vgpu_lock);
+
 	vfree(vgpu);
 }
 
@@ -342,8 +350,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (!vgpu)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_lock(&gvt->lock);
-
 	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
 		GFP_KERNEL);
 	if (ret < 0)
@@ -353,6 +359,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->handle = param->handle;
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
+	mutex_init(&vgpu->vgpu_lock);
 	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
 	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init(&vgpu->object_idr);
@@ -400,8 +407,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_sched_policy;
 
-	mutex_unlock(&gvt->lock);
-
 	return vgpu;
 
 out_clean_sched_policy:
@@ -424,7 +429,6 @@ out_clean_idr:
 	idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
 	vfree(vgpu);
-	mutex_unlock(&gvt->lock);
 	return ERR_PTR(ret);
 }
 
@@ -456,12 +460,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
 	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
 
+	mutex_lock(&gvt->lock);
 	vgpu = __intel_gvt_create_vgpu(gvt, &param);
-	if (IS_ERR(vgpu))
-		return vgpu;
-
-	/* calculate left instance change for types */
-	intel_gvt_update_vgpu_types(gvt);
+	if (!IS_ERR(vgpu))
+		/* calculate left instance change for types */
+		intel_gvt_update_vgpu_types(gvt);
+	mutex_unlock(&gvt->lock);
 
 	return vgpu;
 }
@@ -473,7 +477,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  * @engine_mask: engines to reset for GT reset
  *
  * This function is called when user wants to reset a virtual GPU through
- * device model reset or GT reset. The caller should hold the gvt lock.
+ * device model reset or GT reset. The caller should hold the vgpu lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 * the whole vGPU to default state as when it is created. This vGPU function
@@ -513,9 +517,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	 * scheduler when the reset is triggered by current vgpu.
 	 */
 	if (scheduler->current_vgpu == NULL) {
-		mutex_unlock(&gvt->lock);
+		mutex_unlock(&vgpu->vgpu_lock);
 		intel_gvt_wait_vgpu_idle(vgpu);
-		mutex_lock(&gvt->lock);
+		mutex_lock(&vgpu->vgpu_lock);
 	}
 
 	intel_vgpu_reset_submission(vgpu, resetting_eng);
@@ -555,7 +559,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
  */
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
 {
-	mutex_lock(&vgpu->gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
-	mutex_unlock(&vgpu->gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 }

@@ -94,7 +94,10 @@ struct vgt_if {
 	u32 rsv5[4];
 
 	u32 g2v_notify;
-	u32 rsv6[7];
+	u32 rsv6[5];
+
+	u32 cursor_x_hot;
+	u32 cursor_y_hot;
 
 	struct {
 		u32 lo;

@@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
 		return true;
 	if (IS_KABYLAKE(dev_priv))
 		return true;
+	if (IS_BROXTON(dev_priv))
+		return true;
 	return false;
 }