Merge tag 'gvt-next-2017-03-30' of https://github.com/01org/gvt-linux into drm-intel-next-queued

gvt-next-2017-03-30
- Add mdev attribute group for per-vgpu info
- Time slice based vGPU scheduling QoS support (Gao Ping)
- Initial KBL support for E3 server (Han Xu)
- other misc.

Link: http://patchwork.freedesktop.org/patch/msgid/20170330100516.dkavi3rtlsmnoepi@zhen-hp.sh.intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

commit a69035ebdf
@@ -1215,7 +1215,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 if (!info->async_flip)
 return 0;

-if (IS_SKYLAKE(dev_priv)) {
+if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
 tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
 GENMASK(12, 10)) >> 10;
@@ -1243,7 +1243,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(

 set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
 info->surf_val << 12);
-if (IS_SKYLAKE(dev_priv)) {
+if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
 info->stride_val);
 set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1267,7 +1267,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,

 if (IS_BROADWELL(dev_priv))
 return gen8_decode_mi_display_flip(s, info);
-if (IS_SKYLAKE(dev_priv))
+if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 return skl_decode_mi_display_flip(s, info);

 return -ENODEV;
@@ -1278,7 +1278,9 @@ static int check_mi_display_flip(struct parser_exec_state *s,
 {
 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

-if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+if (IS_BROADWELL(dev_priv)
+|| IS_SKYLAKE(dev_priv)
+|| IS_KABYLAKE(dev_priv))
 return gen8_check_mi_display_flip(s, info);
 return -ENODEV;
 }
@@ -1289,7 +1291,9 @@ static int update_plane_mmio_from_mi_display_flip(
 {
 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

-if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+if (IS_BROADWELL(dev_priv)
+|| IS_SKYLAKE(dev_priv)
+|| IS_KABYLAKE(dev_priv))
 return gen8_update_plane_mmio_from_mi_display_flip(s, info);
 return -ENODEV;
 }
@@ -1569,7 +1573,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
 struct intel_gvt *gvt = s->vgpu->gvt;

-if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+|| IS_KABYLAKE(gvt->dev_priv)) {
 /* BDW decides privilege based on address space */
 if (cmd_val(s, 0) & (1 << 8))
 return 0;
@@ -2604,6 +2609,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
 struct parser_exec_state s;
 int ret = 0;
+struct intel_vgpu_workload *workload = container_of(wa_ctx,
+struct intel_vgpu_workload,
+wa_ctx);

 /* ring base is page aligned */
 if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
@@ -2618,14 +2626,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)

 s.buf_type = RING_BUFFER_INSTRUCTION;
 s.buf_addr_type = GTT_BUFFER;
-s.vgpu = wa_ctx->workload->vgpu;
-s.ring_id = wa_ctx->workload->ring_id;
+s.vgpu = workload->vgpu;
+s.ring_id = workload->ring_id;
 s.ring_start = wa_ctx->indirect_ctx.guest_gma;
 s.ring_size = ring_size;
 s.ring_head = gma_head;
 s.ring_tail = gma_tail;
 s.rb_va = wa_ctx->indirect_ctx.shadow_va;
-s.workload = wa_ctx->workload;
+s.workload = workload;

 ret = ip_gma_set(&s, gma_head);
 if (ret)
@@ -2708,12 +2716,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 int ctx_size = wa_ctx->indirect_ctx.size;
 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
-struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+struct intel_vgpu_workload *workload = container_of(wa_ctx,
+struct intel_vgpu_workload,
+wa_ctx);
+struct intel_vgpu *vgpu = workload->vgpu;
 struct drm_i915_gem_object *obj;
 int ret = 0;
 void *map;

-obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
+obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
 roundup(ctx_size + CACHELINE_BYTES,
 PAGE_SIZE));
 if (IS_ERR(obj))
@@ -2733,8 +2744,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 goto unmap_src;
 }

-ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
-wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ret = copy_gma_to_hva(workload->vgpu,
+workload->vgpu->gtt.ggtt_mm,
 guest_gma, guest_gma + ctx_size,
 map);
 if (ret < 0) {
@@ -2772,7 +2783,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 int ret;
-struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+struct intel_vgpu_workload *workload = container_of(wa_ctx,
+struct intel_vgpu_workload,
+wa_ctx);
+struct intel_vgpu *vgpu = workload->vgpu;

 if (wa_ctx->indirect_ctx.size == 0)
 return 0;

@@ -161,8 +161,9 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {

 #define DPCD_HEADER_SIZE 0xb

+/* let the virtual display supports DP1.2 */
 static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
-0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
 };

 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
@@ -172,9 +173,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 SDE_PORTC_HOTPLUG_CPT |
 SDE_PORTD_HOTPLUG_CPT);

-if (IS_SKYLAKE(dev_priv))
+if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 SDE_PORTE_HOTPLUG_SPT);
+vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+SKL_FUSE_DOWNLOAD_STATUS |
+SKL_FUSE_PG0_DIST_STATUS |
+SKL_FUSE_PG1_DIST_STATUS |
+SKL_FUSE_PG2_DIST_STATUS;
+vgpu_vreg(vgpu, LCPLL1_CTL) |=
+LCPLL_PLL_ENABLE |
+LCPLL_PLL_LOCK;
+vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+
+}

 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -191,7 +203,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
 }

-if (IS_SKYLAKE(dev_priv) &&
+if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
 intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
 }
@@ -353,7 +365,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

-if (IS_SKYLAKE(dev_priv))
+if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 clean_virtual_dp_monitor(vgpu, PORT_D);
 else
 clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -375,7 +387,7 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)

 intel_vgpu_init_i2c_edid(vgpu);

-if (IS_SKYLAKE(dev_priv))
+if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
 resolution);
 else

@@ -394,9 +394,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)

 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-int ring_id = wa_ctx->workload->ring_id;
-struct i915_gem_context *shadow_ctx =
-wa_ctx->workload->vgpu->shadow_ctx;
+struct intel_vgpu_workload *workload = container_of(wa_ctx,
+struct intel_vgpu_workload,
+wa_ctx);
+int ring_id = workload->ring_id;
+struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 struct drm_i915_gem_object *ctx_obj =
 shadow_ctx->engine[ring_id].state->obj;
 struct execlist_ring_context *shadow_ring_context;
@@ -680,7 +682,6 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 CACHELINE_BYTES;
 workload->wa_ctx.per_ctx.guest_gma =
 per_ctx & PER_CTX_ADDR_MASK;
-workload->wa_ctx.workload = workload;

 WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
 }

@@ -2220,7 +2220,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)

 gvt_dbg_core("init gtt\n");

-if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+|| IS_KABYLAKE(gvt->dev_priv)) {
 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
 gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;

@@ -106,7 +106,8 @@ static void init_device_info(struct intel_gvt *gvt)
 struct intel_gvt_device_info *info = &gvt->device_info;
 struct pci_dev *pdev = gvt->dev_priv->drm.pdev;

-if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+|| IS_KABYLAKE(gvt->dev_priv)) {
 info->max_support_vgpus = 8;
 info->cfg_space_size = 256;
 info->mmio_size = 2 * 1024 * 1024;
@@ -143,6 +144,11 @@ static int gvt_service_thread(void *data)
 intel_gvt_emulate_vblank(gvt);
 mutex_unlock(&gvt->lock);
 }
+
+if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+(void *)&gvt->service_request)) {
+intel_gvt_schedule(gvt);
+}
 }

 return 0;
@@ -196,6 +202,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)

 idr_destroy(&gvt->vgpu_idr);

+intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+
 kfree(dev_priv->gvt);
 dev_priv->gvt = NULL;
 }
@@ -214,6 +222,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 {
 struct intel_gvt *gvt;
+struct intel_vgpu *vgpu;
 int ret;

 /*
@@ -286,6 +295,14 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 goto out_clean_types;
 }

+vgpu = intel_gvt_create_idle_vgpu(gvt);
+if (IS_ERR(vgpu)) {
+ret = PTR_ERR(vgpu);
+gvt_err("failed to create idle vgpu\n");
+goto out_clean_types;
+}
+gvt->idle_vgpu = vgpu;
+
 gvt_dbg_core("gvt device initialization is done\n");
 dev_priv->gvt = gvt;
 return 0;

@@ -138,6 +138,10 @@ struct intel_vgpu_display {
 struct intel_vgpu_sbi sbi;
 };

+struct vgpu_sched_ctl {
+int weight;
+};
+
 struct intel_vgpu {
 struct intel_gvt *gvt;
 int id;
@@ -147,6 +151,7 @@ struct intel_vgpu {
 bool failsafe;
 bool resetting;
 void *sched_data;
+struct vgpu_sched_ctl sched_ctl;

 struct intel_vgpu_fence fence;
 struct intel_vgpu_gm gm;
@@ -160,6 +165,7 @@ struct intel_vgpu {
 struct list_head workload_q_head[I915_NUM_ENGINES];
 struct kmem_cache *workloads;
 atomic_t running_workload_num;
+ktime_t last_ctx_submit_time;
 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 struct i915_gem_context *shadow_ctx;

@@ -215,6 +221,7 @@ struct intel_vgpu_type {
 unsigned int low_gm_size;
 unsigned int high_gm_size;
 unsigned int fence;
+unsigned int weight;
 enum intel_vgpu_edid resolution;
 };

@@ -236,6 +243,7 @@ struct intel_gvt {
 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
 struct intel_vgpu_type *types;
 unsigned int num_types;
+struct intel_vgpu *idle_vgpu;

 struct task_struct *service_thread;
 wait_queue_head_t service_thread_wq;
@@ -249,6 +257,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)

 enum {
 INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+INTEL_GVT_REQUEST_SCHED = 1,
 };

 static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -322,6 +331,8 @@ struct intel_vgpu_creation_params {
 __u64 resolution;
 __s32 primary;
 __u64 vgpu_id;
+
+__u32 weight;
 };

 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
@@ -376,6 +387,8 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
 void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);

@@ -68,6 +68,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 return D_BDW;
 else if (IS_SKYLAKE(gvt->dev_priv))
 return D_SKL;
+else if (IS_KABYLAKE(gvt->dev_priv))
+return D_KBL;

 return 0;
 }
@@ -234,7 +236,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 old = vgpu_vreg(vgpu, offset);
 new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

-if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
 switch (offset) {
 case FORCEWAKE_RENDER_GEN9_REG:
 ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -823,8 +826,9 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 write_vreg(vgpu, offset, p_data, bytes);
 data = vgpu_vreg(vgpu, offset);

-if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
-offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
+|| IS_KABYLAKE(vgpu->gvt->dev_priv))
+&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
 /* SKL DPB/C/D aux ctl register changed */
 return 0;
 } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
@@ -1303,7 +1307,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,

 switch (cmd) {
 case GEN9_PCODE_READ_MEM_LATENCY:
-if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
 /**
 * "Read memory latency" command on gen9.
 * Below memory latency values are read
@@ -1316,7 +1321,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 }
 break;
 case SKL_PCODE_CDCLK_CONTROL:
-if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+|| IS_KABYLAKE(vgpu->gvt->dev_priv))
 *data0 = SKL_CDCLK_READY_FOR_CHANGE;
 break;
 case GEN6_PCODE_READ_RC6VIDS:
@@ -1410,6 +1416,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,

 execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
 if (execlist->elsp_dwords.index == 3) {
+vgpu->last_ctx_submit_time = ktime_get();
 ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 if(ret)
 gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -2584,219 +2591,232 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
 MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);

-MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
-MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
-MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+dp_aux_ch_ctl_mmio_write);
+MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+dp_aux_ch_ctl_mmio_write);
+MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+dp_aux_ch_ctl_mmio_write);

-MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
-MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
+MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
+skl_power_well_ctl_write);
+MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);

 MMIO_D(0xa210, D_SKL_PLUS);
 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
+MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
+MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-MMIO_D(0x45504, D_SKL);
+MMIO_D(0x45504, D_SKL_PLUS);
-MMIO_D(0x45520, D_SKL);
+MMIO_D(0x45520, D_SKL_PLUS);
-MMIO_D(0x46000, D_SKL);
+MMIO_D(0x46000, D_SKL_PLUS);
-MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
+MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
-MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
+MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
-MMIO_D(0x6C040, D_SKL);
+MMIO_D(0x6C040, D_SKL | D_KBL);
-MMIO_D(0x6C048, D_SKL);
+MMIO_D(0x6C048, D_SKL | D_KBL);
-MMIO_D(0x6C050, D_SKL);
+MMIO_D(0x6C050, D_SKL | D_KBL);
-MMIO_D(0x6C044, D_SKL);
+MMIO_D(0x6C044, D_SKL | D_KBL);
-MMIO_D(0x6C04C, D_SKL);
+MMIO_D(0x6C04C, D_SKL | D_KBL);
-MMIO_D(0x6C054, D_SKL);
+MMIO_D(0x6C054, D_SKL | D_KBL);
-MMIO_D(0x6c058, D_SKL);
+MMIO_D(0x6c058, D_SKL | D_KBL);
-MMIO_D(0x6c05c, D_SKL);
+MMIO_D(0x6c05c, D_SKL | D_KBL);
-MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
+MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL);

-MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

-MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

-MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
-MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
+MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

-MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
+MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
+MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
+MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);

-MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);

-MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);

-MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);

-MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);

-MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
+MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
+MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
+MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);

-MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
-MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
+MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);

-MMIO_D(0x70380, D_SKL);
+MMIO_D(0x70380, D_SKL_PLUS);
-MMIO_D(0x71380, D_SKL);
+MMIO_D(0x71380, D_SKL_PLUS);
-MMIO_D(0x72380, D_SKL);
+MMIO_D(0x72380, D_SKL_PLUS);
-MMIO_D(0x7039c, D_SKL);
+MMIO_D(0x7039c, D_SKL_PLUS);

-MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-MMIO_D(0x8f074, D_SKL);
+MMIO_D(0x8f074, D_SKL | D_KBL);
-MMIO_D(0x8f004, D_SKL);
+MMIO_D(0x8f004, D_SKL | D_KBL);
-MMIO_D(0x8f034, D_SKL);
+MMIO_D(0x8f034, D_SKL | D_KBL);

-MMIO_D(0xb11c, D_SKL);
+MMIO_D(0xb11c, D_SKL | D_KBL);

-MMIO_D(0x51000, D_SKL);
+MMIO_D(0x51000, D_SKL | D_KBL);
-MMIO_D(0x6c00c, D_SKL);
+MMIO_D(0x6c00c, D_SKL_PLUS);

-MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
-MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);

-MMIO_D(0xd08, D_SKL);
+MMIO_D(0xd08, D_SKL_PLUS);
-MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
+MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
-MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

 /* TRTT */
-MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
+MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
+MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
+MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
+MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
-MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);

-MMIO_D(0x45008, D_SKL);
+MMIO_D(0x45008, D_SKL | D_KBL);

-MMIO_D(0x46430, D_SKL);
+MMIO_D(0x46430, D_SKL | D_KBL);

-MMIO_D(0x46520, D_SKL);
+MMIO_D(0x46520, D_SKL | D_KBL);

-MMIO_D(0xc403c, D_SKL);
+MMIO_D(0xc403c, D_SKL | D_KBL);
-MMIO_D(0xb004, D_SKL);
+MMIO_D(0xb004, D_SKL_PLUS);
 MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);

-MMIO_D(0x65900, D_SKL);
+MMIO_D(0x65900, D_SKL_PLUS);
-MMIO_D(0x1082c0, D_SKL);
+MMIO_D(0x1082c0, D_SKL | D_KBL);
-MMIO_D(0x4068, D_SKL);
+MMIO_D(0x4068, D_SKL | D_KBL);
-MMIO_D(0x67054, D_SKL);
+MMIO_D(0x67054, D_SKL | D_KBL);
-MMIO_D(0x6e560, D_SKL);
+MMIO_D(0x6e560, D_SKL | D_KBL);
-MMIO_D(0x6e554, D_SKL);
+MMIO_D(0x6e554, D_SKL | D_KBL);
-MMIO_D(0x2b20, D_SKL);
+MMIO_D(0x2b20, D_SKL | D_KBL);
-MMIO_D(0x65f00, D_SKL);
+MMIO_D(0x65f00, D_SKL | D_KBL);
-MMIO_D(0x65f08, D_SKL);
+MMIO_D(0x65f08, D_SKL | D_KBL);
-MMIO_D(0x320f0, D_SKL);
+MMIO_D(0x320f0, D_SKL | D_KBL);

-MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
-MMIO_D(0x70034, D_SKL);
-MMIO_D(0x71034, D_SKL);
-MMIO_D(0x72034, D_SKL);
+MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+MMIO_D(0x70034, D_SKL_PLUS);
+MMIO_D(0x71034, D_SKL_PLUS);
+MMIO_D(0x72034, D_SKL_PLUS);

-MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
+MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
-MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
+MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
-MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
+MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
-MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
+MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
-MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
+MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
-MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
+MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);

-MMIO_D(0x44500, D_SKL);
+MMIO_D(0x44500, D_SKL_PLUS);
 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
 NULL, NULL);

+MMIO_D(0x4ab8, D_KBL);
+MMIO_D(0x940c, D_SKL_PLUS);
+MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
+MMIO_D(0x4ab0, D_SKL | D_KBL);
+MMIO_D(0x20d4, D_SKL | D_KBL);
+
 return 0;
 }

@@ -2873,7 +2893,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 ret = init_broadwell_mmio_info(gvt);
 if (ret)
 goto err;
-} else if (IS_SKYLAKE(dev_priv)) {
+} else if (IS_SKYLAKE(dev_priv)
+|| IS_KABYLAKE(dev_priv)) {
 ret = init_broadwell_mmio_info(gvt);
 if (ret)
 goto err;

@@ -580,7 +580,7 @@ static void gen8_init_irq(

 SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-} else if (IS_SKYLAKE(gvt->dev_priv)) {
+} else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
 SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
 SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
 SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,7 +690,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)

 gvt_dbg_core("init irq framework\n");

-if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+|| IS_KABYLAKE(gvt->dev_priv)) {
 irq->ops = &gen8_irq_ops;
 irq->irq_map = gen8_irq_map;
 } else {

@@ -295,10 +295,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
 return 0;

 return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
-"fence: %d\nresolution: %s\n",
+"fence: %d\nresolution: %s\n"
+"weight: %d\n",
 BYTES_TO_MB(type->low_gm_size),
 BYTES_TO_MB(type->high_gm_size),
-type->fence, vgpu_edid_str(type->resolution));
+type->fence, vgpu_edid_str(type->resolution),
+type->weight);
 }

 static MDEV_TYPE_ATTR_RO(available_instances);
@@ -1146,8 +1148,40 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 return 0;
 }

+static ssize_t
+vgpu_id_show(struct device *dev, struct device_attribute *attr,
+char *buf)
+{
+struct mdev_device *mdev = mdev_from_dev(dev);
+
+if (mdev) {
+struct intel_vgpu *vgpu = (struct intel_vgpu *)
+mdev_get_drvdata(mdev);
+return sprintf(buf, "%d\n", vgpu->id);
+}
+return sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(vgpu_id);
+
+static struct attribute *intel_vgpu_attrs[] = {
+&dev_attr_vgpu_id.attr,
+NULL
+};
+
+static const struct attribute_group intel_vgpu_group = {
+.name = "intel_vgpu",
+.attrs = intel_vgpu_attrs,
+};
+
+static const struct attribute_group *intel_vgpu_groups[] = {
+&intel_vgpu_group,
+NULL,
+};
+
 static const struct mdev_parent_ops intel_vgpu_ops = {
 .supported_type_groups = intel_vgpu_type_groups,
+.mdev_attr_groups = intel_vgpu_groups,
 .create = intel_vgpu_create,
 .remove = intel_vgpu_remove,

@@ -1339,13 +1373,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)

 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-struct intel_vgpu *vgpu = info->vgpu;
-
-if (!info) {
-gvt_vgpu_err("kvmgt_guest_info invalid\n");
-return false;
-}
-
 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 kvmgt_protect_table_destroy(info);
 gvt_cache_destroy(info->vgpu);

@@ -44,20 +44,21 @@ struct intel_vgpu;
 #define D_HSW (1 << 2)
 #define D_BDW (1 << 3)
 #define D_SKL (1 << 4)
+#define D_KBL (1 << 5)

-#define D_GEN9PLUS (D_SKL)
-#define D_GEN8PLUS (D_BDW | D_SKL)
-#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
-#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_GEN9PLUS (D_SKL | D_KBL)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)

-#define D_SKL_PLUS (D_SKL)
-#define D_BDW_PLUS (D_BDW | D_SKL)
-#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
-#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_SKL_PLUS (D_SKL | D_KBL)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)

 #define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
 #define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
-#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)

 struct intel_gvt_mmio_info {
 u32 offset;

@@ -126,6 +126,18 @@ static struct render_mmio gen9_render_mmio_list[] = {
 {VCS2, _MMIO(0x1c028), 0xffff, false},

 {VECS, _MMIO(0x1a028), 0xffff, false},
+
+{RCS, _MMIO(0x7304), 0xffff, true},
+{RCS, _MMIO(0x2248), 0x0, false},
+{RCS, _MMIO(0x940c), 0x0, false},
+{RCS, _MMIO(0x4ab8), 0x0, false},
+
+{RCS, _MMIO(0x4ab0), 0x0, false},
+{RCS, _MMIO(0x20d4), 0x0, false},
+
+{RCS, _MMIO(0xb004), 0x0, false},
+{RCS, _MMIO(0x20a0), 0x0, false},
+{RCS, _MMIO(0x20e4), 0xffff, false},
 };

 static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
@@ -159,7 +171,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 */
 fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 FW_REG_READ | FW_REG_WRITE);
-if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 fw |= FORCEWAKE_RENDER;

 intel_uncore_forcewake_get(dev_priv, fw);
@@ -192,7 +204,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 return;

-if (!IS_SKYLAKE(dev_priv))
+if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 return;

 offset.reg = regs[ring_id];
@@ -230,7 +242,7 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 return;

-if (!IS_SKYLAKE(dev_priv))
+if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 return;

 offset.reg = regs[ring_id];
@@ -265,7 +277,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 u32 inhibit_mask =
 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

-if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
 mmio = gen9_render_mmio_list;
 array_size = ARRAY_SIZE(gen9_render_mmio_list);
 load_mocs(vgpu, ring_id);
@@ -312,7 +325,7 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 u32 v;
 int i, array_size;

-if (IS_SKYLAKE(dev_priv)) {
+if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 mmio = gen9_render_mmio_list;
 array_size = ARRAY_SIZE(gen9_render_mmio_list);
 restore_mocs(vgpu, ring_id);

@@ -47,11 +47,87 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	return false;
 }
 
+struct vgpu_sched_data {
+	struct list_head lru_list;
+	struct intel_vgpu *vgpu;
+
+	ktime_t sched_in_time;
+	ktime_t sched_out_time;
+	ktime_t sched_time;
+	ktime_t left_ts;
+	ktime_t allocated_ts;
+
+	struct vgpu_sched_ctl sched_ctl;
+};
+
+struct gvt_sched_data {
+	struct intel_gvt *gvt;
+	struct hrtimer timer;
+	unsigned long period;
+	struct list_head lru_runq_head;
+};
+
+static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+{
+	ktime_t delta_ts;
+	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+
+	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+
+	vgpu_data->sched_time += delta_ts;
+	vgpu_data->left_ts -= delta_ts;
+}
+
+#define GVT_TS_BALANCE_PERIOD_MS 100
+#define GVT_TS_BALANCE_STAGE_NUM 10
+
+static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
+{
+	struct vgpu_sched_data *vgpu_data;
+	struct list_head *pos;
+	static uint64_t stage_check;
+	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
+
+	/* The timeslice accumulation reset at stage 0, which is
+	 * allocated again without adding previous debt.
+	 */
+	if (stage == 0) {
+		int total_weight = 0;
+		ktime_t fair_timeslice;
+
+		list_for_each(pos, &sched_data->lru_runq_head) {
+			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+			total_weight += vgpu_data->sched_ctl.weight;
+		}
+
+		list_for_each(pos, &sched_data->lru_runq_head) {
+			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
+						vgpu_data->sched_ctl.weight /
+						total_weight;
+
+			vgpu_data->allocated_ts = fair_timeslice;
+			vgpu_data->left_ts = vgpu_data->allocated_ts;
+		}
+	} else {
+		list_for_each(pos, &sched_data->lru_runq_head) {
+			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+
+			/* timeslice for next 100ms should add the left/debt
+			 * slice of previous stages.
+			 */
+			vgpu_data->left_ts += vgpu_data->allocated_ts;
+		}
+	}
+}
+
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
+	struct vgpu_sched_data *vgpu_data;
+	ktime_t cur_time;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
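The stage-0 branch of gvt_balance_timeslice() above splits each 100 ms balance period among the queued vGPUs in proportion to their weights. Below is a minimal userspace sketch of the same arithmetic, with hypothetical weights and plain 64-bit nanosecond counts standing in for ktime_t; it is an illustration, not the kernel code.

    #include <stdio.h>
    #include <stdint.h>

    #define BALANCE_PERIOD_NS (100 * 1000000LL)  /* 100 ms, as in GVT_TS_BALANCE_PERIOD_MS */

    int main(void)
    {
        /* hypothetical weights, e.g. one GVTg_V5_8 (weight 2) and one GVTg_V5_4 (weight 4) */
        int weight[] = { 2, 4 };
        int n = sizeof(weight) / sizeof(weight[0]);
        int64_t total_weight = 0;

        for (int i = 0; i < n; i++)
            total_weight += weight[i];

        /* each vGPU's fair share of the 100 ms period, like fair_timeslice above */
        for (int i = 0; i < n; i++) {
            int64_t fair_ts = BALANCE_PERIOD_NS * weight[i] / total_weight;
            printf("vGPU %d: weight %d -> %lld ns (%.1f ms) per 100 ms\n",
                   i, weight[i], (long long)fair_ts, fair_ts / 1e6);
        }
        return 0;
    }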
@@ -77,6 +153,15 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	gvt_dbg_sched("switch to next vgpu %d\n",
 			scheduler->next_vgpu->id);
 
+	cur_time = ktime_get();
+	if (scheduler->current_vgpu) {
+		vgpu_data = scheduler->current_vgpu->sched_data;
+		vgpu_data->sched_out_time = cur_time;
+		vgpu_update_timeslice(scheduler->current_vgpu);
+	}
+	vgpu_data = scheduler->next_vgpu->sched_data;
+	vgpu_data->sched_in_time = cur_time;
+
 	/* switch current vgpu */
 	scheduler->current_vgpu = scheduler->next_vgpu;
 	scheduler->next_vgpu = NULL;
|
||||||
wake_up(&scheduler->waitq[i]);
|
wake_up(&scheduler->waitq[i]);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct tbs_vgpu_data {
|
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
|
||||||
struct list_head list;
|
|
||||||
struct intel_vgpu *vgpu;
|
|
||||||
/* put some per-vgpu sched stats here */
|
|
||||||
};
|
|
||||||
|
|
||||||
struct tbs_sched_data {
|
|
||||||
struct intel_gvt *gvt;
|
|
||||||
struct delayed_work work;
|
|
||||||
unsigned long period;
|
|
||||||
struct list_head runq_head;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
|
|
||||||
|
|
||||||
static void tbs_sched_func(struct work_struct *work)
|
|
||||||
{
|
{
|
||||||
struct tbs_sched_data *sched_data = container_of(work,
|
struct vgpu_sched_data *vgpu_data;
|
||||||
struct tbs_sched_data, work.work);
|
|
||||||
struct tbs_vgpu_data *vgpu_data;
|
|
||||||
|
|
||||||
struct intel_gvt *gvt = sched_data->gvt;
|
|
||||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
|
||||||
|
|
||||||
struct intel_vgpu *vgpu = NULL;
|
struct intel_vgpu *vgpu = NULL;
|
||||||
struct list_head *pos, *head;
|
struct list_head *head = &sched_data->lru_runq_head;
|
||||||
|
struct list_head *pos;
|
||||||
mutex_lock(&gvt->lock);
|
|
||||||
|
|
||||||
/* no vgpu or has already had a target */
|
|
||||||
if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
if (scheduler->current_vgpu) {
|
|
||||||
vgpu_data = scheduler->current_vgpu->sched_data;
|
|
||||||
head = &vgpu_data->list;
|
|
||||||
} else {
|
|
||||||
head = &sched_data->runq_head;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* search a vgpu with pending workload */
|
/* search a vgpu with pending workload */
|
||||||
list_for_each(pos, head) {
|
list_for_each(pos, head) {
|
||||||
if (pos == &sched_data->runq_head)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
|
vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
|
||||||
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
|
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
vgpu = vgpu_data->vgpu;
|
/* Return the vGPU only if it has time slice left */
|
||||||
break;
|
if (vgpu_data->left_ts > 0) {
|
||||||
|
vgpu = vgpu_data->vgpu;
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return vgpu;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* in nanosecond */
|
||||||
|
#define GVT_DEFAULT_TIME_SLICE 1000000
|
||||||
|
|
||||||
|
static void tbs_sched_func(struct gvt_sched_data *sched_data)
|
||||||
|
{
|
||||||
|
struct intel_gvt *gvt = sched_data->gvt;
|
||||||
|
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||||
|
struct vgpu_sched_data *vgpu_data;
|
||||||
|
struct intel_vgpu *vgpu = NULL;
|
||||||
|
static uint64_t timer_check;
|
||||||
|
|
||||||
|
if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
|
||||||
|
gvt_balance_timeslice(sched_data);
|
||||||
|
|
||||||
|
/* no active vgpu or has already had a target */
|
||||||
|
if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
vgpu = find_busy_vgpu(sched_data);
|
||||||
if (vgpu) {
|
if (vgpu) {
|
||||||
scheduler->next_vgpu = vgpu;
|
scheduler->next_vgpu = vgpu;
|
||||||
|
|
||||||
|
/* Move the last used vGPU to the tail of lru_list */
|
||||||
|
vgpu_data = vgpu->sched_data;
|
||||||
|
list_del_init(&vgpu_data->lru_list);
|
||||||
|
list_add_tail(&vgpu_data->lru_list,
|
||||||
|
&sched_data->lru_runq_head);
|
||||||
|
|
||||||
gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
|
gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
|
||||||
|
} else {
|
||||||
|
scheduler->next_vgpu = gvt->idle_vgpu;
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
if (scheduler->next_vgpu) {
|
if (scheduler->next_vgpu) {
|
||||||
|
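find_busy_vgpu() above returns the first vGPU on the LRU run queue that both has pending workloads and still has time slice left; the caller then rotates the winner to the tail of the list and falls back to the idle vGPU when nothing qualifies. A minimal sketch of that selection over a plain array, with all fields hypothetical:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct fake_vgpu {
        int id;
        bool has_pending_work;
        int64_t left_ns;  /* remaining time slice */
    };

    /* pick the first runnable vGPU that still has budget, like find_busy_vgpu() */
    static struct fake_vgpu *pick_next(struct fake_vgpu *v, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!v[i].has_pending_work)
                continue;
            if (v[i].left_ns > 0)
                return &v[i];
        }
        return NULL;  /* the real scheduler falls back to gvt->idle_vgpu here */
    }

    int main(void)
    {
        struct fake_vgpu q[] = {
            { 1, true,  -200000 },  /* busy but out of budget */
            { 2, false,  500000 },  /* idle */
            { 3, true,   300000 },  /* busy with budget -> picked */
        };
        struct fake_vgpu *next = pick_next(q, 3);

        printf("next vGPU: %d\n", next ? next->id : 0);
        return 0;
    }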
@@ -151,34 +235,49 @@ static void tbs_sched_func(struct work_struct *work)
 				scheduler->next_vgpu->id);
 		try_to_schedule_next_vgpu(gvt);
 	}
+}
 
-	/*
-	 * still have vgpu on runq
-	 * or last schedule haven't finished due to running workload
-	 */
-	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
-		schedule_delayed_work(&sched_data->work, sched_data->period);
-
+void intel_gvt_schedule(struct intel_gvt *gvt)
+{
+	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+
+	mutex_lock(&gvt->lock);
+	tbs_sched_func(sched_data);
 	mutex_unlock(&gvt->lock);
 }
 
+static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
+{
+	struct gvt_sched_data *data;
+
+	data = container_of(timer_data, struct gvt_sched_data, timer);
+
+	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
+
+	hrtimer_add_expires_ns(&data->timer, data->period);
+
+	return HRTIMER_RESTART;
+}
+
 static int tbs_sched_init(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&gvt->scheduler;
 
-	struct tbs_sched_data *data;
+	struct gvt_sched_data *data;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&data->runq_head);
-	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+	INIT_LIST_HEAD(&data->lru_runq_head);
+	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	data->timer.function = tbs_timer_fn;
 	data->period = GVT_DEFAULT_TIME_SLICE;
 	data->gvt = gvt;
 
 	scheduler->sched_data = data;
 
 	return 0;
 }
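The timer side of the new policy is simple: tbs_timer_fn() fires every GVT_DEFAULT_TIME_SLICE (1 ms), re-arms itself with hrtimer_add_expires_ns(), and asks the service thread to call intel_gvt_schedule(); inside tbs_sched_func(), the static timer_check counter lets the weight rebalance run only once every GVT_TS_BALANCE_PERIOD_MS ticks. Keeping the callback short and pushing the real work to the service thread is the usual pattern for hrtimer callbacks. A sketch of the cadence, assuming a plain loop in place of the hrtimer:

    #include <stdio.h>
    #include <stdint.h>

    #define TICK_MS            1   /* GVT_DEFAULT_TIME_SLICE is 1 ms (1000000 ns) */
    #define BALANCE_PERIOD_MS 100  /* GVT_TS_BALANCE_PERIOD_MS */

    int main(void)
    {
        static uint64_t timer_check;  /* same counter trick as in tbs_sched_func() */
        int rebalances = 0;

        /* pretend the timer fired for 300 ms worth of ticks */
        for (int ms = 0; ms < 300; ms += TICK_MS) {
            if (!(timer_check++ % BALANCE_PERIOD_MS))
                rebalances++;  /* gvt_balance_timeslice() would run here */
            /* ... the normal pick-next-vGPU path runs on every tick ... */
        }

        printf("300 ticks -> %d rebalance passes\n", rebalances);  /* prints 3 */
        return 0;
    }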
@@ -186,25 +285,28 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&gvt->scheduler;
-	struct tbs_sched_data *data = scheduler->sched_data;
+	struct gvt_sched_data *data = scheduler->sched_data;
+
+	hrtimer_cancel(&data->timer);
 
-	cancel_delayed_work(&data->work);
 	kfree(data);
 	scheduler->sched_data = NULL;
 }
 
 static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 {
-	struct tbs_vgpu_data *data;
+	struct vgpu_sched_data *data;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
+	data->sched_ctl.weight = vgpu->sched_ctl.weight;
 	data->vgpu = vgpu;
-	INIT_LIST_HEAD(&data->list);
+	INIT_LIST_HEAD(&data->lru_list);
 
 	vgpu->sched_data = data;
 
 	return 0;
 }
@@ -216,21 +318,24 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
-	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
-	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
-	if (!list_empty(&vgpu_data->list))
+	if (!list_empty(&vgpu_data->lru_list))
 		return;
 
-	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
-	schedule_delayed_work(&sched_data->work, 0);
+	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+
+	if (!hrtimer_active(&sched_data->timer))
+		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
+			sched_data->period), HRTIMER_MODE_ABS);
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 {
-	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
-	list_del_init(&vgpu_data->list);
+	list_del_init(&vgpu_data->lru_list);
 }
 
 static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
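tbs_sched_start_schedule() and tbs_sched_stop_schedule() lean on a standard kernel list idiom: list_del_init() leaves the removed node pointing at itself, so list_empty() on the node itself answers "is this vGPU currently on the run queue?". A compact userspace rendering of that idiom, using a minimal stand-in for the kernel's list_head rather than the real implementation:

    #include <stdio.h>
    #include <stdbool.h>

    struct list_head { struct list_head *prev, *next; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    /* unlink the node and re-point it at itself, as list_del_init() does */
    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
    }

    static bool list_empty(const struct list_head *h) { return h->next == h; }

    int main(void)
    {
        struct list_head runq, vgpu_node;

        INIT_LIST_HEAD(&runq);
        INIT_LIST_HEAD(&vgpu_node);

        printf("queued? %d\n", !list_empty(&vgpu_node));  /* 0: not on the run queue */
        list_add_tail(&vgpu_node, &runq);                 /* start_schedule */
        printf("queued? %d\n", !list_empty(&vgpu_node));  /* 1 */
        list_del_init(&vgpu_node);                        /* stop_schedule */
        printf("queued? %d\n", !list_empty(&vgpu_node));  /* 0 again */
        return 0;
    }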
@@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops {
 	void (*stop_schedule)(struct intel_vgpu *vgpu);
 };
 
+void intel_gvt_schedule(struct intel_gvt *gvt);
+
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
 
 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
@@ -448,7 +448,8 @@ static int workload_thread(void *priv)
 	struct intel_vgpu_workload *workload = NULL;
 	struct intel_vgpu *vgpu = NULL;
 	int ret;
-	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
+		|| IS_KABYLAKE(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	kfree(p);
@@ -67,7 +67,6 @@ struct shadow_per_ctx {
 };
 
 struct intel_shadow_wa_ctx {
-	struct intel_vgpu_workload *workload;
 	struct shadow_indirect_ctx indirect_ctx;
 	struct shadow_per_ctx per_ctx;
 
@@ -64,18 +64,28 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 }
 
+#define VGPU_MAX_WEIGHT 16
+#define VGPU_WEIGHT(vgpu_num)	\
+	(VGPU_MAX_WEIGHT / (vgpu_num))
+
 static struct {
 	unsigned int low_mm;
 	unsigned int high_mm;
 	unsigned int fence;
+
+	/* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
+	 * with a weight of 4 on a contended host, different vGPU type has
+	 * different weight set. Legal weights range from 1 to 16.
+	 */
+	unsigned int weight;
 	enum intel_vgpu_edid edid;
 	char *name;
 } vgpu_types[] = {
 /* Fixed vGPU type table */
-	{ MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
-	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
-	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
-	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
+	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
+	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
+	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
 };
 
 /**
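VGPU_WEIGHT(n) spreads a fixed budget of 16 across the n instances a type is sized for, so the single-instance GVTg_V5_1 type gets weight 16 while the eight-instance GVTg_V5_8 type gets weight 2. A quick check of the resulting weights and of the share two coexisting vGPUs of different types would get, as a standalone sketch:

    #include <stdio.h>

    #define VGPU_MAX_WEIGHT 16
    #define VGPU_WEIGHT(vgpu_num) (VGPU_MAX_WEIGHT / (vgpu_num))

    int main(void)
    {
        /* instance counts of the fixed type table: GVTg_V5_8/_4/_2/_1 */
        int vgpu_num[] = { 8, 4, 2, 1 };

        for (int i = 0; i < 4; i++)
            printf("GVTg_V5_%d: weight %2d\n",
                   vgpu_num[i], VGPU_WEIGHT(vgpu_num[i]));

        /* e.g. a weight-4 vGPU sharing with a weight-2 vGPU gets 4/6 of the GPU */
        printf("weight 4 vs weight 2 -> %.0f%% vs %.0f%%\n",
               100.0 * 4 / 6, 100.0 * 2 / 6);
        return 0;
    }

Note that the same table row also drops the GVTg_V5_8 type's high GM size from 512 MB to 384 MB; that is a separate resource adjustment carried in this hunk, not part of the weight scheme.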
@@ -120,6 +130,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
 		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
 		gvt->types[i].fence = vgpu_types[i].fence;
+
+		if (vgpu_types[i].weight < 1 ||
+					vgpu_types[i].weight > VGPU_MAX_WEIGHT)
+			return -EINVAL;
+
+		gvt->types[i].weight = vgpu_types[i].weight;
 		gvt->types[i].resolution = vgpu_types[i].edid;
 		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
 						   high_avail / vgpu_types[i].high_mm);
@@ -131,11 +147,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		sprintf(gvt->types[i].name, "GVTg_V5_%s",
 					vgpu_types[i].name);
 
-		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
 			     i, gvt->types[i].name,
 			     gvt->types[i].avail_instance,
 			     gvt->types[i].low_gm_size,
 			     gvt->types[i].high_gm_size, gvt->types[i].fence,
+			     gvt->types[i].weight,
 			     vgpu_edid_str(gvt->types[i].resolution));
 	}
 
@@ -216,6 +233,59 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	mutex_unlock(&gvt->lock);
 }
 
+#define IDLE_VGPU_IDR 0
+
+/**
+ * intel_gvt_create_idle_vgpu - create an idle virtual GPU
+ * @gvt: GVT device
+ *
+ * This function is called when user wants to create an idle virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
+{
+	struct intel_vgpu *vgpu;
+	enum intel_engine_id i;
+	int ret;
+
+	vgpu = vzalloc(sizeof(*vgpu));
+	if (!vgpu)
+		return ERR_PTR(-ENOMEM);
+
+	vgpu->id = IDLE_VGPU_IDR;
+	vgpu->gvt = gvt;
+
+	for (i = 0; i < I915_NUM_ENGINES; i++)
+		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+
+	ret = intel_vgpu_init_sched_policy(vgpu);
+	if (ret)
+		goto out_free_vgpu;
+
+	vgpu->active = false;
+
+	return vgpu;
+
+out_free_vgpu:
+	vfree(vgpu);
+	return ERR_PTR(ret);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy an idle virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy an idle virtual GPU.
+ *
+ */
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
+{
+	intel_vgpu_clean_sched_policy(vgpu);
+	vfree(vgpu);
+}
+
 static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 		struct intel_vgpu_creation_params *param)
 {
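The idle vGPU introduced above is what the scheduler switches to when no real vGPU has pending work; it owns the reserved id IDLE_VGPU_IDR (0), which is why __intel_gvt_create_vgpu() in the next hunk starts its idr_alloc() range at IDLE_VGPU_IDR + 1. A tiny sketch of that reservation, with a plain array standing in for the kernel IDR:

    #include <stdio.h>

    #define IDLE_VGPU_IDR 0
    #define MAX_VGPU      8

    static void *slots[MAX_VGPU];  /* stand-in for the kernel IDR */

    /* allocate the first free id in [start, MAX_VGPU), -1 if exhausted */
    static int idr_alloc_sketch(void *ptr, int start)
    {
        for (int id = start; id < MAX_VGPU; id++) {
            if (!slots[id]) {
                slots[id] = ptr;
                return id;
            }
        }
        return -1;
    }

    int main(void)
    {
        int idle = 1, vgpu_a = 2, vgpu_b = 3;

        slots[IDLE_VGPU_IDR] = &idle;  /* id 0 is reserved for the idle vGPU */

        /* real vGPUs allocate from IDLE_VGPU_IDR + 1 onwards */
        printf("vgpu_a id = %d\n", idr_alloc_sketch(&vgpu_a, IDLE_VGPU_IDR + 1));
        printf("vgpu_b id = %d\n", idr_alloc_sketch(&vgpu_b, IDLE_VGPU_IDR + 1));
        return 0;
    }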
@@ -232,13 +302,15 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 
 	mutex_lock(&gvt->lock);
 
-	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
+		GFP_KERNEL);
 	if (ret < 0)
 		goto out_free_vgpu;
 
 	vgpu->id = ret;
 	vgpu->handle = param->handle;
 	vgpu->gvt = gvt;
+	vgpu->sched_ctl.weight = param->weight;
 	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
@@ -325,6 +397,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	param.low_gm_sz = type->low_gm_size;
 	param.high_gm_sz = type->high_gm_size;
 	param.fence_sz = type->fence;
+	param.weight = type->weight;
 	param.resolution = type->resolution;
 
 	/* XXX current param based on MB */
@@ -45,6 +45,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
 		return true;
 	if (IS_SKYLAKE(dev_priv))
 		return true;
+	if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+		return true;
 	return false;
 }
 